-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go             | 3244
-rw-r--r--  pkg/marshal/primitive/primitive_abi_autogen_unsafe.go |  400
-rw-r--r--  pkg/sentry/socket/control/control.go                  |   26
-rw-r--r--  pkg/sentry/socket/hostinet/socket.go                  |   48
-rw-r--r--  runsc/boot/filter/config.go                           |   60
5 files changed, 1944 insertions, 1834 deletions
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 4f8cf9eae..2c947e182 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -1352,45 +1352,45 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMkdirMeta) SizeBytes() int {
+func (f *FUSEOpenIn) SizeBytes() int {
return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMkdirMeta) Packed() bool {
+func (f *FUSEOpenIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1407,13 +1407,13 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1429,7 +1429,7 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1445,77 +1445,25 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSESetAttrIn) SizeBytes() int {
- return 88
+func (f *FUSEOpenOut) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
+func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
- f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
+func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -1523,23 +1471,23 @@ func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSESetAttrIn) Packed() bool {
+func (f *FUSEOpenOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1556,13 +1504,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1578,7 +1526,7 @@ func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1594,41 +1542,45 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpcode) SizeBytes() int {
- return 4
+func (f *FUSEMkdirMeta) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpcode) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
- *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpcode) Packed() bool {
- // Scalar newtypes are always packed.
+func (f *FUSEMkdirMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1645,13 +1597,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1667,7 +1619,7 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1675,7 +1627,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -1683,82 +1635,53 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitOut) SizeBytes() int {
- return 32 +
- 4*8
+func (f *FUSEDirentMeta) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
- dst = dst[2:]
- // Padding: dst[:sizeof(uint16)] ~= uint16(0)
- dst = dst[2:]
- // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
- dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- // Padding: var _ uint16 ~= src[:sizeof(uint16)]
- src = src[2:]
- // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
- src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitOut) Packed() bool {
+func (f *FUSEDirentMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1775,13 +1698,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1797,7 +1720,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1813,49 +1736,101 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrIn) SizeBytes() int {
- return 16
+func (f *FUSESetAttrIn) SizeBytes() int {
+ return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
+func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
- f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
+ f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEGetAttrIn) Packed() bool {
+func (f *FUSESetAttrIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1872,13 +1847,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1894,7 +1869,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1910,182 +1885,53 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrOut) SizeBytes() int {
- return 16 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSEInitIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
+func (f *FUSEInitIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (f *FUSEGetAttrOut) Packed() bool {
- return f.Attr.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return f.CopyOutN(cc, addr, f.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenIn) SizeBytes() int {
- return 8
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenIn) Packed() bool {
+func (f *FUSEInitIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2102,13 +1948,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2124,7 +1970,7 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2140,53 +1986,49 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReleaseIn) SizeBytes() int {
- return 24
+func (f *FUSEGetAttrIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
+ f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReleaseIn) Packed() bool {
+func (f *FUSEGetAttrIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2203,13 +2045,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2225,7 +2067,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2354,29 +2196,21 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMknodMeta) SizeBytes() int {
- return 16
+func (f *FUSEWriteOut) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2384,23 +2218,23 @@ func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMknodMeta) Packed() bool {
+func (f *FUSEWriteOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2417,13 +2251,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2439,7 +2273,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2455,88 +2289,53 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderIn) SizeBytes() int {
- return 28 +
- (*FUSEOpcode)(nil).SizeBytes() +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSEReleaseIn) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
- dst = dst[4:]
- f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
- dst = dst[f.Opcode.SizeBytes():]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
- src = src[f.Opcode.SizeBytes():]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderIn) Packed() bool {
- return f.Opcode.Packed() && f.Unique.Packed()
+func (f *FUSEReleaseIn) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2553,23 +2352,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2585,15 +2374,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2609,67 +2390,41 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderOut) SizeBytes() int {
- return 8 +
- (*FUSEOpID)(nil).SizeBytes()
+//go:nosplit
+func (f *FUSEOpID) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
- dst = dst[4:]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
+func (f *FUSEOpID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
+func (f *FUSEOpID) UnmarshalBytes(src []byte) {
+ *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderOut) Packed() bool {
- return f.Unique.Packed()
+func (f *FUSEOpID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
- if f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
- if f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2686,23 +2441,13 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2718,15 +2463,7 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2734,7 +2471,7 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -2742,53 +2479,88 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitIn) SizeBytes() int {
- return 16
+func (f *FUSEHeaderIn) SizeBytes() int {
+ return 28 +
+ (*FUSEOpcode)(nil).SizeBytes() +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
+ dst = dst[f.Opcode.SizeBytes():]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
+ src = src[f.Opcode.SizeBytes():]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitIn) Packed() bool {
- return true
+func (f *FUSEHeaderIn) Packed() bool {
+ return f.Opcode.Packed() && f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2805,13 +2577,23 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2827,7 +2609,15 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2992,49 +2782,71 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenOut) SizeBytes() int {
- return 16
+func (f *FUSEGetAttrOut) SizeBytes() int {
+ return 16 +
+ (*FUSEAttr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenOut) Packed() bool {
- return true
+func (f *FUSEGetAttrOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3051,13 +2863,23 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3073,7 +2895,15 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3089,45 +2919,83 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteOut) SizeBytes() int {
- return 8
+func (f *FUSEEntryOut) SizeBytes() int {
+ return 40 +
+ (*FUSEAttr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteOut) Packed() bool {
- return true
+func (f *FUSEEntryOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3144,13 +3012,23 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3166,7 +3044,15 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3183,40 +3069,40 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (f *FUSEOpID) SizeBytes() int {
- return 8
+func (f *FUSEOpcode) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
+func (f *FUSEOpcode) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpID) UnmarshalBytes(src []byte) {
- *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
+ *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpID) Packed() bool {
+func (f *FUSEOpcode) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3233,13 +3119,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3255,7 +3141,7 @@ func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3271,83 +3157,82 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEEntryOut) SizeBytes() int {
- return 40 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSEInitOut) SizeBytes() int {
+ return 32 +
+ 4*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
+func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
+ dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
+ src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEEntryOut) Packed() bool {
- return f.Attr.Packed()
+func (f *FUSEInitOut) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3364,23 +3249,13 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3396,15 +3271,7 @@ func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3634,53 +3501,53 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEDirentMeta) SizeBytes() int {
- return 24
+func (f *FUSEMknodMeta) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
+func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEDirentMeta) Packed() bool {
+func (f *FUSEMknodMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3697,13 +3564,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3719,7 +3586,140 @@ func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEHeaderOut) SizeBytes() int {
+ return 8 +
+ (*FUSEOpID)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ dst = dst[4:]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEHeaderOut) Packed() bool {
+ return f.Unique.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4473,6 +4473,235 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetEntries) SizeBytes() int {
+ return 4 +
+ (*TableName)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetEntries) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetEntries) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetEntries) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (en *ExtensionName) SizeBytes() int {
+ return 1 * XT_EXTENSION_MAXNAMELEN
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (en *ExtensionName) MarshalBytes(dst []byte) {
+ for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
+ dst[0] = byte(en[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (en *ExtensionName) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
+ en[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (en *ExtensionName) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (en *ExtensionName) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(en))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (en *ExtensionName) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(en), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (en *ExtensionName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return en.CopyOutN(cc, addr, en.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (en *ExtensionName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (en *ExtensionName) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
+ hdr.Len = en.SizeBytes()
+ hdr.Cap = en.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that en
+ // must live until the use above.
+ runtime.KeepAlive(en) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (tn *TableName) SizeBytes() int {
return 1 * XT_TABLE_MAXNAMELEN
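Every CopyOutN, CopyIn and WriteTo body in this file, including the ExtensionName methods added above, avoids an intermediate buffer by pointing a []byte at the receiver's own storage and pinning the receiver with runtime.KeepAlive. The following is a self-contained sketch of that trick, assuming only the standard library; unsafe.Slice (Go 1.17+) is the modern equivalent of the reflect.SliceHeader construction used here. Illustrative only, not part of the generated file.

package main

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

type header struct {
	Len    uint32
	Opcode uint32
}

// writeTo writes the raw in-memory bytes of h without an intermediate buffer.
func (h *header) writeTo(w io.Writer) (int64, error) {
	size := int(unsafe.Sizeof(*h))

	// Construct a slice backed by h's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(h))
	hdr.Len = size
	hdr.Cap = size

	length, err := w.Write(buf)
	// The garbage collector cannot see h through the raw uintptr above, so
	// keep it alive until the write has completed.
	runtime.KeepAlive(h)
	return int64(length), err
}

func main() {
	h := header{Len: 16, Opcode: 26}
	var out bytes.Buffer
	n, err := h.writeTo(&out)
	fmt.Println(n, err, out.Bytes())
}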
@@ -5279,235 +5508,6 @@ func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTGetEntries) SizeBytes() int {
- return 4 +
- (*TableName)(nil).SizeBytes() +
- 1*4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTGetEntries) MarshalBytes(dst []byte) {
- i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
- dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
- dst = dst[4:]
- // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
- dst = dst[1*(4):]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
- i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
- src = src[i.Name.SizeBytes():]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
- src = src[1*(4):]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IPTGetEntries) Packed() bool {
- return i.Name.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTGetEntries) MarshalUnsafe(dst []byte) {
- if i.Name.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
- if i.Name.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (en *ExtensionName) SizeBytes() int {
- return 1 * XT_EXTENSION_MAXNAMELEN
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (en *ExtensionName) MarshalBytes(dst []byte) {
- for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
- dst[0] = byte(en[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (en *ExtensionName) UnmarshalBytes(src []byte) {
- for idx := 0; idx < XT_EXTENSION_MAXNAMELEN; idx++ {
- en[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (en *ExtensionName) Packed() bool {
- // Array newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (en *ExtensionName) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(en))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (en *ExtensionName) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(en), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (en *ExtensionName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
- hdr.Len = en.SizeBytes()
- hdr.Cap = en.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that en
- // must live until the use above.
- runtime.KeepAlive(en) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (en *ExtensionName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return en.CopyOutN(cc, addr, en.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (en *ExtensionName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
- hdr.Len = en.SizeBytes()
- hdr.Cap = en.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that en
- // must live until the use above.
- runtime.KeepAlive(en) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (en *ExtensionName) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(en)))
- hdr.Len = en.SizeBytes()
- hdr.Cap = en.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that en
- // must live until the use above.
- runtime.KeepAlive(en) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (i *IP6TReplace) SizeBytes() int {
return 24 +
(*TableName)(nil).SizeBytes() +
@@ -7391,62 +7391,41 @@ func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *Sigevent) SizeBytes() int {
- return 20 +
- 1*44
+//go:nosplit
+func (s *SignalSet) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *Sigevent) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Value))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Notify))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Tid))
- dst = dst[4:]
- for idx := 0; idx < 44; idx++ {
- dst[0] = byte(s.UnRemainder[idx])
- dst = dst[1:]
- }
+func (s *SignalSet) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*s))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *Sigevent) UnmarshalBytes(src []byte) {
- s.Value = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Notify = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Tid = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- for idx := 0; idx < 44; idx++ {
- s.UnRemainder[idx] = src[0]
- src = src[1:]
- }
+func (s *SignalSet) UnmarshalBytes(src []byte) {
+ *s = SignalSet(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *Sigevent) Packed() bool {
+func (s *SignalSet) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *Sigevent) MarshalUnsafe(dst []byte) {
+func (s *SignalSet) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *Sigevent) UnmarshalUnsafe(src []byte) {
+func (s *SignalSet) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7463,13 +7442,13 @@ func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *Sigevent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalSet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7485,7 +7464,7 @@ func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
+func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7493,7 +7472,7 @@ func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
runtime.KeepAlive(s) // escapes: replaced by intrinsic.
@@ -7501,41 +7480,62 @@ func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (s *SignalSet) SizeBytes() int {
- return 8
+func (s *Sigevent) SizeBytes() int {
+ return 20 +
+ 1*44
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalSet) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*s))
+func (s *Sigevent) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Value))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Notify))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Tid))
+ dst = dst[4:]
+ for idx := 0; idx < 44; idx++ {
+ dst[0] = byte(s.UnRemainder[idx])
+ dst = dst[1:]
+ }
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalSet) UnmarshalBytes(src []byte) {
- *s = SignalSet(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (s *Sigevent) UnmarshalBytes(src []byte) {
+ s.Value = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Notify = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Tid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 44; idx++ {
+ s.UnRemainder[idx] = src[0]
+ src = src[1:]
+ }
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SignalSet) Packed() bool {
- // Scalar newtypes are always packed.
+func (s *Sigevent) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalSet) MarshalUnsafe(dst []byte) {
+func (s *Sigevent) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalSet) UnmarshalUnsafe(src []byte) {
+func (s *Sigevent) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *Sigevent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7552,13 +7552,13 @@ func (s *SignalSet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalSet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sigevent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *Sigevent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7574,7 +7574,7 @@ func (s *SignalSet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, erro
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
+func (s *Sigevent) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7582,7 +7582,7 @@ func (s *SignalSet) WriteTo(w io.Writer) (int64, error) {
hdr.Len = s.SizeBytes()
hdr.Cap = s.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
runtime.KeepAlive(s) // escapes: replaced by intrinsic.
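The Sigevent/SignalSet hunk above appears to only reorder the two types within the file; the generated bodies themselves are unchanged. SignalSet is a uint64 newtype, and scalar newtypes get the simplest generated form: a fixed 8-byte size, a single PutUint64/Uint64 call per direction, and an unconditional Packed() of true. A minimal standard-library sketch of that shape, for illustration only:

package main

import (
	"encoding/binary"
	"fmt"
)

type signalSet uint64

func (s *signalSet) sizeBytes() int { return 8 }

// marshalBytes writes the mask as a single fixed-width integer.
func (s *signalSet) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(*s))
}

// unmarshalBytes reads the mask back from the same 8 bytes.
func (s *signalSet) unmarshalBytes(src []byte) {
	*s = signalSet(binary.LittleEndian.Uint64(src[:8]))
}

func main() {
	in := signalSet(0xdeadbeef) // arbitrary 64-bit mask
	buf := make([]byte, in.sizeBytes())
	in.marshalBytes(buf)

	var out signalSet
	out.unmarshalBytes(buf)
	fmt.Printf("%#x %#x\n", uint64(in), uint64(out))
}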
@@ -7730,6 +7730,467 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *TCPInfo) SizeBytes() int {
+ return 192
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TCPInfo) MarshalBytes(dst []byte) {
+ dst[0] = byte(t.State)
+ dst = dst[1:]
+ dst[0] = byte(t.CaState)
+ dst = dst[1:]
+ dst[0] = byte(t.Retransmits)
+ dst = dst[1:]
+ dst[0] = byte(t.Probes)
+ dst = dst[1:]
+ dst[0] = byte(t.Backoff)
+ dst = dst[1:]
+ dst[0] = byte(t.Options)
+ dst = dst[1:]
+ dst[0] = byte(t.WindowScale)
+ dst = dst[1:]
+ dst[0] = byte(t.DeliveryRateAppLimited)
+ dst = dst[1:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TCPInfo) UnmarshalBytes(src []byte) {
+ t.State = uint8(src[0])
+ src = src[1:]
+ t.CaState = uint8(src[0])
+ src = src[1:]
+ t.Retransmits = uint8(src[0])
+ src = src[1:]
+ t.Probes = uint8(src[0])
+ src = src[1:]
+ t.Backoff = uint8(src[0])
+ src = src[1:]
+ t.Options = uint8(src[0])
+ src = src[1:]
+ t.WindowScale = uint8(src[0])
+ src = src[1:]
+ t.DeliveryRateAppLimited = uint8(src[0])
+ src = src[1:]
+ t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TCPInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TCPInfo) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *ControlMessageCredentials) SizeBytes() int {
+ return 12
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
+ c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *ControlMessageCredentials) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(c))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(c), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *InetAddr) SizeBytes() int {
+ return 1 * 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetAddr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(i[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetAddr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 4; idx++ {
+ i[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetAddr) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetAddr) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetAddr) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SockAddrLink) SizeBytes() int {
return 12 +
1*8
@@ -8039,198 +8500,6 @@ func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (c *ControlMessageCredentials) SizeBytes() int {
- return 12
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
- c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (c *ControlMessageCredentials) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(c))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(c), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return c.CopyOutN(cc, addr, c.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (i *InetAddr) SizeBytes() int {
- return 1 * 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *InetAddr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 4; idx++ {
- dst[0] = byte(i[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *InetAddr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 4; idx++ {
- i[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *InetAddr) Packed() bool {
- // Array newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *InetAddr) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *InetAddr) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SockAddrInet) SizeBytes() int {
return 4 +
(*InetAddr)(nil).SizeBytes() +
@@ -8369,275 +8638,6 @@ func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *TCPInfo) SizeBytes() int {
- return 192
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TCPInfo) MarshalBytes(dst []byte) {
- dst[0] = byte(t.State)
- dst = dst[1:]
- dst[0] = byte(t.CaState)
- dst = dst[1:]
- dst[0] = byte(t.Retransmits)
- dst = dst[1:]
- dst[0] = byte(t.Probes)
- dst = dst[1:]
- dst[0] = byte(t.Backoff)
- dst = dst[1:]
- dst[0] = byte(t.Options)
- dst = dst[1:]
- dst[0] = byte(t.WindowScale)
- dst = dst[1:]
- dst[0] = byte(t.DeliveryRateAppLimited)
- dst = dst[1:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
- dst = dst[8:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TCPInfo) UnmarshalBytes(src []byte) {
- t.State = uint8(src[0])
- src = src[1:]
- t.CaState = uint8(src[0])
- src = src[1:]
- t.Retransmits = uint8(src[0])
- src = src[1:]
- t.Probes = uint8(src[0])
- src = src[1:]
- t.Backoff = uint8(src[0])
- src = src[1:]
- t.Options = uint8(src[0])
- src = src[1:]
- t.WindowScale = uint8(src[0])
- src = src[1:]
- t.DeliveryRateAppLimited = uint8(src[0])
- src = src[1:]
- t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (t *TCPInfo) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TCPInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (i *Inet6Addr) SizeBytes() int {
return 1 * 16
@@ -8843,136 +8843,6 @@ func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *ItimerVal) SizeBytes() int {
- return 0 +
- (*Timeval)(nil).SizeBytes() +
- (*Timeval)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *ItimerVal) MarshalBytes(dst []byte) {
- i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
- dst = dst[i.Interval.SizeBytes():]
- i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
- dst = dst[i.Value.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *ItimerVal) UnmarshalBytes(src []byte) {
- i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
- src = src[i.Interval.SizeBytes():]
- i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
- src = src[i.Value.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *ItimerVal) Packed() bool {
- return i.Interval.Packed() && i.Value.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *ItimerVal) MarshalUnsafe(dst []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (c *ClockT) SizeBytes() int {
return 8
@@ -9202,95 +9072,6 @@ func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimerID) SizeBytes() int {
- return 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimerID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimerID) UnmarshalBytes(src []byte) {
- *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (t *TimerID) Packed() bool {
- // Scalar newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimerID) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimerID) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (sxts *StatxTimestamp) SizeBytes() int {
return 16
}
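The ItimerVal and TimerID marshalling removed in the hunks above is re-added verbatim by the next hunk further down in the file; only their position in the generated output changes. As a rough standalone sketch of the scalar-newtype pattern TimerID follows, the snippet below mirrors the generated MarshalBytes/UnmarshalBytes pair using encoding/binary; note the generated code actually uses usermem.ByteOrder (host byte order), which little-endian merely approximates here.

package main

import (
	"encoding/binary"
	"fmt"
)

// TimerID mirrors the 4-byte scalar newtype marshalled by the generated code.
type TimerID int32

// MarshalBytes writes the value as a 4-byte integer, as in the diff above.
func (t *TimerID) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], uint32(*t))
}

// UnmarshalBytes reads the value back from the first 4 bytes of src.
func (t *TimerID) UnmarshalBytes(src []byte) {
	*t = TimerID(int32(binary.LittleEndian.Uint32(src[:4])))
}

func main() {
	in := TimerID(7)
	buf := make([]byte, 4)
	in.MarshalBytes(buf)

	var out TimerID
	out.UnmarshalBytes(buf)
	fmt.Println(in == out) // true: the round trip preserves the value
}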
@@ -10058,6 +9839,225 @@ func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *ItimerVal) SizeBytes() int {
+ return 0 +
+ (*Timeval)(nil).SizeBytes() +
+ (*Timeval)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *ItimerVal) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *ItimerVal) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *ItimerVal) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *ItimerVal) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (t *TimerID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TimerID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TimerID) UnmarshalBytes(src []byte) {
+ *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TimerID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TimerID) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TimerID) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (w *Winsize) SizeBytes() int {
return 8
}
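The TCPInfo marshalling added above is presumably consumed by the hostinet getsockopt(TCP_INFO) path elsewhere in this change. A minimal sketch of how the generated SizeBytes/MarshalBytes pair is driven, assuming the pkg/abi/linux layout shown in this diff; the optlen and field values below are illustrative, not taken from the change itself.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	// Illustrative values only; a real getsockopt path fills these from the
	// host socket.
	var info linux.TCPInfo
	info.State = 1 // TCP_ESTABLISHED
	info.RTT = 500
	info.SndCwnd = 10

	// SizeBytes/MarshalBytes are the generated methods shown in this diff.
	buf := make([]byte, info.SizeBytes())
	info.MarshalBytes(buf)

	// getsockopt(TCP_INFO) copies out at most the optlen supplied by the
	// caller; 104 here is a hypothetical caller buffer size.
	const userOptLen = 104
	n := userOptLen
	if n > len(buf) {
		n = len(buf)
	}
	fmt.Printf("would copy %d of %d marshalled bytes to userspace\n", n, len(buf))
}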
diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
index 0ddb07673..9e49dbe6a 100644
--- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
+++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -25,40 +25,40 @@ var _ marshal.Marshallable = (*Uint8)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint32) SizeBytes() int {
- return 4
+func (u *Uint16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
+func (u *Uint16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint32) UnmarshalBytes(src []byte) {
- *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (u *Uint16) UnmarshalBytes(src []byte) {
+ *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint32) Packed() bool {
+func (u *Uint16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint32) MarshalUnsafe(dst []byte) {
+func (u *Uint16) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint32) UnmarshalUnsafe(src []byte) {
+func (u *Uint16) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -75,13 +75,13 @@ func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -97,7 +97,7 @@ func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -112,14 +112,14 @@ func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
+// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
//go:nosplit
-func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
+func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -138,14 +138,14 @@ func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32)
return length, err
}
-// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
+// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
//go:nosplit
-func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
+func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -164,13 +164,13 @@ func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32)
return length, err
}
-// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
-func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
+// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
+func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -182,13 +182,13 @@ func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
-func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
+// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
+func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -202,40 +202,40 @@ func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int64) SizeBytes() int {
- return 8
+func (i *Int32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
+func (i *Int32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int64) UnmarshalBytes(src []byte) {
- *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (i *Int32) UnmarshalBytes(src []byte) {
+ *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int64) Packed() bool {
+func (i *Int32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int64) MarshalUnsafe(dst []byte) {
+func (i *Int32) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int64) UnmarshalUnsafe(src []byte) {
+func (i *Int32) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -252,13 +252,13 @@ func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -274,7 +274,7 @@ func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int64) WriteTo(w io.Writer) (int64, error) {
+func (i *Int32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -289,14 +289,14 @@ func (i *Int64) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
+// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
//go:nosplit
-func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
+func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -315,14 +315,14 @@ func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (i
return length, err
}
-// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
+// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
//go:nosplit
-func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
+func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -341,13 +341,13 @@ func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (
return length, err
}
-// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
-func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
+// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
+func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -359,13 +359,13 @@ func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
-func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
+// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
+func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -379,40 +379,40 @@ func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint64) SizeBytes() int {
- return 8
+func (u *Uint32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
+func (u *Uint32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint64) UnmarshalBytes(src []byte) {
- *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (u *Uint32) UnmarshalBytes(src []byte) {
+ *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint64) Packed() bool {
+func (u *Uint32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint64) MarshalUnsafe(dst []byte) {
+func (u *Uint32) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint64) UnmarshalUnsafe(src []byte) {
+func (u *Uint32) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -429,13 +429,13 @@ func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -451,7 +451,7 @@ func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -466,14 +466,14 @@ func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
+// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
//go:nosplit
-func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
+func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -492,14 +492,14 @@ func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64)
return length, err
}
-// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
+// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
//go:nosplit
-func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
+func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -518,13 +518,13 @@ func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64)
return length, err
}
-// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
-func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
+// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
+func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -536,13 +536,13 @@ func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
-func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
+// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
+func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -556,40 +556,40 @@ func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int8) SizeBytes() int {
- return 1
+func (i *Int64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int8) MarshalBytes(dst []byte) {
- dst[0] = byte(*i)
+func (i *Int64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int8) UnmarshalBytes(src []byte) {
- *i = Int8(int8(src[0]))
+func (i *Int64) UnmarshalBytes(src []byte) {
+ *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int8) Packed() bool {
+func (i *Int64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int8) MarshalUnsafe(dst []byte) {
+func (i *Int64) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int8) UnmarshalUnsafe(src []byte) {
+func (i *Int64) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -606,13 +606,13 @@ func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -628,7 +628,7 @@ func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int8) WriteTo(w io.Writer) (int64, error) {
+func (i *Int64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -643,14 +643,14 @@ func (i *Int8) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
+// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
//go:nosplit
-func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
+func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -669,14 +669,14 @@ func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int
return length, err
}
-// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
+// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
//go:nosplit
-func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
+func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -695,13 +695,13 @@ func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (in
return length, err
}
-// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
-func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
+// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
+func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -713,13 +713,13 @@ func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
-func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
+// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
+func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -733,40 +733,40 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint8) SizeBytes() int {
- return 1
+func (u *Uint64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint8) MarshalBytes(dst []byte) {
- dst[0] = byte(*u)
+func (u *Uint64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint8) UnmarshalBytes(src []byte) {
- *u = Uint8(uint8(src[0]))
+func (u *Uint64) UnmarshalBytes(src []byte) {
+ *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint8) Packed() bool {
+func (u *Uint64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint8) MarshalUnsafe(dst []byte) {
+func (u *Uint64) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint8) UnmarshalUnsafe(src []byte) {
+func (u *Uint64) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -783,13 +783,13 @@ func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -805,7 +805,7 @@ func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -820,14 +820,14 @@ func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
+// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
//go:nosplit
-func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
+func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -846,14 +846,14 @@ func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (i
return length, err
}
-// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
+// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
//go:nosplit
-func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
+func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -872,13 +872,13 @@ func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (
return length, err
}
-// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
-func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
+// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
+func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -890,13 +890,13 @@ func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
-func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
+// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
+func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -910,40 +910,40 @@ func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int16) SizeBytes() int {
- return 2
+func (i *Int8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
+func (i *Int8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*i)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int16) UnmarshalBytes(src []byte) {
- *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
+func (i *Int8) UnmarshalBytes(src []byte) {
+ *i = Int8(int8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int16) Packed() bool {
+func (i *Int8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int16) MarshalUnsafe(dst []byte) {
+func (i *Int8) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int16) UnmarshalUnsafe(src []byte) {
+func (i *Int8) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -960,13 +960,13 @@ func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -982,7 +982,7 @@ func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int16) WriteTo(w io.Writer) (int64, error) {
+func (i *Int8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -997,14 +997,14 @@ func (i *Int16) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
+// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
//go:nosplit
-func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
+func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1023,14 +1023,14 @@ func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (i
return length, err
}
-// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
+// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
//go:nosplit
-func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
+func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1049,13 +1049,13 @@ func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (
return length, err
}
-// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
-func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
+// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
+func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1067,13 +1067,13 @@ func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
-func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
+// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
+func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1087,40 +1087,40 @@ func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint16) SizeBytes() int {
- return 2
+func (u *Uint8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
+func (u *Uint8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*u)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint16) UnmarshalBytes(src []byte) {
- *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
+func (u *Uint8) UnmarshalBytes(src []byte) {
+ *u = Uint8(uint8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint16) Packed() bool {
+func (u *Uint8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint16) MarshalUnsafe(dst []byte) {
+func (u *Uint8) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint16) UnmarshalUnsafe(src []byte) {
+func (u *Uint8) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1137,13 +1137,13 @@ func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1159,7 +1159,7 @@ func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1174,14 +1174,14 @@ func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
+// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
//go:nosplit
-func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
+func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1200,14 +1200,14 @@ func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16)
return length, err
}
-// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
+// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
//go:nosplit
-func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
+func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1226,13 +1226,13 @@ func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16)
return length, err
}
-// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
-func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
+// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
+func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1244,13 +1244,13 @@ func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
-func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
+// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
+func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1264,40 +1264,40 @@ func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int32) SizeBytes() int {
- return 4
+func (i *Int16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
+func (i *Int16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int32) UnmarshalBytes(src []byte) {
- *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (i *Int16) UnmarshalBytes(src []byte) {
+ *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int32) Packed() bool {
+func (i *Int16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int32) MarshalUnsafe(dst []byte) {
+func (i *Int16) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int32) UnmarshalUnsafe(src []byte) {
+func (i *Int16) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1314,13 +1314,13 @@ func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1336,7 +1336,7 @@ func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int32) WriteTo(w io.Writer) (int64, error) {
+func (i *Int16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1351,14 +1351,14 @@ func (i *Int32) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
+// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
//go:nosplit
-func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
+func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1377,14 +1377,14 @@ func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (i
return length, err
}
-// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
+// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
//go:nosplit
-func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
+func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1403,13 +1403,13 @@ func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (
return length, err
}
-// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
-func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
+// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
+func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1421,13 +1421,13 @@ func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
-func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
+// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
+func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
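
The hunks above are the regenerated scalar newtypes in pkg/marshal/primitive; every width gets the same SizeBytes/MarshalBytes/UnmarshalBytes/Copy*Slice* surface. As a quick orientation, here is a minimal round-trip through the generated Uint32 methods. It is a sketch only: the import path assumes the gvisor.dev/gvisor module for the package in this diff, and the byte order is usermem.ByteOrder (host-native), per the generated code above.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/marshal/primitive" // assumed module path for the package shown above
)

func main() {
	var u primitive.Uint32 = 0xdeadbeef

	// SizeBytes() is 4 for Uint32, per the generated method above.
	buf := make([]byte, u.SizeBytes())
	u.MarshalBytes(buf) // encodes in usermem.ByteOrder (host-native)

	var v primitive.Uint32
	v.UnmarshalBytes(buf)
	fmt.Printf("%#x\n", uint32(v)) // 0xdeadbeef
}
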
diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go
index 5b81e8379..b88cdca48 100644
--- a/pkg/sentry/socket/control/control.go
+++ b/pkg/sentry/socket/control/control.go
@@ -503,6 +503,14 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con
cmsgs.Unix.Credentials = scmCreds
i += binary.AlignUp(length, width)
+ case linux.SO_TIMESTAMP:
+ if length < linux.SizeOfTimeval {
+ return socket.ControlMessages{}, syserror.EINVAL
+ }
+ cmsgs.IP.HasTimestamp = true
+ binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &cmsgs.IP.Timestamp)
+ i += binary.AlignUp(length, width)
+
default:
// Unknown message type.
return socket.ControlMessages{}, syserror.EINVAL
@@ -529,6 +537,15 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con
cmsgs.IP.PacketInfo = packetInfo
i += binary.AlignUp(length, width)
+ case linux.IP_RECVORIGDSTADDR:
+ var addr linux.SockAddrInet
+ if length < addr.SizeBytes() {
+ return socket.ControlMessages{}, syserror.EINVAL
+ }
+ binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr)
+ cmsgs.IP.OriginalDstAddress = &addr
+ i += binary.AlignUp(length, width)
+
default:
return socket.ControlMessages{}, syserror.EINVAL
}
@@ -542,6 +559,15 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con
binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], usermem.ByteOrder, &cmsgs.IP.TClass)
i += binary.AlignUp(length, width)
+ case linux.IPV6_RECVORIGDSTADDR:
+ var addr linux.SockAddrInet6
+ if length < addr.SizeBytes() {
+ return socket.ControlMessages{}, syserror.EINVAL
+ }
+ binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr)
+ cmsgs.IP.OriginalDstAddress = &addr
+ i += binary.AlignUp(length, width)
+
default:
return socket.ControlMessages{}, syserror.EINVAL
}
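
The control.Parse additions above accept SO_TIMESTAMP, IP_RECVORIGDSTADDR, and IPV6_RECVORIGDSTADDR messages in a caller-supplied control buffer: each branch rejects short messages, unmarshals the payload with the sentry's binary package, and advances the cursor by the aligned length. For reference, the IP_RECVORIGDSTADDR payload is a 16-byte sockaddr_in. The sketch below decodes that layout with plain encoding/binary on a little-endian host; decodeOrigDstAddr and its sample bytes are illustrative, not sentry code.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// decodeOrigDstAddr decodes a 16-byte sockaddr_in: uint16 family (host order),
// uint16 port (network byte order), a 4-byte IPv4 address, 8 bytes of padding.
func decodeOrigDstAddr(b []byte) (family, port uint16, ip net.IP, err error) {
	if len(b) < 16 {
		return 0, 0, nil, fmt.Errorf("short sockaddr_in: %d bytes", len(b))
	}
	family = binary.LittleEndian.Uint16(b[0:2]) // assumes a little-endian host
	port = binary.BigEndian.Uint16(b[2:4])      // sin_port is big-endian on the wire
	ip = net.IPv4(b[4], b[5], b[6], b[7])
	return family, port, ip, nil
}

func main() {
	// AF_INET (2), port 8080 (0x1f90), address 10.0.0.1, then padding.
	raw := []byte{2, 0, 0x1f, 0x90, 10, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}
	fam, port, ip, _ := decodeOrigDstAddr(raw)
	fmt.Println(fam, port, ip) // 2 8080 10.0.0.1
}
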
diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go
index 9b337d286..be418df2e 100644
--- a/pkg/sentry/socket/hostinet/socket.go
+++ b/pkg/sentry/socket/hostinet/socket.go
@@ -331,17 +331,17 @@ func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr
switch level {
case linux.SOL_IP:
switch name {
- case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_PKTINFO:
+ case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_PKTINFO, linux.IP_RECVORIGDSTADDR:
optlen = sizeofInt32
}
case linux.SOL_IPV6:
switch name {
- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY:
+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:
optlen = sizeofInt32
}
case linux.SOL_SOCKET:
switch name {
- case linux.SO_ERROR, linux.SO_KEEPALIVE, linux.SO_SNDBUF, linux.SO_RCVBUF, linux.SO_REUSEADDR:
+ case linux.SO_ERROR, linux.SO_KEEPALIVE, linux.SO_SNDBUF, linux.SO_RCVBUF, linux.SO_REUSEADDR, linux.SO_TIMESTAMP:
optlen = sizeofInt32
case linux.SO_LINGER:
optlen = syscall.SizeofLinger
@@ -377,24 +377,24 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt []
switch level {
case linux.SOL_IP:
switch name {
- case linux.IP_TOS, linux.IP_RECVTOS:
+ case linux.IP_TOS, linux.IP_RECVTOS, linux.IP_RECVORIGDSTADDR:
optlen = sizeofInt32
case linux.IP_PKTINFO:
optlen = linux.SizeOfControlMessageIPPacketInfo
}
case linux.SOL_IPV6:
switch name {
- case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY:
+ case linux.IPV6_TCLASS, linux.IPV6_RECVTCLASS, linux.IPV6_V6ONLY, linux.IPV6_RECVORIGDSTADDR:
optlen = sizeofInt32
}
case linux.SOL_SOCKET:
switch name {
- case linux.SO_SNDBUF, linux.SO_RCVBUF, linux.SO_REUSEADDR:
+ case linux.SO_SNDBUF, linux.SO_RCVBUF, linux.SO_REUSEADDR, linux.SO_TIMESTAMP:
optlen = sizeofInt32
}
case linux.SOL_TCP:
switch name {
- case linux.TCP_NODELAY:
+ case linux.TCP_NODELAY, linux.TCP_INQ:
optlen = sizeofInt32
}
}
@@ -513,24 +513,48 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
controlMessages := socket.ControlMessages{}
for _, unixCmsg := range unixControlMessages {
switch unixCmsg.Header.Level {
- case syscall.SOL_IP:
+ case linux.SOL_SOCKET:
switch unixCmsg.Header.Type {
- case syscall.IP_TOS:
+ case linux.SO_TIMESTAMP:
+ controlMessages.IP.HasTimestamp = true
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfTimeval], usermem.ByteOrder, &controlMessages.IP.Timestamp)
+ }
+
+ case linux.SOL_IP:
+ switch unixCmsg.Header.Type {
+ case linux.IP_TOS:
controlMessages.IP.HasTOS = true
binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], usermem.ByteOrder, &controlMessages.IP.TOS)
- case syscall.IP_PKTINFO:
+ case linux.IP_PKTINFO:
controlMessages.IP.HasIPPacketInfo = true
var packetInfo linux.ControlMessageIPPacketInfo
binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], usermem.ByteOrder, &packetInfo)
controlMessages.IP.PacketInfo = packetInfo
+
+ case linux.IP_RECVORIGDSTADDR:
+ var addr linux.SockAddrInet
+ binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)
+ controlMessages.IP.OriginalDstAddress = &addr
}
- case syscall.SOL_IPV6:
+ case linux.SOL_IPV6:
switch unixCmsg.Header.Type {
- case syscall.IPV6_TCLASS:
+ case linux.IPV6_TCLASS:
controlMessages.IP.HasTClass = true
binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], usermem.ByteOrder, &controlMessages.IP.TClass)
+
+ case linux.IPV6_RECVORIGDSTADDR:
+ var addr linux.SockAddrInet6
+ binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr)
+ controlMessages.IP.OriginalDstAddress = &addr
+ }
+
+ case linux.SOL_TCP:
+ switch unixCmsg.Header.Type {
+ case linux.TCP_INQ:
+ controlMessages.IP.HasInq = true
+ binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], usermem.ByteOrder, &controlMessages.IP.Inq)
}
}
}
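
Together with the seccomp filter changes below, the hostinet hunks above let a sandboxed application enable SO_TIMESTAMP, IP_RECVORIGDSTADDR/IPV6_RECVORIGDSTADDR, and TCP_INQ and receive the corresponding control messages from recvmsg(2). A minimal userspace sketch using golang.org/x/sys/unix, assuming an already-open UDP socket fd on linux/amd64; the helper name and buffer sizes are illustrative, and error handling is kept minimal.

package example

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

func readWithCmsgs(fd int) error {
	// Ask for receive timestamps and the original destination address.
	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_TIMESTAMP, 1); err != nil {
		return err
	}
	if err := unix.SetsockoptInt(fd, unix.SOL_IP, unix.IP_RECVORIGDSTADDR, 1); err != nil {
		return err
	}

	buf := make([]byte, 2048)
	oob := make([]byte, 512)
	_, oobn, _, _, err := unix.Recvmsg(fd, buf, oob, 0)
	if err != nil {
		return err
	}

	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return err
	}
	for _, m := range msgs {
		switch {
		case m.Header.Level == unix.SOL_SOCKET && m.Header.Type == unix.SO_TIMESTAMP:
			var tv unix.Timeval // two native-endian 64-bit fields on linux/amd64
			if err := binary.Read(bytes.NewReader(m.Data), binary.LittleEndian, &tv); err != nil {
				return err
			}
			fmt.Println("rx timestamp:", tv.Sec, tv.Usec)
		case m.Header.Level == unix.SOL_IP && m.Header.Type == unix.IP_RECVORIGDSTADDR:
			fmt.Printf("original dst sockaddr_in: % x\n", m.Data)
		}
	}
	return nil
}
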
diff --git a/runsc/boot/filter/config.go b/runsc/boot/filter/config.go
index a7c4ebb0c..4e3bb9ac7 100644
--- a/runsc/boot/filter/config.go
+++ b/runsc/boot/filter/config.go
@@ -343,6 +343,16 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IP),
+ seccomp.EqualTo(syscall.IP_PKTINFO),
+ },
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IP),
+ seccomp.EqualTo(syscall.IP_RECVORIGDSTADDR),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_IPV6),
seccomp.EqualTo(syscall.IPV6_TCLASS),
},
@@ -358,6 +368,11 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IPV6),
+ seccomp.EqualTo(linux.IPV6_RECVORIGDSTADDR),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_SOCKET),
seccomp.EqualTo(syscall.SO_ERROR),
},
@@ -393,6 +408,11 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_SOCKET),
+ seccomp.EqualTo(syscall.SO_TIMESTAMP),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_TCP),
seccomp.EqualTo(syscall.TCP_NODELAY),
},
@@ -401,6 +421,11 @@ func hostInetFilters() seccomp.SyscallRules {
seccomp.EqualTo(syscall.SOL_TCP),
seccomp.EqualTo(syscall.TCP_INFO),
},
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_TCP),
+ seccomp.EqualTo(linux.TCP_INQ),
+ },
},
syscall.SYS_IOCTL: []seccomp.Rule{
{
@@ -449,6 +474,13 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_SOCKET),
+ seccomp.EqualTo(syscall.SO_TIMESTAMP),
+ seccomp.MatchAny{},
+ seccomp.EqualTo(4),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_TCP),
seccomp.EqualTo(syscall.TCP_NODELAY),
seccomp.MatchAny{},
@@ -456,6 +488,13 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_TCP),
+ seccomp.EqualTo(linux.TCP_INQ),
+ seccomp.MatchAny{},
+ seccomp.EqualTo(4),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_IP),
seccomp.EqualTo(syscall.IP_TOS),
seccomp.MatchAny{},
@@ -470,6 +509,20 @@ func hostInetFilters() seccomp.SyscallRules {
},
{
seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IP),
+ seccomp.EqualTo(syscall.IP_PKTINFO),
+ seccomp.MatchAny{},
+ seccomp.EqualTo(4),
+ },
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IP),
+ seccomp.EqualTo(syscall.IP_RECVORIGDSTADDR),
+ seccomp.MatchAny{},
+ seccomp.EqualTo(4),
+ },
+ {
+ seccomp.MatchAny{},
seccomp.EqualTo(syscall.SOL_IPV6),
seccomp.EqualTo(syscall.IPV6_TCLASS),
seccomp.MatchAny{},
@@ -482,6 +535,13 @@ func hostInetFilters() seccomp.SyscallRules {
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(syscall.SOL_IPV6),
+ seccomp.EqualTo(linux.IPV6_RECVORIGDSTADDR),
+ seccomp.MatchAny{},
+ seccomp.EqualTo(4),
+ },
},
syscall.SYS_SHUTDOWN: []seccomp.Rule{
{