author    | gVisor bot <gvisor-bot@google.com> | 2020-12-12 05:02:25 +0000
committer | gVisor bot <gvisor-bot@google.com> | 2020-12-12 05:02:25 +0000
commit    | a49eb922dea2363b731c91f556d6c21abd344dce (patch)
tree      | a6849e6ebd6ca0c5e6fc658ff2702f2dbd501acc /pkg
parent    | 77af208d67e2213b34fc339906e23f4a6d40c268 (diff)
parent    | 4b697aae55eacac75f5e9c76aacd40981720c3fd (diff)
Merge release-20201208.0-38-g4b697aae5 (automated)
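Note: this is an automated merge of regenerated go_marshal output; the 2391 insertions mirror the 2391 deletions because the generator emitted the same methods in a different order. As a hedged illustration (not part of this commit), the generated methods in the diff below come from hand-written ABI structs annotated for gVisor's marshalling code generator, roughly like the sketch that follows; the exact directive placement and field comments are assumptions.

```go
package linux

// Sketch (assumption, for illustration only): a struct annotated with the
// go_marshal directive. The tool generates the SizeBytes/Marshal*/Unmarshal*/
// Copy*/WriteTo methods that appear for IOEvent in the diff below.
//
// +marshal
type IOEvent struct {
	Data    uint64 // user-supplied cookie from the submitted IOCallback.
	Obj     uint64 // address of the originating IOCallback.
	Result  int64  // primary result of the completed operation.
	Result2 int64  // secondary result.
}
```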
Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/abi/linux/linux_abi_autogen_unsafe.go                 | 3314
-rw-r--r-- | pkg/marshal/primitive/primitive_abi_autogen_unsafe.go     |  400
-rw-r--r-- | pkg/sentry/arch/arch_abi_autogen_unsafe.go                |  220
-rw-r--r-- | pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go          |  470
-rw-r--r-- | pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go         |  178
-rw-r--r-- | pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go |  200
6 files changed, 2391 insertions, 2391 deletions
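Every type touched in this diff carries the same generated surface: SizeBytes, MarshalBytes/UnmarshalBytes, the Packed/MarshalUnsafe/UnmarshalUnsafe fast path, CopyIn/CopyOut/CopyOutN against a marshal.CopyContext, and WriteTo. A minimal sketch of round-tripping one of these types through the byte-level methods shown below; the field values and the standalone main wrapper are illustrative only.

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	ev := linux.IOEvent{Data: 0xdead, Obj: 0xbeef, Result: 42}

	// SizeBytes reports the fixed wire size (32 bytes for IOEvent per this diff).
	buf := make([]byte, ev.SizeBytes())
	ev.MarshalBytes(buf) // field-by-field, endianness-aware encoding.

	var out linux.IOEvent
	out.UnmarshalBytes(buf) // inverse of MarshalBytes.
	fmt.Printf("round-tripped: %+v\n", out)
}
```

When Packed() reports true, callers can instead take the MarshalUnsafe/UnmarshalUnsafe memcpy-style path; types whose layout is not guaranteed packed (for example FUSEEntryOut when its embedded FUSEAttr is not packed) fall back to the byte-level methods, as the generated bodies below show.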
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go index 9b24cca01..66bb07720 100644 --- a/pkg/abi/linux/linux_abi_autogen_unsafe.go +++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go @@ -100,85 +100,53 @@ var _ marshal.Marshallable = (*XTCounters)(nil) var _ marshal.Marshallable = (*XTGetRevision)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IOCallback) SizeBytes() int { - return 64 +func (i *IOEvent) SizeBytes() int { + return 32 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IOCallback) MarshalBytes(dst []byte) { +func (i *IOEvent) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.FD)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Buf)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Obj)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Offset)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result2)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Flags)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD)) - dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IOCallback) UnmarshalBytes(src []byte) { +func (i *IOEvent) UnmarshalBytes(src []byte) { i.Data = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Key = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - i.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.ReqPrio = int16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.FD = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Buf = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - i.Bytes = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Obj = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Offset = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Result = int64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Reserved2 = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Result2 = int64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.ResFD = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IOCallback) Packed() bool { +func (i *IOEvent) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IOCallback) MarshalUnsafe(dst []byte) { +func (i *IOEvent) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IOCallback) UnmarshalUnsafe(src []byte) { +func (i *IOEvent) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -195,13 +163,13 @@ func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -217,7 +185,7 @@ func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) { +func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -233,53 +201,85 @@ func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IOEvent) SizeBytes() int { - return 32 +func (i *IOCallback) SizeBytes() int { + return 64 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IOEvent) MarshalBytes(dst []byte) { +func (i *IOCallback) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Obj)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.FD)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Buf)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result2)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Offset)) dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Flags)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD)) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *IOEvent) UnmarshalBytes(src []byte) { +func (i *IOCallback) UnmarshalBytes(src []byte) { i.Data = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Obj = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Key = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + i.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.ReqPrio = int16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.FD = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Buf = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Result = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Bytes = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Result2 = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Offset = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + i.Reserved2 = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] + i.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.ResFD = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IOEvent) Packed() bool { +func (i *IOCallback) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IOEvent) MarshalUnsafe(dst []byte) { +func (i *IOCallback) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IOEvent) UnmarshalUnsafe(src []byte) { +func (i *IOCallback) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -296,13 +296,13 @@ func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -318,7 +318,7 @@ func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) { +func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -797,45 +797,67 @@ func UnmarshalUnsafeCapUserDataSlice(dst []CapUserData, src []byte) (int, error) } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FOwnerEx) SizeBytes() int { - return 8 +func (f *Flock) SizeBytes() int { + return 24 + + 1*4 + + 1*4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FOwnerEx) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) +func (f *Flock) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Type)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Whence)) + dst = dst[2:] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Start)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Len)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Pid)) dst = dst[4:] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FOwnerEx) UnmarshalBytes(src []byte) { - f.Type = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.PID = int32(usermem.ByteOrder.Uint32(src[:4])) +func (f *Flock) UnmarshalBytes(src []byte) { + f.Type = int16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.Whence = int16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4]) + src = src[1*(4):] + f.Start = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Len = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Pid = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4]) + src = src[1*(4):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FOwnerEx) Packed() bool { +func (f *Flock) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FOwnerEx) MarshalUnsafe(dst []byte) { +func (f *Flock) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FOwnerEx) UnmarshalUnsafe(src []byte) { +func (f *Flock) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -852,13 +874,13 @@ func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *Flock) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -874,7 +896,7 @@ func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) { +func (f *Flock) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -890,67 +912,45 @@ func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *Flock) SizeBytes() int { - return 24 + - 1*4 + - 1*4 +func (f *FOwnerEx) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *Flock) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Type)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Whence)) - dst = dst[2:] - // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} - dst = dst[1*(4):] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Start)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Len)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Pid)) +func (f *FOwnerEx) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) dst = dst[4:] - // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} - dst = dst[1*(4):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *Flock) UnmarshalBytes(src []byte) { - f.Type = int16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.Whence = int16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4]) - src = src[1*(4):] - f.Start = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Len = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Pid = int32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FOwnerEx) UnmarshalBytes(src []byte) { + f.Type = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.PID = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4]) - src = src[1*(4):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *Flock) Packed() bool { +func (f *FOwnerEx) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *Flock) MarshalUnsafe(dst []byte) { +func (f *FOwnerEx) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *Flock) UnmarshalUnsafe(src []byte) { +func (f *FOwnerEx) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -967,13 +967,13 @@ func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) ( // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *Flock) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -989,7 +989,7 @@ func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (f *Flock) WriteTo(writer io.Writer) (int64, error) { +func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1352,82 +1352,41 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitOut) SizeBytes() int { - return 32 + - 4*8 +//go:nosplit +func (f *FUSEOpcode) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEInitOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) - dst = dst[2:] - // Padding: dst[:sizeof(uint16)] ~= uint16(0) - dst = dst[2:] - // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} - dst = dst[4*(8):] +func (f *FUSEOpcode) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEInitOut) UnmarshalBytes(src []byte) { - f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - // Padding: var _ uint16 ~= src[:sizeof(uint16)] - src = src[2:] - // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) - src = src[4*(8):] +func (f *FUSEOpcode) UnmarshalBytes(src []byte) { + *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitOut) Packed() bool { +func (f *FUSEOpcode) Packed() bool { + // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { +func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1444,13 +1403,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1466,7 +1425,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1474,7 +1433,7 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -1482,83 +1441,41 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEEntryOut) SizeBytes() int { - return 40 + - (*FUSEAttr)(nil).SizeBytes() +//go:nosplit +func (f *FUSEOpID) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEEntryOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec)) - dst = dst[4:] - f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) - dst = dst[f.Attr.SizeBytes():] +func (f *FUSEOpID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEEntryOut) UnmarshalBytes(src []byte) { - f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) - src = src[f.Attr.SizeBytes():] +func (f *FUSEOpID) UnmarshalBytes(src []byte) { + *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEEntryOut) Packed() bool { - return f.Attr.Packed() +func (f *FUSEOpID) Packed() bool { + // Scalar newtypes are always packed. + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) { - if f.Attr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes. - f.MarshalBytes(dst) - } +func (f *FUSEOpID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) { - if f.Attr.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1575,23 +1492,13 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1607,15 +1514,7 @@ func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1623,7 +1522,7 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -1631,25 +1530,21 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEOpenOut) SizeBytes() int { - return 16 +func (f *FUSEOpenIn) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) +func (f *FUSEOpenIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -1657,23 +1552,23 @@ func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenOut) Packed() bool { +func (f *FUSEOpenIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1690,13 +1585,13 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1712,7 +1607,7 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1728,16 +1623,16 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSECreateMeta) SizeBytes() int { +func (f *FUSEMknodMeta) SizeBytes() int { return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSECreateMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) - dst = dst[4:] +func (f *FUSEMknodMeta) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) @@ -1745,11 +1640,11 @@ func (f *FUSECreateMeta) MarshalBytes(dst []byte) { } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] +func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) { f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] @@ -1758,23 +1653,23 @@ func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSECreateMeta) Packed() bool { +func (f *FUSEMknodMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1791,13 +1686,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1813,7 +1708,7 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1829,53 +1724,53 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEDirentMeta) SizeBytes() int { - return 24 +func (f *FUSEInitIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) +func (f *FUSEInitIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEInitIn) UnmarshalBytes(src []byte) { + f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEDirentMeta) Packed() bool { +func (f *FUSEInitIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1892,13 +1787,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1914,7 +1809,7 @@ func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1930,41 +1825,83 @@ func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (f *FUSEOpcode) SizeBytes() int { - return 4 +func (f *FUSEEntryOut) SizeBytes() int { + return 40 + + (*FUSEAttr)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpcode) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) +func (f *FUSEEntryOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec)) + dst = dst[4:] + f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) + dst = dst[f.Attr.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpcode) UnmarshalBytes(src []byte) { - *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) +func (f *FUSEEntryOut) UnmarshalBytes(src []byte) { + f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) + src = src[f.Attr.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpcode) Packed() bool { - // Scalar newtypes are always packed. 
- return true +func (f *FUSEEntryOut) Packed() bool { + return f.Attr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) { + if f.Attr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) { + if f.Attr.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1981,13 +1918,23 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2003,7 +1950,15 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { +func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2011,7 +1966,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := w.Write(buf) + length, err := writer.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -2019,47 +1974,41 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderIn) SizeBytes() int { - return 28 + - (*FUSEOpcode)(nil).SizeBytes() + - (*FUSEOpID)(nil).SizeBytes() +func (f *FUSEWriteIn) SizeBytes() int { + return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) - dst = dst[4:] - f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) - dst = dst[f.Opcode.SizeBytes():] - f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) - dst = dst[f.Unique.SizeBytes():] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) +func (f *FUSEWriteIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { - f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) - src = src[f.Opcode.SizeBytes():] - f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) - src = src[f.Unique.SizeBytes():] - f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) +func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -2067,40 +2016,23 @@ func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderIn) Packed() bool { - return f.Opcode.Packed() && f.Unique.Packed() +func (f *FUSEWriteIn) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. 
- f.MarshalBytes(dst) - } +func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2117,23 +2049,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2149,15 +2071,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2173,67 +2087,53 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderOut) SizeBytes() int { - return 8 + - (*FUSEOpID)(nil).SizeBytes() +func (f *FUSECreateMeta) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) +func (f *FUSECreateMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) - dst = dst[f.Unique.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { - f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) - src = src[f.Unique.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderOut) Packed() bool { - return f.Unique.Packed() +func (f *FUSECreateMeta) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { - if f.Unique.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. - f.MarshalBytes(dst) - } +func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { - if f.Unique.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2250,23 +2150,13 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. 
//go:nosplit -func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2282,15 +2172,7 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2306,49 +2188,101 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEGetAttrIn) SizeBytes() int { - return 16 +func (f *FUSESetAttrIn) SizeBytes() int { + return 88 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) +func (f *FUSESetAttrIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { - f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) { + f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] + f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEGetAttrIn) Packed() bool { +func (f *FUSESetAttrIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { +func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { +func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2365,13 +2299,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2387,7 +2321,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2403,45 +2337,49 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEOpenIn) SizeBytes() int { - return 8 +func (f *FUSEGetAttrIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) +func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { + f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenIn) Packed() bool { +func (f *FUSEGetAttrIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { +func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2458,13 +2396,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2480,7 +2418,7 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2496,41 +2434,101 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
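// Illustrative sketch, not part of the generated file: a declaration
// consistent with the FUSEGetAttrIn marshalling above -- 16 bytes, with 4
// bytes of explicit padding so Fh stays 8-byte aligned.  The padding field's
// name is an assumption; the other fields follow the MarshalBytes order.

type FUSEGetAttrIn struct {
	GetAttrFlags uint32
	_            uint32 // padding.
	Fh           uint64
}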
-//go:nosplit -func (f *FUSEOpID) SizeBytes() int { - return 8 +func (f *FUSEAttr) SizeBytes() int { + return 88 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) +func (f *FUSEAttr) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpID) UnmarshalBytes(src []byte) { - *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) +func (f *FUSEAttr) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpID) Packed() bool { - // Scalar newtypes are always packed. +func (f *FUSEAttr) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpID) MarshalUnsafe(dst []byte) { +func (f *FUSEAttr) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { +func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2547,13 +2545,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2569,7 +2567,7 @@ func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { +func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2577,7 +2575,7 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := w.Write(buf) + length, err := writer.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -2722,19 +2720,19 @@ func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEWriteIn) SizeBytes() int { +func (f *FUSEReadIn) SizeBytes() int { return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEWriteIn) MarshalBytes(dst []byte) { +func (f *FUSEReadIn) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) dst = dst[8:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) dst = dst[4:] usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) dst = dst[8:] @@ -2745,14 +2743,14 @@ func (f *FUSEWriteIn) MarshalBytes(dst []byte) { } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { +func (f *FUSEReadIn) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] @@ -2764,23 +2762,23 @@ func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. 
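// Illustrative sketch, not part of the generated file: every CopyOutN/CopyIn/
// WriteTo body in this file builds a []byte view over the struct's own memory
// instead of allocating, using the reflect.SliceHeader idiom shown above.
// A stand-alone version of that idiom, assuming the same unsafe, reflect and
// gohacks imports the generated code uses (unsafeByteView is a hypothetical
// helper name):

func unsafeByteView(ptr unsafe.Pointer, size int) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(ptr)) // hide ptr from escape analysis.
	hdr.Len = size
	hdr.Cap = size
	// The caller must runtime.KeepAlive the backing object after its last use
	// of buf, exactly as the generated methods do.
	return buf
}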
//go:nosplit -func (f *FUSEWriteIn) Packed() bool { +func (f *FUSEReadIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { +func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2797,13 +2795,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2819,7 +2817,7 @@ func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2928,101 +2926,53 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSESetAttrIn) SizeBytes() int { - return 88 +func (f *FUSEReleaseIn) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSESetAttrIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] +func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) { - f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] +func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSESetAttrIn) Packed() bool { +func (f *FUSEReleaseIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) { +func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3039,13 +2989,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3061,7 +3011,7 @@ func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3170,53 +3120,88 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitIn) SizeBytes() int { - return 16 +func (f *FUSEHeaderIn) SizeBytes() int { + return 28 + + (*FUSEOpcode)(nil).SizeBytes() + + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEInitIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) +func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) + dst = dst[f.Opcode.SizeBytes():] + f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) + dst = dst[f.Unique.SizeBytes():] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) { - f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { + f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) + src = src[f.Opcode.SizeBytes():] + f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) + src = src[f.Unique.SizeBytes():] + f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitIn) Packed() bool { - return true +func (f *FUSEHeaderIn) Packed() bool { + return f.Opcode.Packed() && f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3233,13 +3218,23 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. 
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3255,7 +3250,15 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3271,101 +3274,67 @@ func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEAttr) SizeBytes() int { - return 88 +func (f *FUSEHeaderOut) SizeBytes() int { + return 8 + + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEAttr) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) +func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) dst = dst[4:] + f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) + dst = dst[f.Unique.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
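// Illustrative sketch, not part of the generated file: types whose embedded
// fields are all packed (FUSEHeaderIn when Opcode and Unique are packed, for
// example) take the raw safecopy path, while the rest fall back to the
// field-by-field MarshalBytes encoding.  A caller-side version of the same
// dispatch; marshalToBytes is a hypothetical helper, not part of the marshal
// package:

func marshalToBytes(m marshal.Marshallable) []byte {
	buf := make([]byte, m.SizeBytes())
	if m.Packed() {
		m.MarshalUnsafe(buf) // one copy of the in-memory layout.
	} else {
		m.MarshalBytes(buf) // handles padding and embedded types explicitly.
	}
	return buf
}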
-func (f *FUSEAttr) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { + f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) + src = src[f.Unique.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEAttr) Packed() bool { - return true +func (f *FUSEHeaderOut) Packed() bool { + return f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEAttr) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { + if f.Unique.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { + if f.Unique.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3382,13 +3351,23 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. 
//go:nosplit -func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3404,7 +3383,15 @@ func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3420,65 +3407,82 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEReadIn) SizeBytes() int { - return 40 +func (f *FUSEInitOut) SizeBytes() int { + return 32 + + 4*8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEReadIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) +func (f *FUSEInitOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) + dst = dst[2:] + // Padding: dst[:sizeof(uint16)] ~= uint16(0) + dst = dst[2:] + // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} + dst = dst[4*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
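// Illustrative sketch, not part of the generated file: one possible use of
// the generated FUSEHeaderOut serialization.  writeResponseHeader and
// devWriter are hypothetical names; only the Len/Error/Unique fields and the
// SizeBytes/WriteTo methods come from the code above.

func writeResponseHeader(devWriter io.Writer, unique FUSEOpID, errno int32, payloadLen int) error {
	hdr := FUSEHeaderOut{
		Error:  errno,
		Unique: unique,
	}
	hdr.Len = uint32(hdr.SizeBytes() + payloadLen)
	_, err := hdr.WriteTo(devWriter) // falls back to MarshalBytes if Unique is not packed.
	return err
}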
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEInitOut) UnmarshalBytes(src []byte) { + f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + // Padding: var _ uint16 ~= src[:sizeof(uint16)] + src = src[2:] + // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) + src = src[4*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReadIn) Packed() bool { +func (f *FUSEInitOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { +func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3495,13 +3499,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3517,7 +3521,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3533,53 +3537,49 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
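// Illustrative sketch, not part of the generated file: a declaration
// consistent with the FUSEInitOut marshalling above -- 32 bytes of named
// fields plus a reserved [8]uint32 tail, 64 bytes in total.  The names and
// purpose of the two blank padding fields are assumptions; the rest follows
// the MarshalBytes order.

type FUSEInitOut struct {
	Major               uint32
	Minor               uint32
	MaxReadahead        uint32
	Flags               uint32
	MaxBackground       uint16
	CongestionThreshold uint16
	MaxWrite            uint32
	TimeGran            uint32
	MaxPages            uint16
	_                   uint16    // padding to a 4-byte boundary.
	_                   [8]uint32 // reserved tail, marshalled as zeros.
}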
-func (f *FUSEReleaseIn) SizeBytes() int { - return 24 +func (f *FUSEOpenOut) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { +func (f *FUSEOpenOut) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { +func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReleaseIn) Packed() bool { +func (f *FUSEOpenOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3596,13 +3596,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3618,7 +3618,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3634,53 +3634,53 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-func (f *FUSEMknodMeta) SizeBytes() int { - return 16 +func (f *FUSEDirentMeta) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEMknodMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) +func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) { - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEMknodMeta) Packed() bool { +func (f *FUSEDirentMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3697,13 +3697,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3719,7 +3719,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -4473,6 +4473,161 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *IPTGetinfo) SizeBytes() int { + return 12 + + (*TableName)(nil).SizeBytes() + + 4*NF_INET_NUMHOOKS + + 4*NF_INET_NUMHOOKS +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IPTGetinfo) MarshalBytes(dst []byte) { + i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) + dst = dst[i.Name.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) + dst = dst[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) + dst = dst[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) + dst = dst[4:] + } + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (i *IPTGetinfo) UnmarshalBytes(src []byte) { + i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) + src = src[i.Name.SizeBytes():] + i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *IPTGetinfo) Packed() bool { + return i.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *IPTGetinfo) MarshalUnsafe(dst []byte) { + if i.Name.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) { + if i.Name.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Name.Packed() { + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. 
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Name.Packed() { + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) { + if !i.Name.Packed() { + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (i *IPTGetEntries) SizeBytes() int { return 4 + (*TableName)(nil).SizeBytes() + @@ -5353,19 +5508,23 @@ func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IPTGetinfo) SizeBytes() int { - return 12 + +func (i *IP6TReplace) SizeBytes() int { + return 24 + (*TableName)(nil).SizeBytes() + 4*NF_INET_NUMHOOKS + 4*NF_INET_NUMHOOKS } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
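// Illustrative sketch, not part of the generated file: a declaration
// consistent with the IPTGetinfo marshalling above.  Because the struct
// embeds a TableName, its Packed() delegates to Name.Packed(), and the
// array lengths come from the NF_INET_NUMHOOKS constant used in the loops.

type IPTGetinfo struct {
	Name       TableName
	ValidHooks uint32
	HookEntry  [NF_INET_NUMHOOKS]uint32
	Underflow  [NF_INET_NUMHOOKS]uint32
	NumEntries uint32
	Size       uint32
}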
-func (i *IPTGetinfo) MarshalBytes(dst []byte) { +func (i *IP6TReplace) MarshalBytes(dst []byte) { i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) dst = dst[i.Name.SizeBytes():] usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + dst = dst[4:] for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) dst = dst[4:] @@ -5374,18 +5533,22 @@ func (i *IPTGetinfo) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) dst = dst[4:] } - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IPTGetinfo) UnmarshalBytes(src []byte) { +func (i *IP6TReplace) UnmarshalBytes(src []byte) { i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) src = src[i.Name.SizeBytes():] i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] @@ -5394,43 +5557,43 @@ func (i *IPTGetinfo) UnmarshalBytes(src []byte) { i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } - i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IPTGetinfo) Packed() bool { +func (i *IP6TReplace) Packed() bool { return i.Name.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IPTGetinfo) MarshalUnsafe(dst []byte) { +func (i *IP6TReplace) MarshalUnsafe(dst []byte) { if i.Name.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(i)) } else { - // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. i.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) { +func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { if i.Name.Packed() { safecopy.CopyOut(unsafe.Pointer(i), src) } else { - // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. i.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. 
i.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -5452,15 +5615,15 @@ func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes. + // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -5484,9 +5647,9 @@ func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) { +func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, i.SizeBytes()) i.MarshalBytes(buf) length, err := writer.Write(buf) @@ -5856,169 +6019,6 @@ func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IP6TReplace) SizeBytes() int { - return 24 + - (*TableName)(nil).SizeBytes() + - 4*NF_INET_NUMHOOKS + - 4*NF_INET_NUMHOOKS -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IP6TReplace) MarshalBytes(dst []byte) { - i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) - dst = dst[i.Name.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) - dst = dst[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) - dst = dst[4:] - } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) - dst = dst[4:] - } - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) - dst = dst[8:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *IP6TReplace) UnmarshalBytes(src []byte) { - i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) - src = src[i.Name.SizeBytes():] - i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *IP6TReplace) Packed() bool { - return i.Name.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IP6TReplace) MarshalUnsafe(dst []byte) { - if i.Name.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) - } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { - if i.Name.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) - } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrNetlink) SizeBytes() int { return 12 } @@ -7008,120 +7008,6 @@ func (s *SemInfo) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *ShmInfo) SizeBytes() int { - return 44 + - 1*4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *ShmInfo) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs)) - dst = dst[4:] - // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} - dst = dst[1*(4):] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses)) - dst = dst[8:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *ShmInfo) UnmarshalBytes(src []byte) { - s.UsedIDs = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4]) - src = src[1*(4):] - s.ShmTot = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.ShmRss = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.ShmSwp = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.SwapAttempts = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.SwapSuccesses = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *ShmInfo) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *ShmInfo) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *ShmInfo) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *ShmidDS) SizeBytes() int { return 40 + (*IPCPerm)(nil).SizeBytes() + @@ -7391,6 +7277,120 @@ func (s *ShmParams) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *ShmInfo) SizeBytes() int { + return 44 + + 1*4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *ShmInfo) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs)) + dst = dst[4:] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *ShmInfo) UnmarshalBytes(src []byte) { + s.UsedIDs = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4]) + src = src[1*(4):] + s.ShmTot = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.ShmRss = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.ShmSwp = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.SwapAttempts = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.SwapSuccesses = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. 
+//go:nosplit +func (s *ShmInfo) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *ShmInfo) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *ShmInfo) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit func (s *SignalSet) SizeBytes() int { return 8 @@ -7730,70 +7730,165 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrLink) SizeBytes() int { - return 12 + +func (l *Linger) SizeBytes() int { + return 8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (l *Linger) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (l *Linger) UnmarshalBytes(src []byte) { + l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + l.Linger = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. 
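// Editorial sketch (not part of the generated diff): the 8-byte wire form
// produced by (*Linger).MarshalBytes above is just two native-endian 32-bit
// fields. The standalone program below builds the same bytes with
// encoding/binary, assuming a little-endian usermem.ByteOrder.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	onOff, lingerSecs := int32(1), int32(5)
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], uint32(onOff))
	binary.LittleEndian.PutUint32(buf[4:8], uint32(lingerSecs))
	fmt.Printf("% x\n", buf) // 01 00 00 00 05 00 00 00
}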
+//go:nosplit +func (l *Linger) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (l *Linger) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(l)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (l *Linger) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(l), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return l.CopyOutN(cc, addr, l.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (l *Linger) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SockAddrInet) SizeBytes() int { + return 4 + + (*InetAddr)(nil).SizeBytes() + 1*8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (s *SockAddrLink) MarshalBytes(dst []byte) { +func (s *SockAddrInet) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex)) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType)) + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port)) dst = dst[2:] - dst[0] = byte(s.PacketType) - dst = dst[1:] - dst[0] = byte(s.HardwareAddrLen) - dst = dst[1:] - for idx := 0; idx < 8; idx++ { - dst[0] = byte(s.HardwareAddr[idx]) - dst = dst[1:] - } + s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()]) + dst = dst[s.Addr.SizeBytes():] + // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0} + dst = dst[1*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockAddrLink) UnmarshalBytes(src []byte) { +func (s *SockAddrInet) UnmarshalBytes(src []byte) { s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) src = src[2:] - s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2])) + s.Port = uint16(usermem.ByteOrder.Uint16(src[:2])) src = src[2:] - s.PacketType = src[0] - src = src[1:] - s.HardwareAddrLen = src[0] - src = src[1:] - for idx := 0; idx < 8; idx++ { - s.HardwareAddr[idx] = src[0] - src = src[1:] - } + s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()]) + src = src[s.Addr.SizeBytes():] + // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8]) + src = src[1*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SockAddrLink) Packed() bool { - return true +func (s *SockAddrInet) Packed() bool { + return s.Addr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrLink) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) +func (s *SockAddrInet) MarshalUnsafe(dst []byte) { + if s.Addr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) + } else { + // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrLink) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) +func (s *SockAddrInet) UnmarshalUnsafe(src []byte) { + if s.Addr.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) + } else { + // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7810,13 +7905,23 @@ func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + s.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7832,7 +7937,15 @@ func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) { +func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7848,50 +7961,70 @@ func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrUnix) SizeBytes() int { - return 2 + - 1*UnixPathMax +func (s *SockAddrLink) SizeBytes() int { + return 12 + + 1*8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockAddrUnix) MarshalBytes(dst []byte) { +func (s *SockAddrLink) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) dst = dst[2:] - for idx := 0; idx < UnixPathMax; idx++ { - dst[0] = byte(s.Path[idx]) + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType)) + dst = dst[2:] + dst[0] = byte(s.PacketType) + dst = dst[1:] + dst[0] = byte(s.HardwareAddrLen) + dst = dst[1:] + for idx := 0; idx < 8; idx++ { + dst[0] = byte(s.HardwareAddr[idx]) dst = dst[1:] } } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockAddrUnix) UnmarshalBytes(src []byte) { +func (s *SockAddrLink) UnmarshalBytes(src []byte) { s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) src = src[2:] - for idx := 0; idx < UnixPathMax; idx++ { - s.Path[idx] = int8(src[0]) + s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.PacketType = src[0] + src = src[1:] + s.HardwareAddrLen = src[0] + src = src[1:] + for idx := 0; idx < 8; idx++ { + s.HardwareAddr[idx] = src[0] src = src[1:] } } // Packed implements marshal.Marshallable.Packed. 
//go:nosplit -func (s *SockAddrUnix) Packed() bool { +func (s *SockAddrLink) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrUnix) MarshalUnsafe(dst []byte) { +func (s *SockAddrLink) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(s)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) { +func (s *SockAddrLink) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(s), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7908,13 +8041,13 @@ func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7930,7 +8063,7 @@ func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) { +func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -7946,95 +8079,100 @@ func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (l *Linger) SizeBytes() int { - return 8 +func (s *SockAddrUnix) SizeBytes() int { + return 2 + + 1*UnixPathMax } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (l *Linger) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger)) - dst = dst[4:] +func (s *SockAddrUnix) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) + dst = dst[2:] + for idx := 0; idx < UnixPathMax; idx++ { + dst[0] = byte(s.Path[idx]) + dst = dst[1:] + } } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (l *Linger) UnmarshalBytes(src []byte) { - l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - l.Linger = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] +func (s *SockAddrUnix) UnmarshalBytes(src []byte) { + s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + for idx := 0; idx < UnixPathMax; idx++ { + s.Path[idx] = int8(src[0]) + src = src[1:] + } } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (l *Linger) Packed() bool { +func (s *SockAddrUnix) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (l *Linger) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(l)) +func (s *SockAddrUnix) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (l *Linger) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(l), src) +func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that l + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return l.CopyOutN(cc, addr, l.SizeBytes()) +func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that l + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (l *Linger) WriteTo(writer io.Writer) (int64, error) { +func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that l + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. 
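// Editorial sketch (not part of the generated diff): a self-contained version
// of the zero-copy pattern used throughout this file. A []byte is made to
// alias a struct's memory via reflect.SliceHeader, and runtime.KeepAlive pins
// the struct for the duration of the use, since normal escape analysis was
// bypassed. gohacks.Noescape is gVisor-internal, so it is omitted here.
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

type pair struct {
	A uint32
	B uint32
}

func main() {
	p := &pair{A: 1, B: 2}
	size := int(unsafe.Sizeof(*p))

	// Construct a slice backed by p's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(p))
	hdr.Len = size
	hdr.Cap = size

	fmt.Printf("% x\n", buf) // raw bytes of *p, little-endian on x86-64/arm64
	runtime.KeepAlive(p)     // p must stay live until buf's last use above
}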
return int64(length), err } @@ -8595,125 +8733,227 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrInet) SizeBytes() int { - return 4 + - (*InetAddr)(nil).SizeBytes() + - 1*8 +func (s *SockAddrInet6) SizeBytes() int { + return 12 + + 1*16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockAddrInet) MarshalBytes(dst []byte) { +func (s *SockAddrInet6) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) dst = dst[2:] usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port)) dst = dst[2:] - s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()]) - dst = dst[s.Addr.SizeBytes():] - // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0} - dst = dst[1*(8):] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo)) + dst = dst[4:] + for idx := 0; idx < 16; idx++ { + dst[0] = byte(s.Addr[idx]) + dst = dst[1:] + } + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id)) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockAddrInet) UnmarshalBytes(src []byte) { +func (s *SockAddrInet6) UnmarshalBytes(src []byte) { s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) src = src[2:] s.Port = uint16(usermem.ByteOrder.Uint16(src[:2])) src = src[2:] - s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()]) - src = src[s.Addr.SizeBytes():] - // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8]) - src = src[1*(8):] + s.Flowinfo = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < 16; idx++ { + s.Addr[idx] = src[0] + src = src[1:] + } + s.Scope_id = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SockAddrInet) Packed() bool { - return s.Addr.Packed() +func (s *SockAddrInet6) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrInet) MarshalUnsafe(dst []byte) { - if s.Addr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) +func (s *SockAddrInet6) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *ItimerVal) SizeBytes() int { + return 0 + + (*Timeval)(nil).SizeBytes() + + (*Timeval)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *ItimerVal) MarshalBytes(dst []byte) { + i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) + dst = dst[i.Interval.SizeBytes():] + i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) + dst = dst[i.Value.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (i *ItimerVal) UnmarshalBytes(src []byte) { + i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) + src = src[i.Interval.SizeBytes():] + i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) + src = src[i.Value.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *ItimerVal) Packed() bool { + return i.Interval.Packed() && i.Value.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *ItimerVal) MarshalUnsafe(dst []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) } else { - // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) + // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrInet) UnmarshalUnsafe(src []byte) { - if s.Addr.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) +func (i *ItimerVal) UnmarshalUnsafe(src []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) } else { - // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) + // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - s.MarshalBytes(buf) // escapes: fallback. 
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. } // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) +func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. +func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. + i.UnmarshalBytes(buf) // escapes: fallback. return length, err } // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. 
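// Editorial sketch (not part of the generated diff): CopyOutN writes only the
// first `limit` bytes of the marshalled form, which is how callers perform
// short writes to user memory. A hypothetical caller, assuming the gVisor
// marshal, usermem, and abi/linux packages are imported and that cc and addr
// are supplied by the syscall path:
func copyOutFamilyAndPort(cc marshal.CopyContext, addr usermem.Addr, sa *linux.SockAddrInet) (int, error) {
	// Family (2 bytes) + Port (2 bytes) only; the address bytes and trailing
	// padding of the 16-byte wire form are not written.
	return sa.CopyOutN(cc, addr, 4)
}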
- buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) +func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) length, err := writer.Write(buf) return int64(length), err } @@ -8721,124 +8961,293 @@ func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return int64(length), err } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrInet6) SizeBytes() int { - return 12 + - 1*16 +//go:nosplit +func (t *TimerID) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockAddrInet6) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo)) +func (t *TimerID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*t)) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (t *TimerID) UnmarshalBytes(src []byte) { + *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4]))) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (t *TimerID) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (t *TimerID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(t)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (t *TimerID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(t), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return t.CopyOutN(cc, addr, t.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. 
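// Editorial sketch (not part of the generated diff): TimerID above is a
// scalar newtype over int32, so its wire form is a single native-endian
// 32-bit value. Assuming a little-endian usermem.ByteOrder, the round trip
// can be reproduced by hand with a local stand-in type:
package main

import (
	"encoding/binary"
	"fmt"
)

type TimerID int32

func main() {
	id := TimerID(7)
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], uint32(id))
	back := TimerID(int32(binary.LittleEndian.Uint32(buf[:])))
	fmt.Println(back == id) // true
}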
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (t *TimerID) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (sxts *StatxTimestamp) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (sxts *StatxTimestamp) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec)) dst = dst[4:] - for idx := 0; idx < 16; idx++ { - dst[0] = byte(s.Addr[idx]) - dst = dst[1:] - } - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id)) + // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockAddrInet6) UnmarshalBytes(src []byte) { - s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.Port = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.Flowinfo = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) { + sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - for idx := 0; idx < 16; idx++ { - s.Addr[idx] = src[0] - src = src[1:] - } - s.Scope_id = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SockAddrInet6) Packed() bool { +func (sxts *StatxTimestamp) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrInet6) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) +func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(sxts)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) +func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(sxts), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that sxts // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) +func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return sxts.CopyOutN(cc, addr, sxts.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that sxts // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) { +func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that sxts // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (u *Utime) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *Utime) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
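// Editorial sketch (not part of the generated diff): the 16-byte layout that
// (*StatxTimestamp).MarshalBytes above emits is an int64 of seconds, a uint32
// of nanoseconds, then 4 bytes of explicit padding (Utime is likewise two
// plain int64 fields). Built directly with encoding/binary, assuming a
// little-endian usermem.ByteOrder:
package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1_600_000_000, 123)
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[0:8], uint64(t.Unix()))
	binary.LittleEndian.PutUint32(buf[8:12], uint32(t.Nanosecond()))
	// buf[12:16] stays zero: the struct's trailing padding.
	fmt.Printf("% x\n", buf)
}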
+func (u *Utime) UnmarshalBytes(src []byte) { + u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *Utime) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *Utime) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(u)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *Utime) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(u), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *Utime) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. return int64(length), err } @@ -9111,233 +9520,6 @@ func UnmarshalUnsafeTimespecSlice(dst []Timespec, src []byte) (int, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *ItimerVal) SizeBytes() int { - return 0 + - (*Timeval)(nil).SizeBytes() + - (*Timeval)(nil).SizeBytes() -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *ItimerVal) MarshalBytes(dst []byte) { - i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) - dst = dst[i.Interval.SizeBytes():] - i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) - dst = dst[i.Value.SizeBytes():] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *ItimerVal) UnmarshalBytes(src []byte) { - i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) - src = src[i.Interval.SizeBytes():] - i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) - src = src[i.Value.SizeBytes():] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *ItimerVal) Packed() bool { - return i.Interval.Packed() && i.Value.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *ItimerVal) MarshalUnsafe(dst []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) - } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *ItimerVal) UnmarshalUnsafe(src []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) - } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (sxts *StatxTimestamp) SizeBytes() int { - return 16 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (sxts *StatxTimestamp) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec)) - dst = dst[4:] - // Padding: dst[:sizeof(int32)] ~= int32(0) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) { - sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ int32 ~= src[:sizeof(int32)] - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (sxts *StatxTimestamp) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(sxts)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(sxts), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that sxts - // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return sxts.CopyOutN(cc, addr, sxts.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. 
- // Since we bypassed the compiler's escape analysis, indicate that sxts - // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that sxts - // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (tv *Timeval) SizeBytes() int { return 16 } @@ -9876,188 +10058,6 @@ func (t *Tms) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (t *TimerID) SizeBytes() int { - return 4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (t *TimerID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*t)) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (t *TimerID) UnmarshalBytes(src []byte) { - *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4]))) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (t *TimerID) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (t *TimerID) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(t)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (t *TimerID) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(t), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return t.CopyOutN(cc, addr, t.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (t *TimerID) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (u *Utime) SizeBytes() int { - return 16 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Utime) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) - dst = dst[8:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Utime) UnmarshalBytes(src []byte) { - u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (u *Utime) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Utime) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(u)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Utime) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(u), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (u *Utime) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (w *Winsize) SizeBytes() int { return 8 } diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go index 0ddb07673..469fb972f 100644 --- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go +++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go @@ -25,40 +25,40 @@ var _ marshal.Marshallable = (*Uint8)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint32) SizeBytes() int { - return 4 +func (u *Uint8) SizeBytes() int { + return 1 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Uint32) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*u)) +func (u *Uint8) MarshalBytes(dst []byte) { + dst[0] = byte(*u) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Uint32) UnmarshalBytes(src []byte) { - *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4]))) +func (u *Uint8) UnmarshalBytes(src []byte) { + *u = Uint8(uint8(src[0])) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (u *Uint32) Packed() bool { +func (u *Uint8) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Uint32) MarshalUnsafe(dst []byte) { +func (u *Uint8) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(u)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint32) UnmarshalUnsafe(src []byte) { +func (u *Uint8) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(u), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -75,13 +75,13 @@ func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return u.CopyOutN(cc, addr, u.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -97,7 +97,7 @@ func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (u *Uint32) WriteTo(w io.Writer) (int64, error) { +func (u *Uint8) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -112,14 +112,14 @@ func (u *Uint32) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory. +// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. //go:nosplit -func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) { +func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint32)(nil).SizeBytes() + size := (*Uint8)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -138,14 +138,14 @@ func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) return length, err } -// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory. +// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. //go:nosplit -func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) { +func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint32)(nil).SizeBytes() + size := (*Uint8)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -164,13 +164,13 @@ func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) return length, err } -// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32. -func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) { +// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. +func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint32)(nil).SizeBytes() + size := (*Uint8)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -182,13 +182,13 @@ func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32. -func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) { +// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. +func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint32)(nil).SizeBytes() + size := (*Uint8)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -202,40 +202,40 @@ func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (i *Int64) SizeBytes() int { - return 8 +func (i *Int16) SizeBytes() int { + return 2 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Int64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*i)) +func (i *Int16) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(*i)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *Int64) UnmarshalBytes(src []byte) { - *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8]))) +func (i *Int16) UnmarshalBytes(src []byte) { + *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *Int64) Packed() bool { +func (i *Int16) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Int64) MarshalUnsafe(dst []byte) { +func (i *Int16) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Int64) UnmarshalUnsafe(src []byte) { +func (i *Int16) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -252,13 +252,13 @@ func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) ( // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -274,7 +274,7 @@ func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (i *Int64) WriteTo(w io.Writer) (int64, error) { +func (i *Int16) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -289,14 +289,14 @@ func (i *Int64) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory. +// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory. //go:nosplit -func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) { +func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int64)(nil).SizeBytes() + size := (*Int16)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -315,14 +315,14 @@ func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (i return length, err } -// CopyInt64SliceOut copies a slice of int64 objects to the task's memory. +// CopyInt16SliceOut copies a slice of int16 objects to the task's memory. 
//go:nosplit -func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) { +func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int64)(nil).SizeBytes() + size := (*Int16)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -341,13 +341,13 @@ func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) ( return length, err } -// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64. -func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) { +// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16. +func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int64)(nil).SizeBytes() + size := (*Int16)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -359,13 +359,13 @@ func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64. -func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) { +// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16. +func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int64)(nil).SizeBytes() + size := (*Int16)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -379,40 +379,40 @@ func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint64) SizeBytes() int { - return 8 +func (u *Uint16) SizeBytes() int { + return 2 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Uint64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*u)) +func (u *Uint16) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(*u)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Uint64) UnmarshalBytes(src []byte) { - *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8]))) +func (u *Uint16) UnmarshalBytes(src []byte) { + *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (u *Uint64) Packed() bool { +func (u *Uint16) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Uint64) MarshalUnsafe(dst []byte) { +func (u *Uint16) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(u)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint64) UnmarshalUnsafe(src []byte) { +func (u *Uint16) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(u), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -429,13 +429,13 @@ func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return u.CopyOutN(cc, addr, u.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -451,7 +451,7 @@ func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (u *Uint64) WriteTo(w io.Writer) (int64, error) { +func (u *Uint16) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -466,14 +466,14 @@ func (u *Uint64) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory. +// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory. //go:nosplit -func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) { +func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint64)(nil).SizeBytes() + size := (*Uint16)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -492,14 +492,14 @@ func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) return length, err } -// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory. +// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory. //go:nosplit -func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) { +func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint64)(nil).SizeBytes() + size := (*Uint16)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -518,13 +518,13 @@ func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) return length, err } -// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64. -func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) { +// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16. +func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint64)(nil).SizeBytes() + size := (*Uint16)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -536,13 +536,13 @@ func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64. 
-func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) { +// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16. +func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint64)(nil).SizeBytes() + size := (*Uint16)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -556,40 +556,40 @@ func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (i *Int8) SizeBytes() int { - return 1 +func (i *Int32) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Int8) MarshalBytes(dst []byte) { - dst[0] = byte(*i) +func (i *Int32) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*i)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *Int8) UnmarshalBytes(src []byte) { - *i = Int8(int8(src[0])) +func (i *Int32) UnmarshalBytes(src []byte) { + *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *Int8) Packed() bool { +func (i *Int32) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Int8) MarshalUnsafe(dst []byte) { +func (i *Int32) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Int8) UnmarshalUnsafe(src []byte) { +func (i *Int32) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -606,13 +606,13 @@ func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -628,7 +628,7 @@ func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (i *Int8) WriteTo(w io.Writer) (int64, error) { +func (i *Int32) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -643,14 +643,14 @@ func (i *Int8) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory. +// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory. 
//go:nosplit -func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) { +func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int8)(nil).SizeBytes() + size := (*Int32)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -669,14 +669,14 @@ func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int return length, err } -// CopyInt8SliceOut copies a slice of int8 objects to the task's memory. +// CopyInt32SliceOut copies a slice of int32 objects to the task's memory. //go:nosplit -func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) { +func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int8)(nil).SizeBytes() + size := (*Int32)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -695,13 +695,13 @@ func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (in return length, err } -// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8. -func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) { +// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32. +func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int8)(nil).SizeBytes() + size := (*Int32)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -713,13 +713,13 @@ func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8. -func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) { +// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32. +func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int8)(nil).SizeBytes() + size := (*Int32)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -733,40 +733,40 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint8) SizeBytes() int { - return 1 +func (u *Uint32) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Uint8) MarshalBytes(dst []byte) { - dst[0] = byte(*u) +func (u *Uint32) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*u)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Uint8) UnmarshalBytes(src []byte) { - *u = Uint8(uint8(src[0])) +func (u *Uint32) UnmarshalBytes(src []byte) { + *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (u *Uint8) Packed() bool { +func (u *Uint32) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (u *Uint8) MarshalUnsafe(dst []byte) { +func (u *Uint32) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(u)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint8) UnmarshalUnsafe(src []byte) { +func (u *Uint32) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(u), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -783,13 +783,13 @@ func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) ( // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return u.CopyOutN(cc, addr, u.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -805,7 +805,7 @@ func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (u *Uint8) WriteTo(w io.Writer) (int64, error) { +func (u *Uint32) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -820,14 +820,14 @@ func (u *Uint8) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. +// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory. //go:nosplit -func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { +func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint8)(nil).SizeBytes() + size := (*Uint32)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -846,14 +846,14 @@ func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (i return length, err } -// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. +// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory. //go:nosplit -func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { +func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint8)(nil).SizeBytes() + size := (*Uint32)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -872,13 +872,13 @@ func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) ( return length, err } -// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. 
-func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { +// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32. +func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint8)(nil).SizeBytes() + size := (*Uint32)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -890,13 +890,13 @@ func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. -func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { +// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32. +func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint8)(nil).SizeBytes() + size := (*Uint32)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -910,40 +910,40 @@ func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (i *Int16) SizeBytes() int { - return 2 +func (i *Int64) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Int16) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(*i)) +func (i *Int64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*i)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *Int16) UnmarshalBytes(src []byte) { - *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2]))) +func (i *Int64) UnmarshalBytes(src []byte) { + *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *Int16) Packed() bool { +func (i *Int64) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Int16) MarshalUnsafe(dst []byte) { +func (i *Int64) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Int16) UnmarshalUnsafe(src []byte) { +func (i *Int64) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -960,13 +960,13 @@ func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) ( // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -982,7 +982,7 @@ func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (i *Int16) WriteTo(w io.Writer) (int64, error) { +func (i *Int64) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -997,14 +997,14 @@ func (i *Int16) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory. +// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory. //go:nosplit -func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) { +func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int16)(nil).SizeBytes() + size := (*Int64)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1023,14 +1023,14 @@ func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (i return length, err } -// CopyInt16SliceOut copies a slice of int16 objects to the task's memory. +// CopyInt64SliceOut copies a slice of int64 objects to the task's memory. //go:nosplit -func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) { +func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int16)(nil).SizeBytes() + size := (*Int64)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1049,13 +1049,13 @@ func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) ( return length, err } -// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16. -func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) { +// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64. +func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int16)(nil).SizeBytes() + size := (*Int64)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1067,13 +1067,13 @@ func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16. -func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) { +// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64. +func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int16)(nil).SizeBytes() + size := (*Int64)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1087,40 +1087,40 @@ func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint16) SizeBytes() int { - return 2 +func (u *Uint64) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (u *Uint16) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(*u)) +func (u *Uint64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*u)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Uint16) UnmarshalBytes(src []byte) { - *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2]))) +func (u *Uint64) UnmarshalBytes(src []byte) { + *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (u *Uint16) Packed() bool { +func (u *Uint64) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Uint16) MarshalUnsafe(dst []byte) { +func (u *Uint64) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(u)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint16) UnmarshalUnsafe(src []byte) { +func (u *Uint64) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(u), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1137,13 +1137,13 @@ func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return u.CopyOutN(cc, addr, u.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1159,7 +1159,7 @@ func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (u *Uint16) WriteTo(w io.Writer) (int64, error) { +func (u *Uint64) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1174,14 +1174,14 @@ func (u *Uint16) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory. +// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory. //go:nosplit -func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) { +func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint16)(nil).SizeBytes() + size := (*Uint64)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1200,14 +1200,14 @@ func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) return length, err } -// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory. +// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory. 
//go:nosplit -func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) { +func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint16)(nil).SizeBytes() + size := (*Uint64)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1226,13 +1226,13 @@ func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) return length, err } -// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16. -func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) { +// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64. +func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Uint16)(nil).SizeBytes() + size := (*Uint64)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1244,13 +1244,13 @@ func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16. -func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) { +// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64. +func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Uint16)(nil).SizeBytes() + size := (*Uint64)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1264,40 +1264,40 @@ func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (i *Int32) SizeBytes() int { - return 4 +func (i *Int8) SizeBytes() int { + return 1 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Int32) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*i)) +func (i *Int8) MarshalBytes(dst []byte) { + dst[0] = byte(*i) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *Int32) UnmarshalBytes(src []byte) { - *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4]))) +func (i *Int8) UnmarshalBytes(src []byte) { + *i = Int8(int8(src[0])) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *Int32) Packed() bool { +func (i *Int8) Packed() bool { // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Int32) MarshalUnsafe(dst []byte) { +func (i *Int8) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Int32) UnmarshalUnsafe(src []byte) { +func (i *Int8) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1314,13 +1314,13 @@ func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) ( // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1336,7 +1336,7 @@ func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { } // WriteTo implements io.WriterTo.WriteTo. -func (i *Int32) WriteTo(w io.Writer) (int64, error) { +func (i *Int8) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1351,14 +1351,14 @@ func (i *Int32) WriteTo(w io.Writer) (int64, error) { return int64(length), err } -// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory. +// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory. //go:nosplit -func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) { +func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int32)(nil).SizeBytes() + size := (*Int8)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1377,14 +1377,14 @@ func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (i return length, err } -// CopyInt32SliceOut copies a slice of int32 objects to the task's memory. +// CopyInt8SliceOut copies a slice of int8 objects to the task's memory. //go:nosplit -func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) { +func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int32)(nil).SizeBytes() + size := (*Int8)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1403,13 +1403,13 @@ func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) ( return length, err } -// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32. -func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) { +// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8. +func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) { count := len(src) if count == 0 { return 0, nil } - size := (*Int32)(nil).SizeBytes() + size := (*Int8)(nil).SizeBytes() ptr := unsafe.Pointer(&src) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) @@ -1421,13 +1421,13 @@ func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) { return length, err } -// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32. -func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) { +// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8. 
+func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) { count := len(dst) if count == 0 { return 0, nil } - size := (*Int32)(nil).SizeBytes() + size := (*Int8)(nil).SizeBytes() ptr := unsafe.Pointer(&dst) val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go index fe68f921d..1bd458b68 100644 --- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go @@ -23,6 +23,116 @@ var _ marshal.Marshallable = (*SignalStack)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SignalInfo) SizeBytes() int { + return 16 + + 1*(128-16) +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SignalInfo) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + for idx := 0; idx < (128-16); idx++ { + dst[0] = byte(s.Fields[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SignalInfo) UnmarshalBytes(src []byte) { + s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + for idx := 0; idx < (128-16); idx++ { + s.Fields[idx] = src[0] + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SignalInfo) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SignalInfo) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SignalInfo) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. 
+ // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SignalAct) SizeBytes() int { return 24 + (*linux.SignalSet)(nil).SizeBytes() @@ -260,113 +370,3 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalInfo) SizeBytes() int { - return 16 + - 1*(128-16) -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalInfo) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - for idx := 0; idx < (128-16); idx++ { - dst[0] = byte(s.Fields[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalInfo) UnmarshalBytes(src []byte) { - s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - for idx := 0; idx < (128-16); idx++ { - s.Fields[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SignalInfo) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalInfo) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalInfo) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. 
-//go:nosplit -func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go index 95a73e115..b8667cdb9 100644 --- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go @@ -27,6 +27,241 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. +func (a *aarch64Ctx) SizeBytes() int { + return 8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (a *aarch64Ctx) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Magic)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Size)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (a *aarch64Ctx) UnmarshalBytes(src []byte) { + a.Magic = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + a.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (a *aarch64Ctx) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (a *aarch64Ctx) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(a)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(a), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) + hdr.Len = a.SizeBytes() + hdr.Cap = a.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that a + // must live until the use above. + runtime.KeepAlive(a) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. 
+//go:nosplit +func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return a.CopyOutN(cc, addr, a.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) + hdr.Len = a.SizeBytes() + hdr.Cap = a.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that a + // must live until the use above. + runtime.KeepAlive(a) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (a *aarch64Ctx) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) + hdr.Len = a.SizeBytes() + hdr.Cap = a.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that a + // must live until the use above. + runtime.KeepAlive(a) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FpsimdContext) SizeBytes() int { + return 8 + + (*aarch64Ctx)(nil).SizeBytes() + + 8*64 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FpsimdContext) MarshalBytes(dst []byte) { + f.Head.MarshalBytes(dst[:f.Head.SizeBytes()]) + dst = dst[f.Head.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr)) + dst = dst[4:] + for idx := 0; idx < 64; idx++ { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx])) + dst = dst[8:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FpsimdContext) UnmarshalBytes(src []byte) { + f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()]) + src = src[f.Head.SizeBytes():] + f.Fpsr = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Fpcr = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < 64; idx++ { + f.Vregs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FpsimdContext) Packed() bool { + return f.Head.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FpsimdContext) MarshalUnsafe(dst []byte) { + if f.Head.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FpsimdContext doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FpsimdContext) UnmarshalUnsafe(src []byte) { + if f.Head.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FpsimdContext doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Head.Packed() { + // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes. 
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return f.CopyOutN(cc, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Head.Packed() { + // Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) { + if !f.Head.Packed() { + // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (u *UContext64) SizeBytes() int { return 16 + (*SignalStack)(nil).SizeBytes() + @@ -355,238 +590,3 @@ func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (a *aarch64Ctx) SizeBytes() int { - return 8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (a *aarch64Ctx) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Magic)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Size)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (a *aarch64Ctx) UnmarshalBytes(src []byte) { - a.Magic = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - a.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (a *aarch64Ctx) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (a *aarch64Ctx) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(a)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(a), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) - hdr.Len = a.SizeBytes() - hdr.Cap = a.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that a - // must live until the use above. - runtime.KeepAlive(a) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return a.CopyOutN(cc, addr, a.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) - hdr.Len = a.SizeBytes() - hdr.Cap = a.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that a - // must live until the use above. - runtime.KeepAlive(a) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (a *aarch64Ctx) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a))) - hdr.Len = a.SizeBytes() - hdr.Cap = a.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that a - // must live until the use above. - runtime.KeepAlive(a) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FpsimdContext) SizeBytes() int { - return 8 + - (*aarch64Ctx)(nil).SizeBytes() + - 8*64 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FpsimdContext) MarshalBytes(dst []byte) { - f.Head.MarshalBytes(dst[:f.Head.SizeBytes()]) - dst = dst[f.Head.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr)) - dst = dst[4:] - for idx := 0; idx < 64; idx++ { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx])) - dst = dst[8:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FpsimdContext) UnmarshalBytes(src []byte) { - f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()]) - src = src[f.Head.SizeBytes():] - f.Fpsr = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Fpcr = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - for idx := 0; idx < 64; idx++ { - f.Vregs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (f *FpsimdContext) Packed() bool { - return f.Head.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FpsimdContext) MarshalUnsafe(dst []byte) { - if f.Head.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FpsimdContext doesn't have a packed layout in memory, fallback to MarshalBytes. - f.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FpsimdContext) UnmarshalUnsafe(src []byte) { - if f.Head.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FpsimdContext doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Head.Packed() { - // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return f.CopyOutN(cc, addr, f.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Head.Packed() { - // Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) { - if !f.Head.Packed() { - // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go index da6aad4a5..d0c9ec652 100644 --- a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go +++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go @@ -19,95 +19,6 @@ var _ marshal.Marshallable = (*UID)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (uid *UID) SizeBytes() int { - return 4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (uid *UID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid)) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (uid *UID) UnmarshalBytes(src []byte) { - *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4]))) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (uid *UID) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (uid *UID) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(uid)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (uid *UID) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(uid), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return uid.CopyOutN(cc, addr, uid.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. 
- return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (uid *UID) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit func (gid *GID) SizeBytes() int { return 4 } @@ -283,3 +194,92 @@ func UnmarshalUnsafeGIDSlice(dst []GID, src []byte) (int, error) { return length, err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit +func (uid *UID) SizeBytes() int { + return 4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (uid *UID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid)) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (uid *UID) UnmarshalBytes(src []byte) { + *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4]))) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (uid *UID) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (uid *UID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(uid)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (uid *UID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(uid), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return uid.CopyOutN(cc, addr, uid.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (uid *UID) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return int64(length), err +} + diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go index ccd56285a..516ea1842 100644 --- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go @@ -112,49 +112,26 @@ func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (m *MessageHeader64) SizeBytes() int { - return 56 +func (m *multipleMessageHeader64) SizeBytes() int { + return 8 + + (*MessageHeader64)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (m *MessageHeader64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) +func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { + m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) + dst = dst[m.msgHdr.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (m *MessageHeader64) UnmarshalBytes(src []byte) { - m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) +func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { + m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) + src = src[m.msgHdr.SizeBytes():] + m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -162,23 +139,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *MessageHeader64) Packed() bool { - return true +func (m *multipleMessageHeader64) Packed() bool { + return m.msgHdr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (m *MessageHeader64) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(m)) +func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { + if m.msgHdr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(m)) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. + m.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(m), src) +func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { + if m.msgHdr.Packed() { + safecopy.CopyOut(unsafe.Pointer(m), src) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + m.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + m.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -195,13 +189,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + m.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -217,7 +221,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int } // WriteTo implements io.WriterTo.WriteTo. -func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { +func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, m.SizeBytes()) + m.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -233,26 +245,49 @@ func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (m *multipleMessageHeader64) SizeBytes() int { - return 8 + - (*MessageHeader64)(nil).SizeBytes() +func (m *MessageHeader64) SizeBytes() int { + return 56 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { - m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) - dst = dst[m.msgHdr.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) +func (m *MessageHeader64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { - m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) - src = src[m.msgHdr.SizeBytes():] - m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (m *MessageHeader64) UnmarshalBytes(src []byte) { + m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -260,40 +295,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *multipleMessageHeader64) Packed() bool { - return m.msgHdr.Packed() +func (m *MessageHeader64) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { - if m.msgHdr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(m)) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. - m.MarshalBytes(dst) - } +func (m *MessageHeader64) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(m)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { - if m.msgHdr.Packed() { - safecopy.CopyOut(unsafe.Pointer(m), src) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - m.UnmarshalBytes(src) - } +func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(m), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - m.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -310,23 +328,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem. // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - m.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -342,15 +350,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad } // WriteTo implements io.WriterTo.WriteTo. -func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, m.SizeBytes()) - m.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) |
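The hunks above reorder and regenerate per-type implementations of the same marshalling surface: SizeBytes, MarshalBytes/UnmarshalBytes, the Packed/safecopy fast paths, and the CopyIn/CopyOut/WriteTo wrappers. As a standalone illustration of the field-by-field, fixed-offset layout that the generated MarshalBytes/UnmarshalBytes bodies follow (for example for SignalInfo or MessageHeader64 above), here is a minimal sketch using only the Go standard library. SampleEvent and its fields are hypothetical, and binary.LittleEndian merely stands in for usermem.ByteOrder; none of this is part of the gVisor packages touched by this change.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// SampleEvent is a hypothetical fixed-layout struct, analogous in shape to
// the fixed-size types whose generated marshalling code appears above.
type SampleEvent struct {
	Data    uint64
	Obj     uint64
	Result  int64
	Result2 int64
}

// SizeBytes returns the serialized size: four 8-byte fields.
func (e *SampleEvent) SizeBytes() int { return 32 }

// MarshalBytes writes the struct field by field at fixed offsets,
// advancing dst after each field, mirroring the generated style.
func (e *SampleEvent) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], e.Data)
	dst = dst[8:]
	binary.LittleEndian.PutUint64(dst[:8], e.Obj)
	dst = dst[8:]
	binary.LittleEndian.PutUint64(dst[:8], uint64(e.Result))
	dst = dst[8:]
	binary.LittleEndian.PutUint64(dst[:8], uint64(e.Result2))
}

// UnmarshalBytes is the inverse, reading the fields back in the same order.
func (e *SampleEvent) UnmarshalBytes(src []byte) {
	e.Data = binary.LittleEndian.Uint64(src[:8])
	src = src[8:]
	e.Obj = binary.LittleEndian.Uint64(src[:8])
	src = src[8:]
	e.Result = int64(binary.LittleEndian.Uint64(src[:8]))
	src = src[8:]
	e.Result2 = int64(binary.LittleEndian.Uint64(src[:8]))
}

func main() {
	in := SampleEvent{Data: 1, Obj: 2, Result: -3, Result2: 4}
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	var out SampleEvent
	out.UnmarshalBytes(buf)
	fmt.Printf("round trip ok: %v\n", in == out)
}
```

As the FpsimdContext and multipleMessageHeader64 hunks above show, the generated code only takes this explicit per-field path as a fallback; when a type reports Packed() as true it copies the whole struct through the safecopy-based MarshalUnsafe/UnmarshalUnsafe routines instead.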