-rw-r--r-- | pkg/abi/linux/linux_abi_autogen_unsafe.go | 3094
-rw-r--r-- | pkg/marshal/primitive/primitive_abi_autogen_unsafe.go | 354
-rw-r--r-- | pkg/sentry/arch/arch_abi_autogen_unsafe.go | 220
-rw-r--r-- | pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go | 286
-rw-r--r-- | pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go | 336
-rw-r--r-- | pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go | 178
-rw-r--r-- | pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go | 200
-rw-r--r-- | pkg/tcpip/header/ipv6.go | 15
-rw-r--r-- | pkg/tcpip/header/ipv6_extension_headers.go | 326
-rw-r--r-- | pkg/tcpip/header/ipv6_fragment.go | 42
-rw-r--r-- | pkg/tcpip/network/ipv6/ipv6.go | 54
-rw-r--r-- | pkg/tcpip/network/ipv6/mld.go | 12
-rw-r--r-- | pkg/tcpip/network/ipv6/ndp.go | 4
13 files changed, 2731 insertions, 2390 deletions
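Note for reviewers: nearly all of the churn below is in go_marshal-generated *_abi_autogen_unsafe.go files, where the generator re-emits each type's marshalling methods (SizeBytes, MarshalBytes/UnmarshalBytes, Packed, MarshalUnsafe/UnmarshalUnsafe, CopyOutN/CopyOut/CopyIn, WriteTo) in a different order and with the updated FUSE and AIO layouts. As a rough orientation aid only, here is a minimal, hypothetical round-trip sketch against one of the regenerated types (linux.FUSEHeaderIn). It is not part of this change; the field names and method signatures are taken from the generated MarshalBytes/UnmarshalBytes in this diff, the import path assumes the gvisor.dev/gvisor module, and the opcode value is arbitrary.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	// Fill a FUSEHeaderIn using the fields visible in the generated
	// MarshalBytes below; Opcode and Unique are scalar newtypes.
	in := linux.FUSEHeaderIn{
		Len:    64,
		Opcode: linux.FUSEOpcode(3), // arbitrary value for illustration
		Unique: linux.FUSEOpID(1),
		NodeID: 1,
		UID:    0,
		GID:    0,
		PID:    42,
	}

	// SizeBytes reports the wire size: 28 bytes of scalars plus the
	// embedded Opcode (4) and Unique (8) newtypes.
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	// UnmarshalBytes reverses MarshalBytes field by field.
	var out linux.FUSEHeaderIn
	out.UnmarshalBytes(buf)
	fmt.Println(out.Len == in.Len && out.Unique == in.Unique)
}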
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go index 66bb07720..9464dabc7 100644 --- a/pkg/abi/linux/linux_abi_autogen_unsafe.go +++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go @@ -100,53 +100,85 @@ var _ marshal.Marshallable = (*XTCounters)(nil) var _ marshal.Marshallable = (*XTGetRevision)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IOEvent) SizeBytes() int { - return 32 +func (i *IOCallback) SizeBytes() int { + return 64 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IOEvent) MarshalBytes(dst []byte) { +func (i *IOCallback) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Obj)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.FD)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Buf)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result2)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Offset)) dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Flags)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD)) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IOEvent) UnmarshalBytes(src []byte) { +func (i *IOCallback) UnmarshalBytes(src []byte) { i.Data = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Obj = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Key = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + i.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.ReqPrio = int16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.FD = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Buf = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Result = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Bytes = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Result2 = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Offset = int64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] + i.Reserved2 = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + i.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.ResFD = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IOEvent) Packed() bool { +func (i *IOCallback) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IOEvent) MarshalUnsafe(dst []byte) { +func (i *IOCallback) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IOEvent) UnmarshalUnsafe(src []byte) { +func (i *IOCallback) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -163,13 +195,13 @@ func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -185,7 +217,7 @@ func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) { +func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -201,85 +233,53 @@ func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IOCallback) SizeBytes() int { - return 64 +func (i *IOEvent) SizeBytes() int { + return 32 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IOCallback) MarshalBytes(dst []byte) { +func (i *IOEvent) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Data)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Key)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.OpCode)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.ReqPrio)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.FD)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Buf)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Bytes)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Obj)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Offset)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Reserved2)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Result2)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Flags)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ResFD)) - dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *IOCallback) UnmarshalBytes(src []byte) { +func (i *IOEvent) UnmarshalBytes(src []byte) { i.Data = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Key = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - i.OpCode = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.ReqPrio = int16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.FD = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Buf = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - i.Bytes = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Obj = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Offset = int64(usermem.ByteOrder.Uint64(src[:8])) + i.Result = int64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Reserved2 = uint64(usermem.ByteOrder.Uint64(src[:8])) + i.Result2 = int64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - i.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.ResFD = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IOCallback) Packed() bool { +func (i *IOEvent) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IOCallback) MarshalUnsafe(dst []byte) { +func (i *IOEvent) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IOCallback) UnmarshalUnsafe(src []byte) { +func (i *IOEvent) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IOEvent) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -296,13 +296,13 @@ func (i *IOCallback) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IOCallback) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOEvent) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IOEvent) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -318,7 +318,7 @@ func (i *IOCallback) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *IOCallback) WriteTo(writer io.Writer) (int64, error) { +func (i *IOEvent) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1352,41 +1352,49 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (f *FUSEOpcode) SizeBytes() int { - return 4 +func (f *FUSEGetAttrIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEOpcode) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) +func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpcode) UnmarshalBytes(src []byte) { - *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) +func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { + f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpcode) Packed() bool { - // Scalar newtypes are always packed. +func (f *FUSEGetAttrIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { +func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { +func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1403,13 +1411,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1425,7 +1433,7 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { +func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1433,7 +1441,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := w.Write(buf) + length, err := writer.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -1441,41 +1449,65 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-//go:nosplit -func (f *FUSEOpID) SizeBytes() int { - return 8 +func (f *FUSEWriteIn) SizeBytes() int { + return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) +func (f *FUSEWriteIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpID) UnmarshalBytes(src []byte) { - *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) +func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpID) Packed() bool { - // Scalar newtypes are always packed. +func (f *FUSEWriteIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpID) MarshalUnsafe(dst []byte) { +func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { +func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1492,13 +1524,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1514,7 +1546,7 @@ func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { +func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1522,7 +1554,7 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := w.Write(buf) + length, err := writer.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -1530,45 +1562,53 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEOpenIn) SizeBytes() int { - return 8 +func (f *FUSEReleaseIn) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenIn) MarshalBytes(dst []byte) { +func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { +func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenIn) Packed() bool { +func (f *FUSEReleaseIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { +func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1585,13 +1625,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1607,7 +1647,7 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1623,53 +1663,41 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEMknodMeta) SizeBytes() int { - return 16 +//go:nosplit +func (f *FUSEOpID) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEMknodMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] +func (f *FUSEOpID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) { - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] +func (f *FUSEOpID) UnmarshalBytes(src []byte) { + *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEMknodMeta) Packed() bool { +func (f *FUSEOpID) Packed() bool { + // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEOpID) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1686,13 +1714,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1708,7 +1736,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1716,7 +1744,7 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -1724,53 +1752,88 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitIn) SizeBytes() int { - return 16 +func (f *FUSEHeaderIn) SizeBytes() int { + return 28 + + (*FUSEOpcode)(nil).SizeBytes() + + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEInitIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) +func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) + dst = dst[f.Opcode.SizeBytes():] + f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) + dst = dst[f.Unique.SizeBytes():] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEInitIn) UnmarshalBytes(src []byte) { - f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { + f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) + src = src[f.Opcode.SizeBytes():] + f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) + src = src[f.Unique.SizeBytes():] + f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitIn) Packed() bool { - return true +func (f *FUSEHeaderIn) Packed() bool { + return f.Opcode.Packed() && f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1787,13 +1850,23 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1809,7 +1882,15 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1974,166 +2055,45 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-func (f *FUSEWriteIn) SizeBytes() int { - return 40 +func (f *FUSEWriteOut) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEWriteIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) - dst = dst[8:] +func (f *FUSEWriteOut) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) - dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] +func (f *FUSEWriteOut) UnmarshalBytes(src []byte) { f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (f *FUSEWriteIn) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return f.CopyOutN(cc, addr, f.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSECreateMeta) SizeBytes() int { - return 16 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSECreateMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSECreateMeta) Packed() bool { +func (f *FUSEWriteOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2150,13 +2110,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2172,7 +2132,7 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2337,49 +2297,53 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEGetAttrIn) SizeBytes() int { - return 16 +func (f *FUSEDirentMeta) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) +func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { - f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEGetAttrIn) Packed() bool { +func (f *FUSEDirentMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { +func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2396,13 +2360,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2418,7 +2382,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2434,101 +2398,41 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEAttr) SizeBytes() int { - return 88 +//go:nosplit +func (f *FUSEOpcode) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEAttr) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] +func (f *FUSEOpcode) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEAttr) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] +func (f *FUSEOpcode) UnmarshalBytes(src []byte) { + *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEAttr) Packed() bool { +func (f *FUSEOpcode) Packed() bool { + // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (f *FUSEAttr) MarshalUnsafe(dst []byte) { +func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2545,13 +2449,13 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2567,7 +2471,7 @@ func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2575,7 +2479,7 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -2583,66 +2487,62 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEGetAttrOut) SizeBytes() int { - return 16 + - (*FUSEAttr)(nil).SizeBytes() +func (f *FUSEHeaderOut) SizeBytes() int { + return 8 + + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec)) +func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) dst = dst[4:] - f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) - dst = dst[f.Attr.SizeBytes():] + f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) + dst = dst[f.Unique.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) { - f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { + f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) - src = src[f.Attr.SizeBytes():] + f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) + src = src[f.Unique.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEGetAttrOut) Packed() bool { - return f.Attr.Packed() +func (f *FUSEHeaderOut) Packed() bool { + return f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) { - if f.Attr.Packed() { +func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { + if f.Unique.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(f)) } else { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. f.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) { - if f.Attr.Packed() { +func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { + if f.Unique.Packed() { safecopy.CopyOut(unsafe.Pointer(f), src) } else { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. f.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. f.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -2664,15 +2564,15 @@ func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. +func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. 
If we had a short copy-in, this results in a @@ -2696,9 +2596,9 @@ func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, f.SizeBytes()) f.MarshalBytes(buf) length, err := writer.Write(buf) @@ -2720,41 +2620,77 @@ func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEReadIn) SizeBytes() int { - return 40 +func (f *FUSEAttr) SizeBytes() int { + return 88 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEReadIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) +func (f *FUSEAttr) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) +func (f *FUSEAttr) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -2762,23 +2698,23 @@ func (f *FUSEReadIn) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReadIn) Packed() bool { +func (f *FUSEAttr) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { +func (f *FUSEAttr) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2795,13 +2731,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2817,7 +2753,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2833,21 +2769,21 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEWriteOut) SizeBytes() int { +func (f *FUSEOpenIn) SizeBytes() int { return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEWriteOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) +func (f *FUSEOpenIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEWriteOut) UnmarshalBytes(src []byte) { - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -2855,23 +2791,23 @@ func (f *FUSEWriteOut) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEWriteOut) Packed() bool { +func (f *FUSEOpenIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2888,13 +2824,13 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2910,7 +2846,7 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2926,53 +2862,49 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
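For reference, the 88-byte FUSEAttr layout that the MarshalBytes/UnmarshalBytes pair above walks field by field corresponds to a declaration along these lines. This is a sketch inferred from the generated code; the authoritative struct is defined elsewhere in pkg/abi/linux and may spell the trailing padding differently:

// FUSEAttr wire layout as implied by the generated marshalling above.
type FUSEAttr struct {
	Ino       uint64
	Size      uint64
	Blocks    uint64
	Atime     uint64
	Mtime     uint64
	Ctime     uint64
	AtimeNsec uint32
	MtimeNsec uint32
	CtimeNsec uint32
	Mode      uint32
	Nlink     uint32
	UID       uint32
	GID       uint32
	Rdev      uint32
	BlkSize   uint32
	_         uint32 // explicit padding, accounting for the 88-byte SizeBytes
}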
-func (f *FUSEReleaseIn) SizeBytes() int { - return 24 +func (f *FUSEOpenOut) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { +func (f *FUSEOpenOut) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { +func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReleaseIn) Packed() bool { +func (f *FUSEOpenOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2989,13 +2921,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3011,7 +2943,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3027,45 +2959,53 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
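As a usage note, a caller serializing one of these replies only needs SizeBytes and MarshalBytes. A minimal, hypothetical example follows; it is not the sentry's actual FUSE reply path, and the import path is an assumption:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux" // assumed import path
)

func main() {
	// Build a FUSEOpenOut reply and serialize it with the generated methods.
	out := linux.FUSEOpenOut{Fh: 1}
	buf := make([]byte, out.SizeBytes())
	out.MarshalBytes(buf)
	fmt.Printf("FUSEOpenOut is %d bytes on the wire\n", len(buf))
}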
-func (f *FUSEMkdirMeta) SizeBytes() int { - return 8 +func (f *FUSECreateMeta) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) { +func (f *FUSECreateMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) { +func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEMkdirMeta) Packed() bool { +func (f *FUSECreateMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) { +func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3082,13 +3022,13 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3104,7 +3044,7 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3120,88 +3060,183 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderIn) SizeBytes() int { - return 28 + - (*FUSEOpcode)(nil).SizeBytes() + - (*FUSEOpID)(nil).SizeBytes() +func (f *FUSEInitIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) +func (f *FUSEInitIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) dst = dst[4:] - f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) - dst = dst[f.Opcode.SizeBytes():] - f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) - dst = dst[f.Unique.SizeBytes():] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FUSEInitIn) UnmarshalBytes(src []byte) { + f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEInitIn) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return f.CopyOutN(cc, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FUSEInitOut) SizeBytes() int { + return 32 + + 4*8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEInitOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) + dst = dst[2:] + // Padding: dst[:sizeof(uint16)] ~= uint16(0) + dst = dst[2:] + // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} + dst = dst[4*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { - f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEInitOut) UnmarshalBytes(src []byte) { + f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) - src = src[f.Opcode.SizeBytes():] - f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) - src = src[f.Unique.SizeBytes():] - f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + // Padding: var _ uint16 ~= src[:sizeof(uint16)] + src = src[2:] + // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) + src = src[4*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderIn) Packed() bool { - return f.Opcode.Packed() && f.Unique.Packed() +func (f *FUSEInitOut) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. 
- f.MarshalBytes(dst) - } +func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3218,23 +3253,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3250,15 +3275,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3274,62 +3291,66 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderOut) SizeBytes() int { - return 8 + - (*FUSEOpID)(nil).SizeBytes() +func (f *FUSEGetAttrOut) SizeBytes() int { + return 16 + + (*FUSEAttr)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
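FUSEInitOut, added above, is the reply half of the FUSE_INIT handshake. The 64-byte layout its MarshalBytes produces corresponds roughly to the declaration below; this is a sketch, and the real definition may name the reserved trailing words differently:

// FUSEInitOut wire layout as implied by the generated marshalling above.
type FUSEInitOut struct {
	Major               uint32
	Minor               uint32
	MaxReadahead        uint32
	Flags               uint32
	MaxBackground       uint16
	CongestionThreshold uint16
	MaxWrite            uint32
	TimeGran            uint32
	MaxPages            uint16
	_                   uint16    // padding
	_                   [8]uint32 // unused words, the "4*8" term in SizeBytes
}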
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) +func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) - dst = dst[f.Unique.SizeBytes():] + f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) + dst = dst[f.Attr.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { - f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) { + f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) - src = src[f.Unique.SizeBytes():] + f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) + src = src[f.Attr.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderOut) Packed() bool { - return f.Unique.Packed() +func (f *FUSEGetAttrOut) Packed() bool { + return f.Attr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { - if f.Unique.Packed() { +func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) { + if f.Attr.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(f)) } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes. f.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { - if f.Unique.Packed() { +func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) { + if f.Attr.Packed() { safecopy.CopyOut(unsafe.Pointer(f), src) } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. f.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. f.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -3351,15 +3372,15 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. 
//go:nosplit -func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. +func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -3383,9 +3404,9 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, f.SizeBytes()) f.MarshalBytes(buf) length, err := writer.Write(buf) @@ -3407,82 +3428,65 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitOut) SizeBytes() int { - return 32 + - 4*8 +func (f *FUSEReadIn) SizeBytes() int { + return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEInitOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) +func (f *FUSEReadIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) - dst = dst[2:] - // Padding: dst[:sizeof(uint16)] ~= uint16(0) - dst = dst[2:] - // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} - dst = dst[4*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
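The FUSEGetAttrOut code above shows the general pattern: a type whose embedded field (here FUSEAttr) might not be packed gates its unsafe fast path on Packed() and otherwise falls back to the explicit byte-by-byte form. A caller holding only the interface gets the same dispatch for free; the sketch below restates it, assuming the pkg/marshal import path:

package main // illustrative only

import "gvisor.dev/gvisor/pkg/marshal" // assumed import path

// marshalAny mirrors the dispatch the generated code performs internally:
// packed types are serialized with a single safecopy of their in-memory
// representation, everything else goes through MarshalBytes.
func marshalAny(m marshal.Marshallable, dst []byte) {
	if m.Packed() {
		m.MarshalUnsafe(dst)
	} else {
		m.MarshalBytes(dst)
	}
}

In practice MarshalUnsafe already performs this check itself, which is why the generated CopyOut/CopyIn paths can use the direct slice-over-struct trick only when Packed() is true.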
-func (f *FUSEInitOut) UnmarshalBytes(src []byte) { - f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEReadIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - // Padding: var _ uint16 ~= src[:sizeof(uint16)] - src = src[2:] - // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) - src = src[4*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitOut) Packed() bool { +func (f *FUSEReadIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { +func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3499,13 +3503,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3521,7 +3525,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3537,25 +3541,29 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-func (f *FUSEOpenOut) SizeBytes() int { +func (f *FUSEMknodMeta) SizeBytes() int { return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) +func (f *FUSEMknodMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) { + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -3563,23 +3571,23 @@ func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenOut) Packed() bool { +func (f *FUSEMknodMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { +func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3596,13 +3604,13 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3618,7 +3626,7 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3634,53 +3642,45 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEDirentMeta) SizeBytes() int { - return 24 +func (f *FUSEMkdirMeta) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) +func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) { + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEDirentMeta) Packed() bool { +func (f *FUSEMkdirMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3697,13 +3697,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3719,7 +3719,7 @@ func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -4473,161 +4473,6 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IPTGetinfo) SizeBytes() int { - return 12 + - (*TableName)(nil).SizeBytes() + - 4*NF_INET_NUMHOOKS + - 4*NF_INET_NUMHOOKS -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IPTGetinfo) MarshalBytes(dst []byte) { - i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) - dst = dst[i.Name.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) - dst = dst[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) - dst = dst[4:] - } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) - dst = dst[4:] - } - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IPTGetinfo) UnmarshalBytes(src []byte) { - i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) - src = src[i.Name.SizeBytes():] - i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *IPTGetinfo) Packed() bool { - return i.Name.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IPTGetinfo) MarshalUnsafe(dst []byte) { - if i.Name.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) - } else { - // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) { - if i.Name.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) - } else { - // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. 
-//go:nosplit -func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) { - if !i.Name.Packed() { - // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (i *IPTGetEntries) SizeBytes() int { return 4 + (*TableName)(nil).SizeBytes() + @@ -5508,23 +5353,19 @@ func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IP6TReplace) SizeBytes() int { - return 24 + +func (i *IPTGetinfo) SizeBytes() int { + return 12 + (*TableName)(nil).SizeBytes() + 4*NF_INET_NUMHOOKS + 4*NF_INET_NUMHOOKS } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (i *IP6TReplace) MarshalBytes(dst []byte) { +func (i *IPTGetinfo) MarshalBytes(dst []byte) { i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) dst = dst[i.Name.SizeBytes():] usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) - dst = dst[4:] for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) dst = dst[4:] @@ -5533,22 +5374,18 @@ func (i *IP6TReplace) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) dst = dst[4:] } - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IP6TReplace) UnmarshalBytes(src []byte) { +func (i *IPTGetinfo) UnmarshalBytes(src []byte) { i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) src = src[i.Name.SizeBytes():] i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] @@ -5557,43 +5394,43 @@ func (i *IP6TReplace) UnmarshalBytes(src []byte) { i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } - i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) + i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IP6TReplace) Packed() bool { +func (i *IPTGetinfo) Packed() bool { return i.Name.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IP6TReplace) MarshalUnsafe(dst []byte) { +func (i *IPTGetinfo) MarshalUnsafe(dst []byte) { if i.Name.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(i)) } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes. i.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { +func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) { if i.Name.Packed() { safecopy.CopyOut(unsafe.Pointer(i), src) } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes. i.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. 
i.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -5615,15 +5452,15 @@ func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -5647,9 +5484,9 @@ func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { +func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) { if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, i.SizeBytes()) i.MarshalBytes(buf) length, err := writer.Write(buf) @@ -6019,6 +5856,169 @@ func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *IP6TReplace) SizeBytes() int { + return 24 + + (*TableName)(nil).SizeBytes() + + 4*NF_INET_NUMHOOKS + + 4*NF_INET_NUMHOOKS +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IP6TReplace) MarshalBytes(dst []byte) { + i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) + dst = dst[i.Name.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + dst = dst[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) + dst = dst[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) + dst = dst[4:] + } + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (i *IP6TReplace) UnmarshalBytes(src []byte) { + i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) + src = src[i.Name.SizeBytes():] + i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *IP6TReplace) Packed() bool { + return i.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *IP6TReplace) MarshalUnsafe(dst []byte) { + if i.Name.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { + if i.Name.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrNetlink) SizeBytes() int { return 12 } @@ -7730,237 +7730,6 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (l *Linger) SizeBytes() int { - return 8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (l *Linger) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (l *Linger) UnmarshalBytes(src []byte) { - l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - l.Linger = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (l *Linger) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (l *Linger) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(l)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (l *Linger) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(l), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that l - // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return l.CopyOutN(cc, addr, l.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. 
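The IP6TReplace block relocated above corresponds to the kernel's struct ip6t_replace: a table name, hook bookkeeping arrays sized by NF_INET_NUMHOOKS, and a counters pointer carried as a raw uint64. Its layout, as implied by the generated marshalling, is roughly the following sketch (the authoritative struct lives in pkg/abi/linux):

// IP6TReplace wire layout as implied by the generated marshalling above.
type IP6TReplace struct {
	Name        TableName
	ValidHooks  uint32
	NumEntries  uint32
	Size        uint32
	HookEntry   [NF_INET_NUMHOOKS]uint32
	Underflow   [NF_INET_NUMHOOKS]uint32
	NumCounters uint32
	Counters    uint64 // userspace pointer to counters, carried as a raw uint64
}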
-//go:nosplit -func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that l - // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (l *Linger) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) - hdr.Len = l.SizeBytes() - hdr.Cap = l.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that l - // must live until the use above. - runtime.KeepAlive(l) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrInet) SizeBytes() int { - return 4 + - (*InetAddr)(nil).SizeBytes() + - 1*8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockAddrInet) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port)) - dst = dst[2:] - s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()]) - dst = dst[s.Addr.SizeBytes():] - // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0} - dst = dst[1*(8):] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockAddrInet) UnmarshalBytes(src []byte) { - s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.Port = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()]) - src = src[s.Addr.SizeBytes():] - // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8]) - src = src[1*(8):] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SockAddrInet) Packed() bool { - return s.Addr.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrInet) MarshalUnsafe(dst []byte) { - if s.Addr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) - } else { - // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrInet) UnmarshalUnsafe(src []byte) { - if s.Addr.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) - } else { - // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - s.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) { - if !s.Addr.Packed() { - // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrLink) SizeBytes() int { return 12 + 1*8 @@ -8177,6 +7946,99 @@ func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (l *Linger) SizeBytes() int { + return 8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (l *Linger) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (l *Linger) UnmarshalBytes(src []byte) { + l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + l.Linger = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. 
+//go:nosplit +func (l *Linger) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (l *Linger) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(l)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (l *Linger) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(l), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return l.CopyOutN(cc, addr, l.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (l *Linger) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l))) + hdr.Len = l.SizeBytes() + hdr.Cap = l.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that l + // must live until the use above. + runtime.KeepAlive(l) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (t *TCPInfo) SizeBytes() int { return 192 } @@ -8446,293 +8308,6 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (c *ControlMessageCredentials) SizeBytes() int { - return 12 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (c *ControlMessageCredentials) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) { - c.PID = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - c.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - c.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. 
-//go:nosplit -func (c *ControlMessageCredentials) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(c)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(c), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return c.CopyOutN(cc, addr, c.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (i *InetAddr) SizeBytes() int { - return 1 * 4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *InetAddr) MarshalBytes(dst []byte) { - for idx := 0; idx < 4; idx++ { - dst[0] = byte(i[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *InetAddr) UnmarshalBytes(src []byte) { - for idx := 0; idx < 4; idx++ { - i[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *InetAddr) Packed() bool { - // Array newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *InetAddr) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(i)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. 
-func (i *InetAddr) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(i), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (i *InetAddr) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (i *Inet6Addr) SizeBytes() int { - return 1 * 16 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Inet6Addr) MarshalBytes(dst []byte) { - for idx := 0; idx < 16; idx++ { - dst[0] = byte(i[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *Inet6Addr) UnmarshalBytes(src []byte) { - for idx := 0; idx < 16; idx++ { - i[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *Inet6Addr) Packed() bool { - // Array newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Inet6Addr) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(i)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Inet6Addr) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(i), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrInet6) SizeBytes() int { return 12 + 1*16 @@ -8843,117 +8418,125 @@ func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *ItimerVal) SizeBytes() int { - return 0 + - (*Timeval)(nil).SizeBytes() + - (*Timeval)(nil).SizeBytes() +func (s *SockAddrInet) SizeBytes() int { + return 4 + + (*InetAddr)(nil).SizeBytes() + + 1*8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *ItimerVal) MarshalBytes(dst []byte) { - i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) - dst = dst[i.Interval.SizeBytes():] - i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) - dst = dst[i.Value.SizeBytes():] +func (s *SockAddrInet) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port)) + dst = dst[2:] + s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()]) + dst = dst[s.Addr.SizeBytes():] + // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0} + dst = dst[1*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (i *ItimerVal) UnmarshalBytes(src []byte) { - i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) - src = src[i.Interval.SizeBytes():] - i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) - src = src[i.Value.SizeBytes():] +func (s *SockAddrInet) UnmarshalBytes(src []byte) { + s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.Port = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()]) + src = src[s.Addr.SizeBytes():] + // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8]) + src = src[1*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *ItimerVal) Packed() bool { - return i.Interval.Packed() && i.Value.Packed() +func (s *SockAddrInet) Packed() bool { + return s.Addr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *ItimerVal) MarshalUnsafe(dst []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) +func (s *SockAddrInet) MarshalUnsafe(dst []byte) { + if s.Addr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) + // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *ItimerVal) UnmarshalUnsafe(src []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) +func (s *SockAddrInet) UnmarshalUnsafe(src []byte) { + if s.Addr.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) + // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. +func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. } // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) +func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. +func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. + s.UnmarshalBytes(buf) // escapes: fallback. return length, err } // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) +func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) { + if !s.Addr.Packed() { + // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) length, err := writer.Write(buf) return int64(length), err } @@ -8961,293 +8544,301 @@ func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that s // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. return int64(length), err } // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (t *TimerID) SizeBytes() int { - return 4 +func (i *Inet6Addr) SizeBytes() int { + return 1 * 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (t *TimerID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*t)) +func (i *Inet6Addr) MarshalBytes(dst []byte) { + for idx := 0; idx < 16; idx++ { + dst[0] = byte(i[idx]) + dst = dst[1:] + } } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (t *TimerID) UnmarshalBytes(src []byte) { - *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4]))) +func (i *Inet6Addr) UnmarshalBytes(src []byte) { + for idx := 0; idx < 16; idx++ { + i[idx] = src[0] + src = src[1:] + } } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (t *TimerID) Packed() bool { - // Scalar newtypes are always packed. +func (i *Inet6Addr) Packed() bool { + // Array newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (t *TimerID) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(t)) +func (i *Inet6Addr) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (t *TimerID) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(t), src) +func (i *Inet6Addr) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return t.CopyOutN(cc, addr, t.SizeBytes()) +func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. 
return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (t *TimerID) WriteTo(w io.Writer) (int64, error) { +func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that t + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return int64(length), err } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (sxts *StatxTimestamp) SizeBytes() int { - return 16 +func (c *ControlMessageCredentials) SizeBytes() int { + return 12 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (sxts *StatxTimestamp) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec)) +func (c *ControlMessageCredentials) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID)) dst = dst[4:] - // Padding: dst[:sizeof(int32)] ~= int32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID)) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) { - sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) { + c.PID = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ int32 ~= src[:sizeof(int32)] + c.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + c.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (sxts *StatxTimestamp) Packed() bool { +func (c *ControlMessageCredentials) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(sxts)) +func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(c)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(sxts), src) +func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(c), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that sxts + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return sxts.CopyOutN(cc, addr, sxts.SizeBytes()) +func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return c.CopyOutN(cc, addr, c.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that sxts + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) { +func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) - hdr.Len = sxts.SizeBytes() - hdr.Cap = sxts.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that sxts + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return int64(length), err } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (u *Utime) SizeBytes() int { - return 16 +//go:nosplit +func (i *InetAddr) SizeBytes() int { + return 1 * 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (u *Utime) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) - dst = dst[8:] +func (i *InetAddr) MarshalBytes(dst []byte) { + for idx := 0; idx < 4; idx++ { + dst[0] = byte(i[idx]) + dst = dst[1:] + } } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Utime) UnmarshalBytes(src []byte) { - u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] +func (i *InetAddr) UnmarshalBytes(src []byte) { + for idx := 0; idx < 4; idx++ { + i[idx] = src[0] + src = src[1:] + } } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (u *Utime) Packed() bool { +func (i *InetAddr) Packed() bool { + // Array newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Utime) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(u)) +func (i *InetAddr) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Utime) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(u), src) +func (i *InetAddr) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) +func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. 
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (u *Utime) WriteTo(writer io.Writer) (int64, error) { +func (i *InetAddr) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return int64(length), err } @@ -9830,6 +9421,225 @@ func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) { // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit +func (t *TimerID) SizeBytes() int { + return 4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (t *TimerID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*t)) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (t *TimerID) UnmarshalBytes(src []byte) { + *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4]))) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (t *TimerID) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (t *TimerID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(t)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (t *TimerID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(t), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return t.CopyOutN(cc, addr, t.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. 
+ return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (t *TimerID) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that t + // must live until the use above. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *ItimerVal) SizeBytes() int { + return 0 + + (*Timeval)(nil).SizeBytes() + + (*Timeval)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *ItimerVal) MarshalBytes(dst []byte) { + i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) + dst = dst[i.Interval.SizeBytes():] + i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) + dst = dst[i.Value.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (i *ItimerVal) UnmarshalBytes(src []byte) { + i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) + src = src[i.Interval.SizeBytes():] + i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) + src = src[i.Value.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *ItimerVal) Packed() bool { + return i.Interval.Packed() && i.Value.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *ItimerVal) MarshalUnsafe(dst []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *ItimerVal) UnmarshalUnsafe(src []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. 
+//go:nosplit +func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit func (c *ClockT) SizeBytes() int { return 8 } @@ -10058,6 +9868,196 @@ func (t *Tms) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (sxts *StatxTimestamp) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (sxts *StatxTimestamp) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec)) + dst = dst[4:] + // Padding: dst[:sizeof(int32)] ~= int32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) { + sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ int32 ~= src[:sizeof(int32)] + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (sxts *StatxTimestamp) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(sxts)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(sxts), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. 
+//go:nosplit +func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that sxts + // must live until the use above. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return sxts.CopyOutN(cc, addr, sxts.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that sxts + // must live until the use above. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts))) + hdr.Len = sxts.SizeBytes() + hdr.Cap = sxts.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that sxts + // must live until the use above. + runtime.KeepAlive(sxts) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (u *Utime) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *Utime) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *Utime) UnmarshalBytes(src []byte) { + u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *Utime) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *Utime) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(u)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *Utime) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(u), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *Utime) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (w *Winsize) SizeBytes() int { return 8 } diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go index 469fb972f..e198e8ac2 100644 --- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go +++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go @@ -25,183 +25,6 @@ var _ marshal.Marshallable = (*Uint8)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint8) SizeBytes() int { - return 1 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Uint8) MarshalBytes(dst []byte) { - dst[0] = byte(*u) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Uint8) UnmarshalBytes(src []byte) { - *u = Uint8(uint8(src[0])) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (u *Uint8) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Uint8) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(u)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint8) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(u), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (u *Uint8) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return int64(length), err -} - -// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. -//go:nosplit -func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { - count := len(dst) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&dst) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(val) - hdr.Len = size * count - hdr.Cap = size * count - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that dst - // must live until the use above. - runtime.KeepAlive(dst) // escapes: replaced by intrinsic. - return length, err -} - -// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. -//go:nosplit -func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { - count := len(src) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&src) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(val) - hdr.Len = size * count - hdr.Cap = size * count - - length, err := cc.CopyOutBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that src - // must live until the use above. - runtime.KeepAlive(src) // escapes: replaced by intrinsic. 
- return length, err -} - -// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. -func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { - count := len(src) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&src) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - length, err := safecopy.CopyIn(dst[:(size*count)], val) - // Since we bypassed the compiler's escape analysis, indicate that src - // must live until the use above. - runtime.KeepAlive(src) // escapes: replaced by intrinsic. - return length, err -} - -// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. -func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { - count := len(dst) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&dst) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - length, err := safecopy.CopyOut(val, src[:(size*count)]) - // Since we bypassed the compiler's escape analysis, indicate that dst - // must live until the use above. - runtime.KeepAlive(dst) // escapes: replaced by intrinsic. - return length, err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit func (i *Int16) SizeBytes() int { return 2 } @@ -1439,3 +1262,180 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) { return length, err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit +func (u *Uint8) SizeBytes() int { + return 1 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *Uint8) MarshalBytes(dst []byte) { + dst[0] = byte(*u) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *Uint8) UnmarshalBytes(src []byte) { + *u = Uint8(uint8(src[0])) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *Uint8) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *Uint8) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(u)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *Uint8) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(u), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *Uint8) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return int64(length), err +} + +// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. +//go:nosplit +func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { + count := len(dst) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&dst) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(val) + hdr.Len = size * count + hdr.Cap = size * count + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that dst + // must live until the use above. + runtime.KeepAlive(dst) // escapes: replaced by intrinsic. + return length, err +} + +// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. +//go:nosplit +func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { + count := len(src) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&src) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(val) + hdr.Len = size * count + hdr.Cap = size * count + + length, err := cc.CopyOutBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that src + // must live until the use above. + runtime.KeepAlive(src) // escapes: replaced by intrinsic. + return length, err +} + +// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. +func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { + count := len(src) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&src) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + length, err := safecopy.CopyIn(dst[:(size*count)], val) + // Since we bypassed the compiler's escape analysis, indicate that src + // must live until the use above. + runtime.KeepAlive(src) // escapes: replaced by intrinsic. + return length, err +} + +// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. 
+func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { + count := len(dst) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&dst) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + length, err := safecopy.CopyOut(val, src[:(size*count)]) + // Since we bypassed the compiler's escape analysis, indicate that dst + // must live until the use above. + runtime.KeepAlive(dst) // escapes: replaced by intrinsic. + return length, err +} + diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go index 1bd458b68..fe68f921d 100644 --- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go @@ -23,116 +23,6 @@ var _ marshal.Marshallable = (*SignalStack)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalInfo) SizeBytes() int { - return 16 + - 1*(128-16) -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalInfo) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - for idx := 0; idx < (128-16); idx++ { - dst[0] = byte(s.Fields[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalInfo) UnmarshalBytes(src []byte) { - s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - for idx := 0; idx < (128-16); idx++ { - s.Fields[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SignalInfo) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalInfo) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalInfo) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SignalAct) SizeBytes() int { return 24 + (*linux.SignalSet)(nil).SizeBytes() @@ -370,3 +260,113 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SignalInfo) SizeBytes() int { + return 16 + + 1*(128-16) +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SignalInfo) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + for idx := 0; idx < (128-16); idx++ { + dst[0] = byte(s.Fields[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SignalInfo) UnmarshalBytes(src []byte) { + s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + for idx := 0; idx < (128-16); idx++ { + s.Fields[idx] = src[0] + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SignalInfo) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SignalInfo) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SignalInfo) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. 
+//go:nosplit +func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go index 466bf889a..cdbc88377 100644 --- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go @@ -25,6 +25,149 @@ var _ marshal.Marshallable = (*UContext64)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. +func (u *UContext64) SizeBytes() int { + return 16 + + (*SignalStack)(nil).SizeBytes() + + (*SignalContext64)(nil).SizeBytes() + + (*linux.SignalSet)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *UContext64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link)) + dst = dst[8:] + u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()]) + dst = dst[u.Stack.SizeBytes():] + u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()]) + dst = dst[u.MContext.SizeBytes():] + u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()]) + dst = dst[u.Sigset.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *UContext64) UnmarshalBytes(src []byte) { + u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Link = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()]) + src = src[u.Stack.SizeBytes():] + u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()]) + src = src[u.MContext.SizeBytes():] + u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()]) + src = src[u.Sigset.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *UContext64) Packed() bool { + return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *UContext64) MarshalUnsafe(dst []byte) { + if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(u)) + } else { + // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. 
+ u.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *UContext64) UnmarshalUnsafe(src []byte) { + if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + safecopy.CopyOut(unsafe.Pointer(u), src) + } else { + // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + u.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. + u.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + u.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, u.SizeBytes()) + u.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. 
+ return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SignalContext64) SizeBytes() int { return 184 + (*linux.SignalSet)(nil).SizeBytes() + @@ -262,146 +405,3 @@ func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (u *UContext64) SizeBytes() int { - return 16 + - (*SignalStack)(nil).SizeBytes() + - (*SignalContext64)(nil).SizeBytes() + - (*linux.SignalSet)(nil).SizeBytes() -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *UContext64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link)) - dst = dst[8:] - u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()]) - dst = dst[u.Stack.SizeBytes():] - u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()]) - dst = dst[u.MContext.SizeBytes():] - u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()]) - dst = dst[u.Sigset.SizeBytes():] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *UContext64) UnmarshalBytes(src []byte) { - u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Link = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()]) - src = src[u.Stack.SizeBytes():] - u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()]) - src = src[u.MContext.SizeBytes():] - u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()]) - src = src[u.Sigset.SizeBytes():] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (u *UContext64) Packed() bool { - return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *UContext64) MarshalUnsafe(dst []byte) { - if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(u)) - } else { - // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. - u.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *UContext64) UnmarshalUnsafe(src []byte) { - if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - safecopy.CopyOut(unsafe.Pointer(u), src) - } else { - // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - u.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. - u.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. 
-//go:nosplit -func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - u.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, u.SizeBytes()) - u.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go index b8667cdb9..aac25375e 100644 --- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go @@ -27,6 +27,174 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SignalContext64) SizeBytes() int { + return 32 + + 8*31 + + 1*8 + + (*FpsimdContext)(nil).SizeBytes() + + 1*3568 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SignalContext64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr)) + dst = dst[8:] + for idx := 0; idx < 31; idx++ { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx])) + dst = dst[8:] + } + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate)) + dst = dst[8:] + for idx := 0; idx < 8; idx++ { + dst[0] = byte(s._pad[idx]) + dst = dst[1:] + } + s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()]) + dst = dst[s.Fpsimd64.SizeBytes():] + for idx := 0; idx < 3568; idx++ { + dst[0] = byte(s.Reserved[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (s *SignalContext64) UnmarshalBytes(src []byte) { + s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + for idx := 0; idx < 31; idx++ { + s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + } + s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + for idx := 0; idx < 8; idx++ { + s._pad[idx] = src[0] + src = src[1:] + } + s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()]) + src = src[s.Fpsimd64.SizeBytes():] + for idx := 0; idx < 3568; idx++ { + s.Reserved[idx] = uint8(src[0]) + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SignalContext64) Packed() bool { + return s.Fpsimd64.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SignalContext64) MarshalUnsafe(dst []byte) { + if s.Fpsimd64.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) + } else { + // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SignalContext64) UnmarshalUnsafe(src []byte) { + if s.Fpsimd64.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) + } else { + // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + s.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (a *aarch64Ctx) SizeBytes() int { return 8 } @@ -422,171 +590,3 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalContext64) SizeBytes() int { - return 32 + - 8*31 + - 1*8 + - (*FpsimdContext)(nil).SizeBytes() + - 1*3568 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalContext64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr)) - dst = dst[8:] - for idx := 0; idx < 31; idx++ { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx])) - dst = dst[8:] - } - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate)) - dst = dst[8:] - for idx := 0; idx < 8; idx++ { - dst[0] = byte(s._pad[idx]) - dst = dst[1:] - } - s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()]) - dst = dst[s.Fpsimd64.SizeBytes():] - for idx := 0; idx < 3568; idx++ { - dst[0] = byte(s.Reserved[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalContext64) UnmarshalBytes(src []byte) { - s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - for idx := 0; idx < 31; idx++ { - s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - } - s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - for idx := 0; idx < 8; idx++ { - s._pad[idx] = src[0] - src = src[1:] - } - s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()]) - src = src[s.Fpsimd64.SizeBytes():] - for idx := 0; idx < 3568; idx++ { - s.Reserved[idx] = uint8(src[0]) - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SignalContext64) Packed() bool { - return s.Fpsimd64.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
-func (s *SignalContext64) MarshalUnsafe(dst []byte) { - if s.Fpsimd64.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) - } else { - // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalContext64) UnmarshalUnsafe(src []byte) { - if s.Fpsimd64.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) - } else { - // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - s.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go index d0c9ec652..da6aad4a5 100644 --- a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go +++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go @@ -19,6 +19,95 @@ var _ marshal.Marshallable = (*UID)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit +func (uid *UID) SizeBytes() int { + return 4 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (uid *UID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid)) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (uid *UID) UnmarshalBytes(src []byte) { + *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4]))) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (uid *UID) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (uid *UID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(uid)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (uid *UID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(uid), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return uid.CopyOutN(cc, addr, uid.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (uid *UID) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) + hdr.Len = uid.SizeBytes() + hdr.Cap = uid.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that uid + // must live until the use above. + runtime.KeepAlive(uid) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit func (gid *GID) SizeBytes() int { return 4 } @@ -194,92 +283,3 @@ func UnmarshalUnsafeGIDSlice(dst []GID, src []byte) (int, error) { return length, err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (uid *UID) SizeBytes() int { - return 4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (uid *UID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid)) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (uid *UID) UnmarshalBytes(src []byte) { - *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4]))) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (uid *UID) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (uid *UID) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(uid)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (uid *UID) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(uid), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return uid.CopyOutN(cc, addr, uid.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (uid *UID) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid))) - hdr.Len = uid.SizeBytes() - hdr.Cap = uid.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that uid - // must live until the use above. - runtime.KeepAlive(uid) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go index 516ea1842..ccd56285a 100644 --- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go +++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go @@ -112,26 +112,49 @@ func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (m *multipleMessageHeader64) SizeBytes() int { - return 8 + - (*MessageHeader64)(nil).SizeBytes() +func (m *MessageHeader64) SizeBytes() int { + return 56 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { - m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) - dst = dst[m.msgHdr.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) +func (m *MessageHeader64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { - m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) - src = src[m.msgHdr.SizeBytes():] - m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (m *MessageHeader64) UnmarshalBytes(src []byte) { + m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -139,40 +162,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *multipleMessageHeader64) Packed() bool { - return m.msgHdr.Packed() +func (m *MessageHeader64) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { - if m.msgHdr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(m)) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. 
- m.MarshalBytes(dst) - } +func (m *MessageHeader64) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(m)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { - if m.msgHdr.Packed() { - safecopy.CopyOut(unsafe.Pointer(m), src) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - m.UnmarshalBytes(src) - } +func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(m), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - m.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -189,23 +195,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem. // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - m.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -221,15 +217,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad } // WriteTo implements io.WriterTo.WriteTo. -func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, m.SizeBytes()) - m.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -245,49 +233,26 @@ func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-func (m *MessageHeader64) SizeBytes() int { - return 56 +func (m *multipleMessageHeader64) SizeBytes() int { + return 8 + + (*MessageHeader64)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (m *MessageHeader64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) +func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { + m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) + dst = dst[m.msgHdr.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (m *MessageHeader64) UnmarshalBytes(src []byte) { - m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) +func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { + m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) + src = src[m.msgHdr.SizeBytes():] + m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -295,23 +260,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *MessageHeader64) Packed() bool { - return true +func (m *multipleMessageHeader64) Packed() bool { + return m.msgHdr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (m *MessageHeader64) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(m)) +func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { + if m.msgHdr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(m)) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. + m.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(m), src) +func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { + if m.msgHdr.Packed() { + safecopy.CopyOut(unsafe.Pointer(m), src) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + m.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + m.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -328,13 +310,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + m.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -350,7 +342,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int } // WriteTo implements io.WriterTo.WriteTo. -func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { +func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, m.SizeBytes()) + m.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) diff --git a/pkg/tcpip/header/ipv6.go b/pkg/tcpip/header/ipv6.go index 55d09355a..d522e5f10 100644 --- a/pkg/tcpip/header/ipv6.go +++ b/pkg/tcpip/header/ipv6.go @@ -48,11 +48,13 @@ type IPv6Fields struct { // FlowLabel is the "flow label" field of an IPv6 packet. FlowLabel uint32 - // PayloadLength is the "payload length" field of an IPv6 packet. + // PayloadLength is the "payload length" field of an IPv6 packet, including + // the length of all extension headers. PayloadLength uint16 - // NextHeader is the "next header" field of an IPv6 packet. - NextHeader uint8 + // TransportProtocol is the transport layer protocol number. Serialized in the + // last "next header" field of the IPv6 header + extension headers. + TransportProtocol tcpip.TransportProtocolNumber // HopLimit is the "Hop Limit" field of an IPv6 packet. HopLimit uint8 @@ -62,6 +64,9 @@ type IPv6Fields struct { // DstAddr is the "destination ip address" of an IPv6 packet. 
DstAddr tcpip.Address + + // ExtensionHeaders are the extension headers following the IPv6 header. + ExtensionHeaders IPv6ExtHdrSerializer } // IPv6 represents an ipv6 header stored in a byte array. @@ -253,12 +258,14 @@ func (IPv6) SetChecksum(uint16) { // Encode encodes all the fields of the ipv6 header. func (b IPv6) Encode(i *IPv6Fields) { + extHdr := b[IPv6MinimumSize:] b.SetTOS(i.TrafficClass, i.FlowLabel) b.SetPayloadLength(i.PayloadLength) - b[IPv6NextHeaderOffset] = i.NextHeader b[hopLimit] = i.HopLimit b.SetSourceAddress(i.SrcAddr) b.SetDestinationAddress(i.DstAddr) + nextHeader, _ := i.ExtensionHeaders.Serialize(i.TransportProtocol, extHdr) + b[IPv6NextHeaderOffset] = nextHeader } // IsValid performs basic validation on the packet. diff --git a/pkg/tcpip/header/ipv6_extension_headers.go b/pkg/tcpip/header/ipv6_extension_headers.go index 571eae233..1fbb2cc98 100644 --- a/pkg/tcpip/header/ipv6_extension_headers.go +++ b/pkg/tcpip/header/ipv6_extension_headers.go @@ -20,7 +20,9 @@ import ( "encoding/binary" "fmt" "io" + "math" + "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/buffer" ) @@ -75,8 +77,8 @@ const ( // Fragment Offset field within an IPv6FragmentExtHdr. ipv6FragmentExtHdrFragmentOffsetOffset = 0 - // ipv6FragmentExtHdrFragmentOffsetShift is the least significant bits to - // discard from the Fragment Offset. + // ipv6FragmentExtHdrFragmentOffsetShift is the bit offset of the Fragment + // Offset field within an IPv6FragmentExtHdr. ipv6FragmentExtHdrFragmentOffsetShift = 3 // ipv6FragmentExtHdrFlagsIdx is the index to the flags field within an @@ -114,6 +116,37 @@ const ( IPv6FragmentExtHdrFragmentOffsetBytesPerUnit = 8 ) +// padIPv6OptionsLength returns the total length for IPv6 options of length l +// considering the 8-octet alignment as stated in RFC 8200 Section 4.2. +func padIPv6OptionsLength(length int) int { + return (length + ipv6ExtHdrLenBytesPerUnit - 1) & ^(ipv6ExtHdrLenBytesPerUnit - 1) +} + +// padIPv6Option fills b with the appropriate padding options depending on its +// length. +func padIPv6Option(b []byte) { + switch len(b) { + case 0: // No padding needed. + case 1: // Pad with Pad1. + b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6Pad1ExtHdrOptionIdentifier) + default: // Pad with PadN. + s := b[ipv6ExtHdrOptionPayloadOffset:] + for i := range s { + s[i] = 0 + } + b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6PadNExtHdrOptionIdentifier) + b[ipv6ExtHdrOptionLengthOffset] = uint8(len(s)) + } +} + +// ipv6OptionsAlignmentPadding returns the number of padding bytes needed to +// serialize an option at headerOffset with alignment requirements +// [align]n + alignOffset. +func ipv6OptionsAlignmentPadding(headerOffset int, align int, alignOffset int) int { + padLen := headerOffset - alignOffset + return ((padLen + align - 1) & ^(align - 1)) - padLen +} + // IPv6PayloadHeader is implemented by the various headers that can be found // in an IPv6 payload. // @@ -206,29 +239,51 @@ type IPv6ExtHdrOption interface { isIPv6ExtHdrOption() } -// IPv6ExtHdrOptionIndentifier is an IPv6 extension header option identifier. -type IPv6ExtHdrOptionIndentifier uint8 +// IPv6ExtHdrOptionIdentifier is an IPv6 extension header option identifier. +type IPv6ExtHdrOptionIdentifier uint8 const ( // ipv6Pad1ExtHdrOptionIdentifier is the identifier for a padding option that // provides 1 byte padding, as outlined in RFC 8200 section 4.2. 
- ipv6Pad1ExtHdrOptionIdentifier IPv6ExtHdrOptionIndentifier = 0 + ipv6Pad1ExtHdrOptionIdentifier IPv6ExtHdrOptionIdentifier = 0 // ipv6PadBExtHdrOptionIdentifier is the identifier for a padding option that // provides variable length byte padding, as outlined in RFC 8200 section 4.2. - ipv6PadNExtHdrOptionIdentifier IPv6ExtHdrOptionIndentifier = 1 + ipv6PadNExtHdrOptionIdentifier IPv6ExtHdrOptionIdentifier = 1 + + // ipv6RouterAlertHopByHopOptionIdentifier is the identifier for the Router + // Alert Hop by Hop option as defined in RFC 2711 section 2.1. + ipv6RouterAlertHopByHopOptionIdentifier IPv6ExtHdrOptionIdentifier = 5 + + // ipv6ExtHdrOptionTypeOffset is the option type offset in an extension header + // option as defined in RFC 8200 section 4.2. + ipv6ExtHdrOptionTypeOffset = 0 + + // ipv6ExtHdrOptionLengthOffset is the option length offset in an extension + // header option as defined in RFC 8200 section 4.2. + ipv6ExtHdrOptionLengthOffset = 1 + + // ipv6ExtHdrOptionPayloadOffset is the option payload offset in an extension + // header option as defined in RFC 8200 section 4.2. + ipv6ExtHdrOptionPayloadOffset = 2 ) +// ipv6UnknownActionFromIdentifier maps an extension header option's +// identifier's high bits to the action to take when the identifier is unknown. +func ipv6UnknownActionFromIdentifier(id IPv6ExtHdrOptionIdentifier) IPv6OptionUnknownAction { + return IPv6OptionUnknownAction((id & ipv6UnknownExtHdrOptionActionMask) >> ipv6UnknownExtHdrOptionActionShift) +} + // IPv6UnknownExtHdrOption holds the identifier and data for an IPv6 extension // header option that is unknown by the parsing utilities. type IPv6UnknownExtHdrOption struct { - Identifier IPv6ExtHdrOptionIndentifier + Identifier IPv6ExtHdrOptionIdentifier Data []byte } // UnknownAction implements IPv6OptionUnknownAction.UnknownAction. func (o *IPv6UnknownExtHdrOption) UnknownAction() IPv6OptionUnknownAction { - return IPv6OptionUnknownAction((o.Identifier & ipv6UnknownExtHdrOptionActionMask) >> ipv6UnknownExtHdrOptionActionShift) + return ipv6UnknownActionFromIdentifier(o.Identifier) } // isIPv6ExtHdrOption implements IPv6ExtHdrOption.isIPv6ExtHdrOption. @@ -251,7 +306,7 @@ func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error // options buffer has been exhausted and we are done iterating. return nil, true, nil } - id := IPv6ExtHdrOptionIndentifier(temp) + id := IPv6ExtHdrOptionIdentifier(temp) // If the option identifier indicates the option is a Pad1 option, then we // know the option does not have Length and Data fields. 
End processing of @@ -294,6 +349,14 @@ func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error panic(fmt.Sprintf("error when skipping PadN (N = %d) option's data bytes: %s", length, err)) } continue + case ipv6RouterAlertHopByHopOptionIdentifier: + var routerAlertValue [ipv6RouterAlertPayloadLength]byte + if n, err := i.reader.Read(routerAlertValue[:]); err != nil { + panic(fmt.Sprintf("error when reading RouterAlert option's data bytes: %s", err)) + } else if n != ipv6RouterAlertPayloadLength { + return nil, true, fmt.Errorf("read %d bytes for RouterAlert option, expected %d", n, ipv6RouterAlertPayloadLength) + } + return &IPv6RouterAlertOption{Value: IPv6RouterAlertValue(binary.BigEndian.Uint16(routerAlertValue[:]))}, false, nil default: bytes := make([]byte, length) if n, err := io.ReadFull(&i.reader, bytes); err != nil { @@ -609,3 +672,248 @@ func (i *IPv6PayloadIterator) nextHeaderData(fragmentHdr bool, bytes []byte) (IP return IPv6ExtensionHeaderIdentifier(nextHdrIdentifier), bytes, nil } + +// IPv6SerializableExtHdr provides serialization for IPv6 extension +// headers. +type IPv6SerializableExtHdr interface { + // identifier returns the assigned IPv6 header identifier for this extension + // header. + identifier() IPv6ExtensionHeaderIdentifier + + // length returns the total serialized length in bytes of this extension + // header, including the common next header and length fields. + length() int + + // serializeInto serializes the receiver into the provided byte + // buffer and with the provided nextHeader value. + // + // Note, the caller MUST provide a byte buffer with size of at least + // length. Implementers of this function may assume that the byte buffer + // is of sufficient size. serializeInto MAY panic if the provided byte + // buffer is not of sufficient size. + // + // serializeInto returns the number of bytes that was used to serialize the + // receiver. Implementers must only use the number of bytes required to + // serialize the receiver. Callers MAY provide a larger buffer than required + // to serialize into. + serializeInto(nextHeader uint8, b []byte) int +} + +var _ IPv6SerializableExtHdr = (*IPv6SerializableHopByHopExtHdr)(nil) + +// IPv6SerializableHopByHopExtHdr implements serialization of the Hop by Hop +// options extension header. +type IPv6SerializableHopByHopExtHdr []IPv6SerializableHopByHopOption + +const ( + // ipv6HopByHopExtHdrNextHeaderOffset is the offset of the next header field + // in a hop by hop extension header as defined in RFC 8200 section 4.3. + ipv6HopByHopExtHdrNextHeaderOffset = 0 + + // ipv6HopByHopExtHdrLengthOffset is the offset of the length field in a hop + // by hop extension header as defined in RFC 8200 section 4.3. + ipv6HopByHopExtHdrLengthOffset = 1 + + // ipv6HopByHopExtHdrPayloadOffset is the offset of the options in a hop by + // hop extension header as defined in RFC 8200 section 4.3. + ipv6HopByHopExtHdrOptionsOffset = 2 + + // ipv6HopByHopExtHdrUnaccountedLenWords is the implicit number of 8-octet + // words in a hop by hop extension header's length field, as stated in RFC + // 8200 section 4.3: + // Length of the Hop-by-Hop Options header in 8-octet units, + // not including the first 8 octets. + ipv6HopByHopExtHdrUnaccountedLenWords = 1 +) + +// identifier implements IPv6SerializableExtHdr. +func (IPv6SerializableHopByHopExtHdr) identifier() IPv6ExtensionHeaderIdentifier { + return IPv6HopByHopOptionsExtHdrIdentifier +} + +// length implements IPv6SerializableExtHdr. 
+func (h IPv6SerializableHopByHopExtHdr) length() int { + var total int + for _, opt := range h { + align, alignOffset := opt.alignment() + total += ipv6OptionsAlignmentPadding(total, align, alignOffset) + total += ipv6ExtHdrOptionPayloadOffset + int(opt.length()) + } + // Account for next header and total length fields and add padding. + return padIPv6OptionsLength(ipv6HopByHopExtHdrOptionsOffset + total) +} + +// serializeInto implements IPv6SerializableExtHdr. +func (h IPv6SerializableHopByHopExtHdr) serializeInto(nextHeader uint8, b []byte) int { + optBuffer := b[ipv6HopByHopExtHdrOptionsOffset:] + totalLength := ipv6HopByHopExtHdrOptionsOffset + for _, opt := range h { + // Calculate alignment requirements and pad buffer if necessary. + align, alignOffset := opt.alignment() + padLen := ipv6OptionsAlignmentPadding(totalLength, align, alignOffset) + if padLen != 0 { + padIPv6Option(optBuffer[:padLen]) + totalLength += padLen + optBuffer = optBuffer[padLen:] + } + + l := opt.serializeInto(optBuffer[ipv6ExtHdrOptionPayloadOffset:]) + optBuffer[ipv6ExtHdrOptionTypeOffset] = uint8(opt.identifier()) + optBuffer[ipv6ExtHdrOptionLengthOffset] = l + l += ipv6ExtHdrOptionPayloadOffset + totalLength += int(l) + optBuffer = optBuffer[l:] + } + padded := padIPv6OptionsLength(totalLength) + if padded != totalLength { + padIPv6Option(optBuffer[:padded-totalLength]) + totalLength = padded + } + wordsLen := totalLength/ipv6ExtHdrLenBytesPerUnit - ipv6HopByHopExtHdrUnaccountedLenWords + if wordsLen > math.MaxUint8 { + panic(fmt.Sprintf("IPv6 hop by hop options too large: %d+1 64-bit words", wordsLen)) + } + b[ipv6HopByHopExtHdrNextHeaderOffset] = nextHeader + b[ipv6HopByHopExtHdrLengthOffset] = uint8(wordsLen) + return totalLength +} + +// IPv6SerializableHopByHopOption provides serialization for hop by hop options. +type IPv6SerializableHopByHopOption interface { + // identifier returns the option identifier of this Hop by Hop option. + identifier() IPv6ExtHdrOptionIdentifier + + // length returns the *payload* size of the option (not considering the type + // and length fields). + length() uint8 + + // alignment returns the alignment requirements from this option. + // + // Alignment requirements take the form [align]n + offset as specified in + // RFC 8200 section 4.2. The alignment requirement is on the offset between + // the option type byte and the start of the hop by hop header. + // + // align must be a power of 2. + alignment() (align int, offset int) + + // serializeInto serializes the receiver into the provided byte + // buffer. + // + // Note, the caller MUST provide a byte buffer with size of at least + // length. Implementers of this function may assume that the byte buffer + // is of sufficient size. serializeInto MAY panic if the provided byte + // buffer is not of sufficient size. + // + // serializeInto will return the number of bytes that was used to + // serialize the receiver. Implementers must only use the number of + // bytes required to serialize the receiver. Callers MAY provide a + // larger buffer than required to serialize into. + serializeInto([]byte) uint8 +} + +var _ IPv6SerializableHopByHopOption = (*IPv6RouterAlertOption)(nil) + +// IPv6RouterAlertOption is the IPv6 Router alert Hop by Hop option defined in +// RFC 2711 section 2.1. +type IPv6RouterAlertOption struct { + Value IPv6RouterAlertValue +} + +// IPv6RouterAlertValue is the payload of an IPv6 Router Alert option. 
+type IPv6RouterAlertValue uint16 + +const ( + // IPv6RouterAlertMLD indicates a datagram containing a Multicast Listener + // Discovery message as defined in RFC 2711 section 2.1. + IPv6RouterAlertMLD IPv6RouterAlertValue = 0 + // IPv6RouterAlertRSVP indicates a datagram containing an RSVP message as + // defined in RFC 2711 section 2.1. + IPv6RouterAlertRSVP IPv6RouterAlertValue = 1 + // IPv6RouterAlertActiveNetworks indicates a datagram containing an Active + // Networks message as defined in RFC 2711 section 2.1. + IPv6RouterAlertActiveNetworks IPv6RouterAlertValue = 2 + + // ipv6RouterAlertPayloadLength is the length of the Router Alert payload + // as defined in RFC 2711. + ipv6RouterAlertPayloadLength = 2 + + // ipv6RouterAlertAlignmentRequirement is the alignment requirement for the + // Router Alert option defined as 2n+0 in RFC 2711. + ipv6RouterAlertAlignmentRequirement = 2 + + // ipv6RouterAlertAlignmentOffsetRequirement is the alignment offset + // requirement for the Router Alert option defined as 2n+0 in RFC 2711 section + // 2.1. + ipv6RouterAlertAlignmentOffsetRequirement = 0 +) + +// UnknownAction implements IPv6ExtHdrOption. +func (*IPv6RouterAlertOption) UnknownAction() IPv6OptionUnknownAction { + return ipv6UnknownActionFromIdentifier(ipv6RouterAlertHopByHopOptionIdentifier) +} + +// isIPv6ExtHdrOption implements IPv6ExtHdrOption. +func (*IPv6RouterAlertOption) isIPv6ExtHdrOption() {} + +// identifier implements IPv6SerializableHopByHopOption. +func (*IPv6RouterAlertOption) identifier() IPv6ExtHdrOptionIdentifier { + return ipv6RouterAlertHopByHopOptionIdentifier +} + +// length implements IPv6SerializableHopByHopOption. +func (*IPv6RouterAlertOption) length() uint8 { + return ipv6RouterAlertPayloadLength +} + +// alignment implements IPv6SerializableHopByHopOption. +func (*IPv6RouterAlertOption) alignment() (int, int) { + // From RFC 2711 section 2.1: + // Alignment requirement: 2n+0. + return ipv6RouterAlertAlignmentRequirement, ipv6RouterAlertAlignmentOffsetRequirement +} + +// serializeInto implements IPv6SerializableHopByHopOption. +func (o *IPv6RouterAlertOption) serializeInto(b []byte) uint8 { + binary.BigEndian.PutUint16(b, uint16(o.Value)) + return ipv6RouterAlertPayloadLength +} + +// IPv6ExtHdrSerializer provides serialization of IPv6 extension headers. +type IPv6ExtHdrSerializer []IPv6SerializableExtHdr + +// Serialize serializes the provided list of IPv6 extension headers into b. +// +// Note, b must be of sufficient size to hold all the headers in s. See +// IPv6ExtHdrSerializer.Length for details on the getting the total size of a +// serialized IPv6ExtHdrSerializer. +// +// Serialize may panic if b is not of sufficient size to hold all the options +// in s. +// +// Serialize takes the transportProtocol value to be used as the last extension +// header's Next Header value and returns the header identifier of the first +// serialized extension header and the total serialized length. +func (s IPv6ExtHdrSerializer) Serialize(transportProtocol tcpip.TransportProtocolNumber, b []byte) (uint8, int) { + nextHeader := uint8(transportProtocol) + if len(s) == 0 { + return nextHeader, 0 + } + var totalLength int + for i, h := range s[:len(s)-1] { + length := h.serializeInto(uint8(s[i+1].identifier()), b) + b = b[length:] + totalLength += length + } + totalLength += s[len(s)-1].serializeInto(nextHeader, b) + return uint8(s[0].identifier()), totalLength +} + +// Length returns the total number of bytes required to serialize the extension +// headers. 
+func (s IPv6ExtHdrSerializer) Length() int { + var totalLength int + for _, h := range s { + totalLength += h.length() + } + return totalLength +} diff --git a/pkg/tcpip/header/ipv6_fragment.go b/pkg/tcpip/header/ipv6_fragment.go index 018555a26..9d09f32eb 100644 --- a/pkg/tcpip/header/ipv6_fragment.go +++ b/pkg/tcpip/header/ipv6_fragment.go @@ -27,12 +27,11 @@ const ( idV6 = 4 ) -// IPv6FragmentFields contains the fields of an IPv6 fragment. It is used to describe the -// fields of a packet that needs to be encoded. -type IPv6FragmentFields struct { - // NextHeader is the "next header" field of an IPv6 fragment. - NextHeader uint8 +var _ IPv6SerializableExtHdr = (*IPv6SerializableFragmentExtHdr)(nil) +// IPv6SerializableFragmentExtHdr is used to serialize an IPv6 fragment +// extension header as defined in RFC 8200 section 4.5. +type IPv6SerializableFragmentExtHdr struct { // FragmentOffset is the "fragment offset" field of an IPv6 fragment. FragmentOffset uint16 @@ -43,6 +42,29 @@ type IPv6FragmentFields struct { Identification uint32 } +// identifier implements IPv6SerializableFragmentExtHdr. +func (h *IPv6SerializableFragmentExtHdr) identifier() IPv6ExtensionHeaderIdentifier { + return IPv6FragmentHeader +} + +// length implements IPv6SerializableFragmentExtHdr. +func (h *IPv6SerializableFragmentExtHdr) length() int { + return IPv6FragmentHeaderSize +} + +// serializeInto implements IPv6SerializableFragmentExtHdr. +func (h *IPv6SerializableFragmentExtHdr) serializeInto(nextHeader uint8, b []byte) int { + // Prevent too many bounds checks. + _ = b[IPv6FragmentHeaderSize:] + binary.BigEndian.PutUint32(b[idV6:], h.Identification) + binary.BigEndian.PutUint16(b[fragOff:], h.FragmentOffset<<ipv6FragmentExtHdrFragmentOffsetShift) + b[nextHdrFrag] = nextHeader + if h.M { + b[more] |= ipv6FragmentExtHdrMFlagMask + } + return IPv6FragmentHeaderSize +} + // IPv6Fragment represents an ipv6 fragment header stored in a byte array. // Most of the methods of IPv6Fragment access to the underlying slice without // checking the boundaries and could panic because of 'index out of range'. @@ -58,16 +80,6 @@ const ( IPv6FragmentHeaderSize = 8 ) -// Encode encodes all the fields of the ipv6 fragment. -func (b IPv6Fragment) Encode(i *IPv6FragmentFields) { - b[nextHdrFrag] = i.NextHeader - binary.BigEndian.PutUint16(b[fragOff:], i.FragmentOffset<<3) - if i.M { - b[more] |= 1 - } - binary.BigEndian.PutUint32(b[idV6:], i.Identification) -} - // IsValid performs basic validation on the fragment header. func (b IPv6Fragment) IsValid() bool { return len(b) >= IPv6FragmentHeaderSize diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go index e506e99e9..a49b5ac77 100644 --- a/pkg/tcpip/network/ipv6/ipv6.go +++ b/pkg/tcpip/network/ipv6/ipv6.go @@ -19,6 +19,7 @@ import ( "encoding/binary" "fmt" "hash/fnv" + "math" "sort" "sync/atomic" "time" @@ -431,19 +432,27 @@ func (e *endpoint) MTU() uint32 { // MaxHeaderLength returns the maximum length needed by ipv6 headers (and // underlying protocols). func (e *endpoint) MaxHeaderLength() uint16 { + // TODO(gvisor.dev/issues/5035): The maximum header length returned here does + // not open the possibility for the caller to know about size required for + // extension headers. 
return e.nic.MaxHeaderLength() + header.IPv6MinimumSize } -func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams) { - length := uint16(pkt.Size()) - ip := header.IPv6(pkt.NetworkHeader().Push(header.IPv6MinimumSize)) +func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) { + extHdrsLen := extensionHeaders.Length() + length := pkt.Size() + extensionHeaders.Length() + if length > math.MaxUint16 { + panic(fmt.Sprintf("IPv6 payload too large: %d, must be <= %d", length, math.MaxUint16)) + } + ip := header.IPv6(pkt.NetworkHeader().Push(header.IPv6MinimumSize + extHdrsLen)) ip.Encode(&header.IPv6Fields{ - PayloadLength: length, - NextHeader: uint8(params.Protocol), - HopLimit: params.TTL, - TrafficClass: params.TOS, - SrcAddr: srcAddr, - DstAddr: dstAddr, + PayloadLength: uint16(length), + TransportProtocol: params.Protocol, + HopLimit: params.TTL, + TrafficClass: params.TOS, + SrcAddr: srcAddr, + DstAddr: dstAddr, + ExtensionHeaders: extensionHeaders, }) pkt.NetworkProtocolNumber = ProtocolNumber } @@ -498,7 +507,7 @@ func (e *endpoint) handleFragments(r *stack.Route, gso *stack.GSO, networkMTU ui // WritePacket writes a packet to the given destination address and protocol. func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) *tcpip.Error { - e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params) + e.addIPHeader(r.LocalAddress, r.RemoteAddress, pkt, params, nil /* extensionHeaders */) // iptables filtering. All packets that reach here are locally // generated. @@ -587,7 +596,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe linkMTU := e.nic.MTU() for pb := pkts.Front(); pb != nil; pb = pb.Next() { - e.addIPHeader(r.LocalAddress, r.RemoteAddress, pb, params) + e.addIPHeader(r.LocalAddress, r.RemoteAddress, pb, params, nil /* extensionHeaders */) networkMTU, err := calculateNetworkMTU(linkMTU, uint32(pb.NetworkHeader().View().Size())) if err != nil { @@ -1793,24 +1802,25 @@ func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeaders hea fragPkt.NetworkProtocolNumber = ProtocolNumber originalIPHeadersLength := len(originalIPHeaders) - fragmentIPHeadersLength := originalIPHeadersLength + header.IPv6FragmentHeaderSize + + s := header.IPv6ExtHdrSerializer{&header.IPv6SerializableFragmentExtHdr{ + FragmentOffset: uint16(offset / header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit), + M: more, + Identification: id, + }} + + fragmentIPHeadersLength := originalIPHeadersLength + s.Length() fragmentIPHeaders := header.IPv6(fragPkt.NetworkHeader().Push(fragmentIPHeadersLength)) - fragPkt.NetworkProtocolNumber = ProtocolNumber // Copy the IPv6 header and any extension headers already populated. 
if copied := copy(fragmentIPHeaders, originalIPHeaders); copied != originalIPHeadersLength { panic(fmt.Sprintf("wrong number of bytes copied into fragmentIPHeaders: got %d, want %d", copied, originalIPHeadersLength)) } - fragmentIPHeaders.SetNextHeader(header.IPv6FragmentHeader) - fragmentIPHeaders.SetPayloadLength(uint16(copied + fragmentIPHeadersLength - header.IPv6MinimumSize)) - fragmentHeader := header.IPv6Fragment(fragmentIPHeaders[originalIPHeadersLength:]) - fragmentHeader.Encode(&header.IPv6FragmentFields{ - M: more, - FragmentOffset: uint16(offset / header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit), - Identification: id, - NextHeader: uint8(transportProto), - }) + nextHeader, _ := s.Serialize(transportProto, fragmentIPHeaders[originalIPHeadersLength:]) + + fragmentIPHeaders.SetNextHeader(nextHeader) + fragmentIPHeaders.SetPayloadLength(uint16(copied + fragmentIPHeadersLength - header.IPv6MinimumSize)) return fragPkt, more } diff --git a/pkg/tcpip/network/ipv6/mld.go b/pkg/tcpip/network/ipv6/mld.go index 48644d9c8..6f64b8462 100644 --- a/pkg/tcpip/network/ipv6/mld.go +++ b/pkg/tcpip/network/ipv6/mld.go @@ -234,17 +234,21 @@ func (mld *mldState) writePacket(destAddress, groupAddress tcpip.Address, mldTyp icmp.SetChecksum(header.ICMPv6Checksum(icmp, localAddress, destAddress, buffer.VectorisedView{})) + extensionHeaders := header.IPv6ExtHdrSerializer{ + header.IPv6SerializableHopByHopExtHdr{ + &header.IPv6RouterAlertOption{Value: header.IPv6RouterAlertMLD}, + }, + } + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ - ReserveHeaderBytes: int(mld.ep.MaxHeaderLength()), + ReserveHeaderBytes: int(mld.ep.MaxHeaderLength()) + extensionHeaders.Length(), Data: buffer.View(icmp).ToVectorisedView(), }) mld.ep.addIPHeader(localAddress, destAddress, pkt, stack.NetworkHeaderParams{ Protocol: header.ICMPv6ProtocolNumber, TTL: header.MLDHopLimit, - }) - // TODO(b/162198658): set the ROUTER_ALERT option when sending Host - // Membership Reports. + }, extensionHeaders) if err := mld.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(destAddress), nil /* gso */, ProtocolNumber, pkt); err != nil { sentStats.Dropped.Increment() return false, err diff --git a/pkg/tcpip/network/ipv6/ndp.go b/pkg/tcpip/network/ipv6/ndp.go index 3b892aeda..d515eb622 100644 --- a/pkg/tcpip/network/ipv6/ndp.go +++ b/pkg/tcpip/network/ipv6/ndp.go @@ -735,7 +735,7 @@ func (ndp *ndpState) sendDADPacket(addr tcpip.Address, addressEndpoint stack.Add ndp.ep.addIPHeader(header.IPv6Any, snmc, pkt, stack.NetworkHeaderParams{ Protocol: header.ICMPv6ProtocolNumber, TTL: header.NDPHopLimit, - }) + }, nil /* extensionHeaders */) if err := ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(snmc), nil /* gso */, ProtocolNumber, pkt); err != nil { sent.Dropped.Increment() @@ -1857,7 +1857,7 @@ func (ndp *ndpState) startSolicitingRouters() { ndp.ep.addIPHeader(localAddr, header.IPv6AllRoutersMulticastAddress, pkt, stack.NetworkHeaderParams{ Protocol: header.ICMPv6ProtocolNumber, TTL: header.NDPHopLimit, - }) + }, nil /* extensionHeaders */) if err := ndp.ep.nic.WritePacketToRemote(header.EthernetAddressFromMulticastIPv6Address(header.IPv6AllRoutersMulticastAddress), nil /* gso */, ProtocolNumber, pkt); err != nil { sent.Dropped.Increment() |
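Taken together, the header changes above replace IPv6Fields.NextHeader with a TransportProtocol plus an ExtensionHeaders serializer, and IPv6.Encode now writes the extension headers into the bytes immediately after the fixed header while chaining the Next Header fields itself. Below is a minimal standalone sketch of that exported API as the new mld.go code uses it; the main package, payload size, and addresses are illustrative only and not part of this change.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Hop-by-Hop header carrying a Router Alert (MLD) option, mirroring what
	// mld.go now builds before calling addIPHeader.
	extHdrs := header.IPv6ExtHdrSerializer{
		header.IPv6SerializableHopByHopExtHdr{
			&header.IPv6RouterAlertOption{Value: header.IPv6RouterAlertMLD},
		},
	}

	const payloadLen = 24 // hypothetical ICMPv6 payload size
	buf := make([]byte, header.IPv6MinimumSize+extHdrs.Length()+payloadLen)

	// Encode serializes extHdrs into buf[IPv6MinimumSize:] and stores the
	// identifier of the first extension header in the Next Header field.
	ip := header.IPv6(buf)
	ip.Encode(&header.IPv6Fields{
		PayloadLength:     uint16(extHdrs.Length() + payloadLen),
		TransportProtocol: header.ICMPv6ProtocolNumber,
		HopLimit:          header.MLDHopLimit,
		SrcAddr:           header.IPv6Any,
		DstAddr:           header.IPv6AllNodesMulticastAddress,
		ExtensionHeaders:  extHdrs,
	})

	// Next Header is 0 (Hop-by-Hop); the Hop-by-Hop header then chains to
	// ICMPv6 (58).
	fmt.Println(ip.NextHeader(), extHdrs.Length())
}

Note that, as in addIPHeader, the payload length handed to Encode must already include the serialized extension header length.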
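The option layout arithmetic added to ipv6_extension_headers.go is compact enough to be easy to misread: padIPv6OptionsLength rounds a Hop-by-Hop header up to a whole number of 8-octet units (RFC 8200 section 4.2), and ipv6OptionsAlignmentPadding returns the padding needed so an option's type byte lands on an [align]n + alignOffset boundary. The sketch below restates those two unexported helpers with plain constants and works the Router Alert case (2n+0 alignment, 2-byte payload) through them; the helper names are copied here only for illustration.

package main

import "fmt"

// Rounds length up to the next multiple of 8 octets, mirroring
// padIPv6OptionsLength with ipv6ExtHdrLenBytesPerUnit = 8.
func padIPv6OptionsLength(length int) int {
	return (length + 7) &^ 7
}

// Padding needed so the next option starts at align*n + alignOffset bytes from
// the start of the extension header, mirroring ipv6OptionsAlignmentPadding.
func ipv6OptionsAlignmentPadding(headerOffset, align, alignOffset int) int {
	padLen := headerOffset - alignOffset
	return ((padLen + align - 1) &^ (align - 1)) - padLen
}

func main() {
	// Options begin at offset 2 of a Hop-by-Hop header, after the Next Header
	// and Hdr Ext Len bytes.
	const optionsOffset = 2

	// Router Alert: alignment 2n+0 and a 2-byte payload (RFC 2711 section 2.1).
	pad := ipv6OptionsAlignmentPadding(optionsOffset, 2, 0)
	optLen := pad + 2 /* type + length */ + 2 /* payload */
	total := padIPv6OptionsLength(optionsOffset + optLen)

	fmt.Println(pad, total) // 0 8: no alignment padding, 2 trailing PadN bytes
}

That matches what IPv6SerializableHopByHopExtHdr.length() reports for the MLD Router Alert header: 2 bytes of Next Header/Hdr Ext Len, 4 bytes of option, and 2 bytes of PadN written by padIPv6Option.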
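buildNextFragment follows the same pattern: the removed IPv6Fragment.Encode/IPv6FragmentFields pair is replaced by serializing an IPv6SerializableFragmentExtHdr through the serializer, which also yields the value for the preceding Next Header field. A sketch of that serialization in isolation, again assuming the packages at this revision; the offset, identification value, and the choice of UDP as the upper-layer protocol are made up.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Fragment offsets are carried in 8-octet units; M marks that more
	// fragments follow.
	s := header.IPv6ExtHdrSerializer{&header.IPv6SerializableFragmentExtHdr{
		FragmentOffset: 160, // 1280 bytes / 8-octet units
		M:              true,
		Identification: 0xdeadbeef,
	}}

	b := make([]byte, s.Length()) // 8 bytes: the fixed-size fragment header

	// Serialize returns the identifier of the first serialized header (44, the
	// fragment header) for the enclosing Next Header field, plus the number of
	// bytes written. UDP (17) ends up in the fragment header's own Next Header.
	nextHeader, n := s.Serialize(header.UDPProtocolNumber, b)
	fmt.Printf("next header %d, %d bytes: % x\n", nextHeader, n, b)
}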
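Separately, the regenerated go_marshal output at the top of this section (MessageHeader64 and multipleMessageHeader64 in the vfs2 package) keeps the usual split: a type whose memory layout is packed is copied with a single unsafe memory operation, while anything else falls back to the field-by-field MarshalBytes path. The standalone sketch below imitates that decision with stdlib code only; the pair type, the packed flag, and the little-endian assumption are inventions of the sketch, not part of the generated API.

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// pair loosely imitates multipleMessageHeader64: a fixed-size header value
// followed by a 32-bit length and explicit trailing padding.
type pair struct {
	hdr uint64
	len uint32
	_   uint32 // explicit padding so the layout has no implicit holes
}

// marshal copies p into dst either with one unsafe copy (the packed fast path)
// or field by field (the MarshalBytes-style fallback).
func marshal(p *pair, packed bool, dst []byte) {
	if packed {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(p)), unsafe.Sizeof(*p)))
		return
	}
	binary.LittleEndian.PutUint64(dst[0:8], p.hdr)
	binary.LittleEndian.PutUint32(dst[8:12], p.len)
	// dst[12:16] is padding and is left as zero, like the generated code.
}

func main() {
	p := pair{hdr: 42, len: 7}
	fast := make([]byte, unsafe.Sizeof(p))
	slow := make([]byte, unsafe.Sizeof(p))
	marshal(&p, true, fast)
	marshal(&p, false, slow)
	fmt.Println(string(fast) == string(slow)) // true on little-endian hosts
}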