author    | gVisor bot <gvisor-bot@google.com> | 2020-12-30 19:26:31 +0000
committer | gVisor bot <gvisor-bot@google.com> | 2020-12-30 19:26:31 +0000
commit    | fab70369685c35fa4b8e305f488b9f9232043e01 (patch)
tree      | 5fd611b135507bcfb5f88a6bf3970f6b5d4e5e37 /pkg
parent    | 37811c1b997c9d492ee4c24dfaa2813637bbd540 (diff)
parent    | 1b66bad7c47e914994e19f39119d91ab6805002a (diff)
Merge release-20201208.0-97-g1b66bad7c (automated)
Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/abi/linux/linux_abi_autogen_unsafe.go             | 3840
-rw-r--r-- | pkg/marshal/primitive/primitive_abi_autogen_unsafe.go |  354
-rw-r--r-- | pkg/sentry/arch/arch_abi_autogen_unsafe.go            |  244
-rw-r--r-- | pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go      |  322

4 files changed, 2380 insertions, 2380 deletions
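
All four files in this diffstat are generated implementations of gVisor's marshal.Marshallable interface, and the hunks below show the generator re-emitting the same per-type method blocks (SizeBytes, MarshalBytes, UnmarshalBytes, Packed, MarshalUnsafe, UnmarshalUnsafe, CopyIn/CopyOut/CopyOutN, WriteTo) in a different order; insertions and deletions are equal (2380 each), consistent with a reorder of generated blocks rather than a behavior change. As an orientation aid, here is a minimal sketch of the interface shape those methods implement, reconstructed from the signatures visible in the diff. It is illustrative only; the local Addr type stands in for usermem.Addr, and the authoritative definitions live in pkg/marshal and pkg/usermem.

```go
// Illustrative sketch, not the gVisor source: the interface shape that every
// generated type in this diff (CapUserHeader, SockErrCMsgIPv6, FUSEHeaderIn,
// FUSEEntryOut, ...) implements, reconstructed from the method signatures
// visible in the hunks below.
package marshal

import "io"

// Addr stands in for usermem.Addr, a userspace virtual address.
type Addr uintptr

// CopyContext supplies the copy primitives that the generated
// CopyIn/CopyOut/CopyOutN methods call.
type CopyContext interface {
	// CopyScratchBuffer returns a scratch buffer used by the non-packed
	// fallback paths.
	CopyScratchBuffer(size int) []byte
	// CopyInBytes copies up to len(b) bytes from task memory at addr into b.
	CopyInBytes(addr Addr, b []byte) (int, error)
	// CopyOutBytes copies up to len(b) bytes from b into task memory at addr.
	CopyOutBytes(addr Addr, b []byte) (int, error)
}

// Marshallable is the contract each generated block in this diff satisfies.
type Marshallable interface {
	io.WriterTo

	// SizeBytes returns the size of the type's serialized form.
	SizeBytes() int
	// MarshalBytes and UnmarshalBytes serialize field by field.
	MarshalBytes(dst []byte)
	UnmarshalBytes(src []byte)
	// Packed reports whether the in-memory layout equals the serialized
	// layout, so the Unsafe variants may copy the struct's memory directly.
	Packed() bool
	MarshalUnsafe(dst []byte)
	UnmarshalUnsafe(src []byte)
	// CopyOutN, CopyOut and CopyIn move the serialized form to and from
	// task memory through a CopyContext.
	CopyOutN(cc CopyContext, addr Addr, limit int) (int, error)
	CopyOut(cc CopyContext, addr Addr) (int, error)
	CopyIn(cc CopyContext, addr Addr) (int, error)
}
```

The recurring unsafe idiom in these generated methods is to alias the struct's own memory as a []byte through a reflect.SliceHeader and then pin the struct with runtime.KeepAlive, since escape analysis was bypassed. Below is a stripped-down sketch of that pattern, assuming a plain two-field struct and omitting gVisor's gohacks.Noescape helper (newer Go code would reach for unsafe.Slice instead); it is not the gVisor implementation.

```go
// Illustrative sketch of the zero-copy WriteTo pattern repeated throughout
// the diff; not the gVisor implementation.
package main

import (
	"bytes"
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// capUserHeader mirrors the two-field layout marshalled in the diff.
type capUserHeader struct {
	Version uint32
	Pid     int32
}

// writeRaw writes the struct's raw bytes without an intermediate copy by
// building a slice header that points at the struct itself.
func writeRaw(w *bytes.Buffer, c *capUserHeader) (int, error) {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(c))
	hdr.Len = int(unsafe.Sizeof(*c))
	hdr.Cap = int(unsafe.Sizeof(*c))

	n, err := w.Write(buf)
	// The compiler no longer sees c behind buf, so keep it live until the
	// write above has consumed the aliased memory.
	runtime.KeepAlive(c)
	return n, err
}

func main() {
	var out bytes.Buffer
	n, err := writeRaw(&out, &capUserHeader{Version: 3, Pid: 42})
	fmt.Println(n, err, out.Len()) // expect: 8 <nil> 8
}
```
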
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go index e64c0a4da..99a8ee562 100644 --- a/pkg/abi/linux/linux_abi_autogen_unsafe.go +++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go @@ -524,99 +524,6 @@ func UnmarshalUnsafeBPFInstructionSlice(dst []BPFInstruction, src []byte) (int, } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (c *CapUserHeader) SizeBytes() int { - return 8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (c *CapUserHeader) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Version)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Pid)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (c *CapUserHeader) UnmarshalBytes(src []byte) { - c.Version = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - c.Pid = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (c *CapUserHeader) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (c *CapUserHeader) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(c)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (c *CapUserHeader) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(c), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return c.CopyOutN(cc, addr, c.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (c *CapUserHeader) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. 
- return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (c *CapUserData) SizeBytes() int { return 12 } @@ -800,132 +707,95 @@ func UnmarshalUnsafeCapUserDataSlice(dst []CapUserData, src []byte) (int, error) } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockErrCMsgIPv6) SizeBytes() int { - return 0 + - (*SockExtendedErr)(nil).SizeBytes() + - (*SockAddrInet6)(nil).SizeBytes() +func (c *CapUserHeader) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockErrCMsgIPv6) MarshalBytes(dst []byte) { - s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()]) - dst = dst[s.SockExtendedErr.SizeBytes():] - s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()]) - dst = dst[s.Offender.SizeBytes():] +func (c *CapUserHeader) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Version)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Pid)) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SockErrCMsgIPv6) UnmarshalBytes(src []byte) { - s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()]) - src = src[s.SockExtendedErr.SizeBytes():] - s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()]) - src = src[s.Offender.SizeBytes():] +func (c *CapUserHeader) UnmarshalBytes(src []byte) { + c.Version = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + c.Pid = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SockErrCMsgIPv6) Packed() bool { - return s.Offender.Packed() && s.SockExtendedErr.Packed() +func (c *CapUserHeader) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockErrCMsgIPv6) MarshalUnsafe(dst []byte) { - if s.Offender.Packed() && s.SockExtendedErr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) - } else { - // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) - } +func (c *CapUserHeader) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(c)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockErrCMsgIPv6) UnmarshalUnsafe(src []byte) { - if s.Offender.Packed() && s.SockExtendedErr.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) - } else { - // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) - } +func (c *CapUserHeader) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(c), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Offender.Packed() && s.SockExtendedErr.Packed() { - // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - s.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) +func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return c.CopyOutN(cc, addr, c.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Offender.Packed() && s.SockExtendedErr.Packed() { - // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockErrCMsgIPv6) WriteTo(writer io.Writer) (int64, error) { - if !s.Offender.Packed() && s.SockExtendedErr.Packed() { - // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (c *CapUserHeader) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that c // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. 
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic. return int64(length), err } @@ -1173,6 +1043,136 @@ func (s *SockErrCMsgIPv4) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SockErrCMsgIPv6) SizeBytes() int { + return 0 + + (*SockExtendedErr)(nil).SizeBytes() + + (*SockAddrInet6)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SockErrCMsgIPv6) MarshalBytes(dst []byte) { + s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()]) + dst = dst[s.SockExtendedErr.SizeBytes():] + s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()]) + dst = dst[s.Offender.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SockErrCMsgIPv6) UnmarshalBytes(src []byte) { + s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()]) + src = src[s.SockExtendedErr.SizeBytes():] + s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()]) + src = src[s.Offender.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SockErrCMsgIPv6) Packed() bool { + return s.Offender.Packed() && s.SockExtendedErr.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SockErrCMsgIPv6) MarshalUnsafe(dst []byte) { + if s.Offender.Packed() && s.SockExtendedErr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) + } else { + // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SockErrCMsgIPv6) UnmarshalUnsafe(src []byte) { + if s.Offender.Packed() && s.SockExtendedErr.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) + } else { + // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Offender.Packed() && s.SockExtendedErr.Packed() { + // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Offender.Packed() && s.SockExtendedErr.Packed() { + // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. 
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + s.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SockErrCMsgIPv6) WriteTo(writer io.Writer) (int64, error) { + if !s.Offender.Packed() && s.SockExtendedErr.Packed() { + // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (f *Flock) SizeBytes() int { return 24 + 1*4 + @@ -1728,65 +1728,53 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEReadIn) SizeBytes() int { - return 40 +func (f *FUSEInitIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEReadIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) +func (f *FUSEInitIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEInitIn) UnmarshalBytes(src []byte) { + f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReadIn) Packed() bool { +func (f *FUSEInitIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { +func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1803,13 +1791,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1825,7 +1813,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1841,45 +1829,83 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEWriteOut) SizeBytes() int { - return 8 +func (f *FUSEEntryOut) SizeBytes() int { + return 40 + + (*FUSEAttr)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEWriteOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) +func (f *FUSEEntryOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec)) dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec)) dst = dst[4:] + f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) + dst = dst[f.Attr.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEWriteOut) UnmarshalBytes(src []byte) { - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEEntryOut) UnmarshalBytes(src []byte) { + f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] + f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) + src = src[f.Attr.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEWriteOut) Packed() bool { - return true +func (f *FUSEEntryOut) Packed() bool { + return f.Attr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) { + if f.Attr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes. + f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) { + if f.Attr.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1896,13 +1922,23 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1918,7 +1954,15 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Attr.Packed() { + // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1934,29 +1978,41 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSECreateMeta) SizeBytes() int { - return 16 +func (f *FUSEReadIn) SizeBytes() int { + return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSECreateMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) +func (f *FUSEReadIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEReadIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -1964,23 +2020,23 @@ func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSECreateMeta) Packed() bool { +func (f *FUSEReadIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { +func (f *FUSEReadIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { +func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -1997,13 +2053,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2019,7 +2075,7 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2035,272 +2091,65 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEDirentMeta) SizeBytes() int { - return 24 +func (f *FUSEWriteIn) SizeBytes() int { + return 40 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) +func (f *FUSEWriteIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (f *FUSEDirentMeta) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return f.CopyOutN(cc, addr, f.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. 
-//go:nosplit -func (f *FUSEOpID) SizeBytes() int { - return 8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpID) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpID) UnmarshalBytes(src []byte) { - *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (f *FUSEOpID) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpID) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return f.CopyOutN(cc, addr, f.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) - hdr.Len = f.SizeBytes() - hdr.Cap = f.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that f - // must live until the use above. - runtime.KeepAlive(f) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitOut) SizeBytes() int { - return 32 + - 4*8 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEInitOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) + dst = dst[8:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) - dst = dst[2:] - // Padding: dst[:sizeof(uint16)] ~= uint16(0) - dst = dst[2:] - // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} - dst = dst[4*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEInitOut) UnmarshalBytes(src []byte) { - f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - // Padding: var _ uint16 ~= src[:sizeof(uint16)] - src = src[2:] - // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) - src = src[4*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitOut) Packed() bool { +func (f *FUSEWriteIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { +func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2317,13 +2166,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2339,7 +2188,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2355,49 +2204,41 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEOpenOut) SizeBytes() int { - return 16 +//go:nosplit +func (f *FUSEOpcode) SizeBytes() int { + return 4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] +func (f *FUSEOpcode) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] +func (f *FUSEOpcode) UnmarshalBytes(src []byte) { + *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenOut) Packed() bool { +func (f *FUSEOpcode) Packed() bool { + // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { +func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2414,13 +2255,13 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2436,7 +2277,7 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2444,7 +2285,7 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. @@ -2452,78 +2293,62 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEEntryOut) SizeBytes() int { - return 40 + - (*FUSEAttr)(nil).SizeBytes() +func (f *FUSEHeaderOut) SizeBytes() int { + return 8 + + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEEntryOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec)) +func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) dst = dst[4:] - f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) - dst = dst[f.Attr.SizeBytes():] + f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) + dst = dst[f.Unique.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEEntryOut) UnmarshalBytes(src []byte) { - f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { + f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) - src = src[f.Attr.SizeBytes():] + f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) + src = src[f.Unique.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. 
//go:nosplit -func (f *FUSEEntryOut) Packed() bool { - return f.Attr.Packed() +func (f *FUSEHeaderOut) Packed() bool { + return f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) { - if f.Attr.Packed() { +func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { + if f.Unique.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(f)) } else { - // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. f.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) { - if f.Attr.Packed() { +func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { + if f.Unique.Packed() { safecopy.CopyOut(unsafe.Pointer(f), src) } else { - // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. f.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. f.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -2545,15 +2370,15 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. +func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -2577,9 +2402,9 @@ func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Attr.Packed() { - // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Unique.Packed() { + // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. 
buf := make([]byte, f.SizeBytes()) f.MarshalBytes(buf) length, err := writer.Write(buf) @@ -2601,21 +2426,21 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEOpenIn) SizeBytes() int { +func (f *FUSEWriteOut) SizeBytes() int { return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEOpenIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) +func (f *FUSEWriteOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEWriteOut) UnmarshalBytes(src []byte) { + f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -2623,23 +2448,23 @@ func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpenIn) Packed() bool { +func (f *FUSEWriteOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { +func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2656,13 +2481,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2678,7 +2503,7 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2694,65 +2519,53 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEWriteIn) SizeBytes() int { - return 40 +func (f *FUSEReleaseIn) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEWriteIn) MarshalBytes(dst []byte) { +func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) dst = dst[4:] usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEWriteIn) UnmarshalBytes(src []byte) { +func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEWriteIn) Packed() bool { +func (f *FUSEReleaseIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) { +func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2769,13 +2582,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2791,7 +2604,7 @@ func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. 
-func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -2956,62 +2769,172 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderOut) SizeBytes() int { - return 8 + +//go:nosplit +func (f *FUSEOpID) SizeBytes() int { + return 8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEOpID) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*f)) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FUSEOpID) UnmarshalBytes(src []byte) { + *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8]))) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEOpID) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEOpID) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEOpID) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return f.CopyOutN(cc, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. 
+func (f *FUSEHeaderIn) SizeBytes() int { + return 28 + + (*FUSEOpcode)(nil).SizeBytes() + (*FUSEOpID)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEHeaderOut) MarshalBytes(dst []byte) { +func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error)) - dst = dst[4:] + f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) + dst = dst[f.Opcode.SizeBytes():] f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) dst = dst[f.Unique.SizeBytes():] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) { +func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Error = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] + f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) + src = src[f.Opcode.SizeBytes():] f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) src = src[f.Unique.SizeBytes():] + f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderOut) Packed() bool { - return f.Unique.Packed() +func (f *FUSEHeaderIn) Packed() bool { + return f.Opcode.Packed() && f.Unique.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) { - if f.Unique.Packed() { +func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(f)) } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. f.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) { - if f.Unique.Packed() { +func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { + if f.Opcode.Packed() && f.Unique.Packed() { safecopy.CopyOut(unsafe.Pointer(f), src) } else { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. f.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. 
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. f.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -3033,15 +2956,15 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. +func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -3065,9 +2988,9 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Unique.Packed() { - // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes. +func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { + if !f.Opcode.Packed() && f.Unique.Packed() { + // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, f.SizeBytes()) f.MarshalBytes(buf) length, err := writer.Write(buf) @@ -3089,12 +3012,13 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEInitIn) SizeBytes() int { - return 16 +func (f *FUSEInitOut) SizeBytes() int { + return 32 + + 4*8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEInitIn) MarshalBytes(dst []byte) { +func (f *FUSEInitOut) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major)) dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor)) @@ -3103,10 +3027,24 @@ func (f *FUSEInitIn) MarshalBytes(dst []byte) { dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages)) + dst = dst[2:] + // Padding: dst[:sizeof(uint16)] ~= uint16(0) + dst = dst[2:] + // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0} + dst = dst[4*(8):] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) { +func (f *FUSEInitOut) UnmarshalBytes(src []byte) { f.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) @@ -3115,27 +3053,41 @@ func (f *FUSEInitIn) UnmarshalBytes(src []byte) { src = src[4:] f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] + f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + // Padding: var _ uint16 ~= src[:sizeof(uint16)] + src = src[2:] + // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8]) + src = src[4*(8):] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEInitIn) Packed() bool { +func (f *FUSEInitOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEInitIn) MarshalUnsafe(dst []byte) { +func (f *FUSEInitOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3152,13 +3104,13 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3174,7 +3126,7 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3190,101 +3142,49 @@ func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEAttr) SizeBytes() int { - return 88 +func (f *FUSEGetAttrIn) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEAttr) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) +func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEAttr) UnmarshalBytes(src []byte) { - f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { + f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] + f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEAttr) Packed() bool { +func (f *FUSEGetAttrIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEAttr) MarshalUnsafe(dst []byte) { +func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { +func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
//go:nosplit -func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3301,13 +3201,13 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3323,7 +3223,7 @@ func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3339,45 +3239,71 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEMkdirMeta) SizeBytes() int { - return 8 +func (f *FUSEGetAttrOut) SizeBytes() int { + return 16 + + (*FUSEAttr)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) +func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] + f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) + dst = dst[f.Attr.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) { - f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) { + f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] + f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) + src = src[f.Attr.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEMkdirMeta) Packed() bool { - return true +func (f *FUSEGetAttrOut) Packed() bool { + return f.Attr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(f)) +func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) { + if f.Attr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(f)) + } else { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes. 
+ f.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(f), src) +func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) { + if f.Attr.Packed() { + safecopy.CopyOut(unsafe.Pointer(f), src) + } else { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. + f.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + f.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3394,13 +3320,23 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + f.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3416,7 +3352,15 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { + if !f.Attr.Packed() { + // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, f.SizeBytes()) + f.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3432,49 +3376,45 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEGetAttrIn) SizeBytes() int { - return 16 +func (f *FUSEOpenIn) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags)) +func (f *FUSEOpenIn) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) { - f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSEOpenIn) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEGetAttrIn) Packed() bool { +func (f *FUSEOpenIn) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3491,13 +3431,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3513,7 +3453,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3529,53 +3469,49 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEReleaseIn) SizeBytes() int { - return 24 +func (f *FUSEOpenOut) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEReleaseIn) MarshalBytes(dst []byte) { +func (f *FUSEOpenOut) MarshalBytes(dst []byte) { usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh)) dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags)) + // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner)) - dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) { +func (f *FUSEOpenOut) UnmarshalBytes(src []byte) { f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4])) + // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEReleaseIn) Packed() bool { +func (f *FUSEOpenOut) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) { +func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) { +func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3592,13 +3528,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3614,7 +3550,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { +func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3630,53 +3566,45 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEMknodMeta) SizeBytes() int { - return 16 +func (f *FUSEMkdirMeta) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
     usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
     dst = dst[4:]
-    usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
-    dst = dst[4:]
     usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
     dst = dst[4:]
-    // Padding: dst[:sizeof(uint32)] ~= uint32(0)
-    dst = dst[4:]
 }

 // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
     f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
     src = src[4:]
-    f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
-    src = src[4:]
     f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
     src = src[4:]
-    // Padding: var _ uint32 ~= src[:sizeof(uint32)]
-    src = src[4:]
 }

 // Packed implements marshal.Marshallable.Packed.
 //go:nosplit
-func (f *FUSEMknodMeta) Packed() bool {
+func (f *FUSEMkdirMeta) Packed() bool {
     return true
 }

 // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
     safecopy.CopyIn(dst, unsafe.Pointer(f))
 }

 // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
     safecopy.CopyOut(unsafe.Pointer(f), src)
 }

 // CopyOutN implements marshal.Marshallable.CopyOutN.
 //go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
     // Construct a slice backed by dst's underlying memory.
     var buf []byte
     hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3693,13 +3621,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi

 // CopyOut implements marshal.Marshallable.CopyOut.
 //go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
     return f.CopyOutN(cc, addr, f.SizeBytes())
 }

 // CopyIn implements marshal.Marshallable.CopyIn.
 //go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
     // Construct a slice backed by dst's underlying memory.
     var buf []byte
     hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3715,7 +3643,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
 }

 // WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
     // Construct a slice backed by dst's underlying memory.
     var buf []byte
     hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3731,41 +3659,53 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
 }

 // SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpcode) SizeBytes() int {
-    return 4
+func (f *FUSEDirentMeta) SizeBytes() int {
+    return 24
 }

 // MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpcode) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(*f)) +func (f *FUSEDirentMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type)) + dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (f *FUSEOpcode) UnmarshalBytes(src []byte) { - *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4]))) +func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Off = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Type = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEOpcode) Packed() bool { - // Scalar newtypes are always packed. +func (f *FUSEDirentMeta) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEOpcode) MarshalUnsafe(dst []byte) { +func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) { +func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3782,13 +3722,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3804,7 +3744,7 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { +func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3812,7 +3752,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { hdr.Len = f.SizeBytes() hdr.Cap = f.SizeBytes() - length, err := w.Write(buf) + length, err := writer.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that f // must live until the use above. runtime.KeepAlive(f) // escapes: replaced by intrinsic. 
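// A minimal round-trip sketch for the generated FUSEDirentMeta marshalling
// shown above. The test file, package name, and import path are assumptions
// for illustration; the field names (Ino, Off, NameLen, Type) and the
// SizeBytes/MarshalBytes/UnmarshalBytes methods come from the generated code
// in this diff.
package linux_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func TestFUSEDirentMetaRoundTrip(t *testing.T) {
	in := linux.FUSEDirentMeta{
		Ino:     42,
		Off:     128,
		NameLen: 5,
		Type:    4, // illustrative value for a directory-entry type.
	}

	// SizeBytes reports 24 for this type, so a 24-byte buffer round-trips it.
	buf := make([]byte, in.SizeBytes())
	in.MarshalBytes(buf)

	var out linux.FUSEDirentMeta
	out.UnmarshalBytes(buf)

	if out != in {
		t.Errorf("FUSEDirentMeta round trip mismatch: got %+v, want %+v", out, in)
	}
}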
@@ -3820,47 +3760,77 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEHeaderIn) SizeBytes() int { - return 28 + - (*FUSEOpcode)(nil).SizeBytes() + - (*FUSEOpID)(nil).SizeBytes() +func (f *FUSEAttr) SizeBytes() int { + return 88 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEHeaderIn) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len)) - dst = dst[4:] - f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()]) - dst = dst[f.Opcode.SizeBytes():] - f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()]) - dst = dst[f.Unique.SizeBytes():] - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID)) +func (f *FUSEAttr) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime)) dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink)) + dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID)) dst = dst[4:] usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID)) dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { - f.Len = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()]) - src = src[f.Opcode.SizeBytes():] - f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()]) - src = src[f.Unique.SizeBytes():] - f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8])) +func (f *FUSEAttr) UnmarshalBytes(src []byte) { + f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] + f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] f.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] f.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - f.PID = uint32(usermem.ByteOrder.Uint32(src[:4])) + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] @@ -3868,40 +3838,23 @@ func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEHeaderIn) Packed() bool { - return f.Opcode.Packed() && f.Unique.Packed() +func (f *FUSEAttr) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes. - f.MarshalBytes(dst) - } +func (f *FUSEAttr) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) { - if f.Opcode.Packed() && f.Unique.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSEAttr) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3918,23 +3871,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3950,15 +3893,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { - if !f.Opcode.Packed() && f.Unique.Packed() { - // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -3974,71 +3909,53 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (f *FUSEGetAttrOut) SizeBytes() int { - return 16 + - (*FUSEAttr)(nil).SizeBytes() +func (f *FUSECreateMeta) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec)) +func (f *FUSECreateMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()]) - dst = dst[f.Attr.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) { - f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (f *FUSECreateMeta) UnmarshalBytes(src []byte) { + f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()]) - src = src[f.Attr.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (f *FUSEGetAttrOut) Packed() bool { - return f.Attr.Packed() +func (f *FUSECreateMeta) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) { - if f.Attr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(f)) - } else { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes. - f.MarshalBytes(dst) - } +func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) { - if f.Attr.Packed() { - safecopy.CopyOut(unsafe.Pointer(f), src) - } else { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes. - f.UnmarshalBytes(src) - } +func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - f.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -4055,23 +3972,13 @@ func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return f.CopyOutN(cc, addr, f.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - f.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -4087,15 +3994,108 @@ func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, } // WriteTo implements io.WriterTo.WriteTo. -func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) { - if !f.Attr.Packed() { - // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, f.SizeBytes()) - f.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } +func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (f *FUSEMknodMeta) SizeBytes() int { + return 16 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (f *FUSEMknodMeta) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) { + f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] +} +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (f *FUSEMknodMeta) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(f)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(f), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return f.CopyOutN(cc, addr, f.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f))) + hdr.Len = f.SizeBytes() + hdr.Cap = f.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that f + // must live until the use above. + runtime.KeepAlive(f) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -4648,104 +4648,6 @@ func (n *NumaPolicy) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IFConf) SizeBytes() int { - return 12 + - 1*4 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IFConf) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Len)) - dst = dst[4:] - // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} - dst = dst[1*(4):] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Ptr)) - dst = dst[8:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IFConf) UnmarshalBytes(src []byte) { - i.Len = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4]) - src = src[1*(4):] - i.Ptr = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (i *IFConf) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IFConf) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(i)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IFConf) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(i), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (i *IFConf) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (i *IFConf) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (i *IFConf) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i - // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (ifr *IFReq) SizeBytes() int { return 0 + 1*IFNAMSIZ + @@ -4849,175 +4751,50 @@ func (ifr *IFReq) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (tn *TableName) SizeBytes() int { - return 1 * XT_TABLE_MAXNAMELEN -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (tn *TableName) MarshalBytes(dst []byte) { - for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ { - dst[0] = byte(tn[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (tn *TableName) UnmarshalBytes(src []byte) { - for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ { - tn[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (tn *TableName) Packed() bool { - // Array newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (tn *TableName) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(tn)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (tn *TableName) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(tn), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) - hdr.Len = tn.SizeBytes() - hdr.Cap = tn.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that tn - // must live until the use above. - runtime.KeepAlive(tn) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (tn *TableName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return tn.CopyOutN(cc, addr, tn.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (tn *TableName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) - hdr.Len = tn.SizeBytes() - hdr.Cap = tn.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that tn - // must live until the use above. - runtime.KeepAlive(tn) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (tn *TableName) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) - hdr.Len = tn.SizeBytes() - hdr.Cap = tn.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that tn - // must live until the use above. - runtime.KeepAlive(tn) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IPTEntry) SizeBytes() int { +func (i *IFConf) SizeBytes() int { return 12 + - (*IPTIP)(nil).SizeBytes() + - (*XTCounters)(nil).SizeBytes() + 1*4 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *IPTEntry) MarshalBytes(dst []byte) { - i.IP.MarshalBytes(dst[:i.IP.SizeBytes()]) - dst = dst[i.IP.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache)) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback)) +func (i *IFConf) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Len)) dst = dst[4:] - i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()]) - dst = dst[i.Counters.SizeBytes():] + // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0} + dst = dst[1*(4):] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Ptr)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IPTEntry) UnmarshalBytes(src []byte) { - i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()]) - src = src[i.IP.SizeBytes():] - i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (i *IFConf) UnmarshalBytes(src []byte) { + i.Len = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()]) - src = src[i.Counters.SizeBytes():] + // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4]) + src = src[1*(4):] + i.Ptr = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IPTEntry) Packed() bool { - return i.Counters.Packed() && i.IP.Packed() +func (i *IFConf) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IPTEntry) MarshalUnsafe(dst []byte) { - if i.Counters.Packed() && i.IP.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) - } else { - // Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) - } +func (i *IFConf) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(i)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IPTEntry) UnmarshalUnsafe(src []byte) { - if i.Counters.Packed() && i.IP.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) - } else { - // Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) - } +func (i *IFConf) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(i), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. 
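The regenerated IFConf methods above spell out a 16-byte layout by hand: a 4-byte Len, 4 bytes of struct padding that are skipped, then an 8-byte Ptr. A standalone round-trip sketch of that layout; ifConf is an invented stand-in and binary.LittleEndian stands in for usermem.ByteOrder (which is the host byte order).

package main

import (
	"encoding/binary"
	"fmt"
)

// ifConf mirrors the 16-byte layout marshalled above: Len, 4 bytes of padding, Ptr.
type ifConf struct {
	Len int32
	Ptr uint64
}

func (c *ifConf) sizeBytes() int { return 4 + 4 + 8 }

func (c *ifConf) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], uint32(c.Len))
	dst = dst[4:]
	dst = dst[4:] // skip the 4 padding bytes, as the generator does
	binary.LittleEndian.PutUint64(dst[:8], c.Ptr)
}

func (c *ifConf) unmarshalBytes(src []byte) {
	c.Len = int32(binary.LittleEndian.Uint32(src[:4]))
	src = src[4:]
	src = src[4:] // padding
	c.Ptr = binary.LittleEndian.Uint64(src[:8])
}

func main() {
	in := ifConf{Len: 3, Ptr: 0x1000}
	buf := make([]byte, in.sizeBytes())
	in.marshalBytes(buf)

	var out ifConf
	out.unmarshalBytes(buf)
	fmt.Println("round trip ok:", in == out)
}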
//go:nosplit -func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Counters.Packed() && i.IP.Packed() { - // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (i *IFConf) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -5034,23 +4811,13 @@ func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IFConf) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Counters.Packed() && i.IP.Packed() { - // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (i *IFConf) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -5066,15 +4833,7 @@ func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error } // WriteTo implements io.WriterTo.WriteTo. -func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) { - if !i.Counters.Packed() && i.IP.Packed() { - // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (i *IFConf) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -5884,92 +5643,170 @@ func (en *ExtensionName) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *IP6TReplace) SizeBytes() int { - return 24 + - (*TableName)(nil).SizeBytes() + - 4*NF_INET_NUMHOOKS + - 4*NF_INET_NUMHOOKS +//go:nosplit +func (tn *TableName) SizeBytes() int { + return 1 * XT_TABLE_MAXNAMELEN } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (i *IP6TReplace) MarshalBytes(dst []byte) { - i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) - dst = dst[i.Name.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) - dst = dst[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) - dst = dst[4:] +func (tn *TableName) MarshalBytes(dst []byte) { + for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ { + dst[0] = byte(tn[idx]) + dst = dst[1:] } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) - dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (tn *TableName) UnmarshalBytes(src []byte) { + for idx := 0; idx < XT_TABLE_MAXNAMELEN; idx++ { + tn[idx] = src[0] + src = src[1:] } - usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (tn *TableName) Packed() bool { + // Array newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (tn *TableName) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(tn)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (tn *TableName) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(tn), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (tn *TableName) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) + hdr.Len = tn.SizeBytes() + hdr.Cap = tn.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that tn + // must live until the use above. + runtime.KeepAlive(tn) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (tn *TableName) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return tn.CopyOutN(cc, addr, tn.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (tn *TableName) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) + hdr.Len = tn.SizeBytes() + hdr.Cap = tn.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that tn + // must live until the use above. + runtime.KeepAlive(tn) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (tn *TableName) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tn))) + hdr.Len = tn.SizeBytes() + hdr.Cap = tn.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that tn + // must live until the use above. 
+ runtime.KeepAlive(tn) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *IPTEntry) SizeBytes() int { + return 12 + + (*IPTIP)(nil).SizeBytes() + + (*XTCounters)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IPTEntry) MarshalBytes(dst []byte) { + i.IP.MarshalBytes(dst[:i.IP.SizeBytes()]) + dst = dst[i.IP.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache)) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) - dst = dst[8:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback)) + dst = dst[4:] + i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()]) + dst = dst[i.Counters.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *IP6TReplace) UnmarshalBytes(src []byte) { - i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) - src = src[i.Name.SizeBytes():] - i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (i *IPTEntry) UnmarshalBytes(src []byte) { + i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()]) + src = src[i.IP.SizeBytes():] + i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { - i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - } - i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) + i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] - i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] + i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()]) + src = src[i.Counters.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *IP6TReplace) Packed() bool { - return i.Name.Packed() +func (i *IPTEntry) Packed() bool { + return i.Counters.Packed() && i.IP.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *IP6TReplace) MarshalUnsafe(dst []byte) { - if i.Name.Packed() { +func (i *IPTEntry) MarshalUnsafe(dst []byte) { + if i.Counters.Packed() && i.IP.Packed() { safecopy.CopyIn(dst, unsafe.Pointer(i)) } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. + // Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes. i.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { - if i.Name.Packed() { +func (i *IPTEntry) UnmarshalUnsafe(src []byte) { + if i.Counters.Packed() && i.IP.Packed() { safecopy.CopyOut(unsafe.Pointer(i), src) } else { - // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. + // Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes. i.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. 
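IPTEntry.Packed above is the conjunction of its fields' Packed results, and MarshalUnsafe/UnmarshalUnsafe branch on it: one wholesale copy of the struct's memory when the layout is packed, a field-by-field fallback otherwise. A small sketch of that split with invented types; the real fast path is a safecopy of the whole struct, while the sketch stays in safe Go, which produces the same bytes for a packed layout.

package main

import (
	"encoding/binary"
	"fmt"
)

type inner struct{ V uint32 }

func (i *inner) packed() bool   { return true }
func (i *inner) sizeBytes() int { return 4 }

func (i *inner) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], i.V)
}

type outer struct{ A, B inner }

// packed mirrors the generated IPTEntry.Packed: true only if every field is packed.
func (o *outer) packed() bool { return o.A.packed() && o.B.packed() }

func (o *outer) marshal(dst []byte) string {
	if o.packed() {
		// The generated code does a single unsafe copy of the whole struct here.
		o.A.marshalBytes(dst[:o.A.sizeBytes()])
		o.B.marshalBytes(dst[o.A.sizeBytes():])
		return "packed fast path"
	}
	// Fallback: marshal field by field, correct for any layout.
	o.A.marshalBytes(dst[:o.A.sizeBytes()])
	o.B.marshalBytes(dst[o.A.sizeBytes():])
	return "field-by-field fallback"
}

func main() {
	o := outer{A: inner{1}, B: inner{2}}
	buf := make([]byte, 8)
	fmt.Println(o.marshal(buf), buf)
}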
//go:nosplit -func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. +func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Counters.Packed() && i.IP.Packed() { + // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. i.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. @@ -5991,15 +5828,15 @@ func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. +func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Counters.Packed() && i.IP.Packed() { + // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes. buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a @@ -6023,9 +5860,9 @@ func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { - if !i.Name.Packed() { - // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. +func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) { + if !i.Counters.Packed() && i.IP.Packed() { + // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes. buf := make([]byte, i.SizeBytes()) i.MarshalBytes(buf) length, err := writer.Write(buf) @@ -6395,6 +6232,169 @@ func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (i *IP6TReplace) SizeBytes() int { + return 24 + + (*TableName)(nil).SizeBytes() + + 4*NF_INET_NUMHOOKS + + 4*NF_INET_NUMHOOKS +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (i *IP6TReplace) MarshalBytes(dst []byte) { + i.Name.MarshalBytes(dst[:i.Name.SizeBytes()]) + dst = dst[i.Name.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size)) + dst = dst[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx])) + dst = dst[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx])) + dst = dst[4:] + } + usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters)) + dst = dst[8:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
+func (i *IP6TReplace) UnmarshalBytes(src []byte) { + i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()]) + src = src[i.Name.SizeBytes():] + i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Size = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + for idx := 0; idx < NF_INET_NUMHOOKS; idx++ { + i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + } + i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (i *IP6TReplace) Packed() bool { + return i.Name.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (i *IP6TReplace) MarshalUnsafe(dst []byte) { + if i.Name.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (i *IP6TReplace) UnmarshalUnsafe(src []byte) { + if i.Name.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) { + if !i.Name.Packed() { + // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i + // must live until the use above. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrNetlink) SizeBytes() int { return 12 } @@ -8449,120 +8449,368 @@ func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SockAddrLink) SizeBytes() int { - return 12 + - 1*8 +func (t *TCPInfo) SizeBytes() int { + return 192 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SockAddrLink) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) - dst = dst[2:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol)) - dst = dst[2:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex)) - dst = dst[4:] - usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType)) - dst = dst[2:] - dst[0] = byte(s.PacketType) +func (t *TCPInfo) MarshalBytes(dst []byte) { + dst[0] = byte(t.State) dst = dst[1:] - dst[0] = byte(s.HardwareAddrLen) + dst[0] = byte(t.CaState) dst = dst[1:] - for idx := 0; idx < 8; idx++ { - dst[0] = byte(s.HardwareAddr[idx]) - dst = dst[1:] - } + dst[0] = byte(t.Retransmits) + dst = dst[1:] + dst[0] = byte(t.Probes) + dst = dst[1:] + dst[0] = byte(t.Backoff) + dst = dst[1:] + dst[0] = byte(t.Options) + dst = dst[1:] + dst[0] = byte(t.WindowScale) + dst = dst[1:] + dst[0] = byte(t.DeliveryRateAppLimited) + dst = dst[1:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], 
uint32(t.LastAckRecv)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut)) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
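TCPInfo.SizeBytes above is the fixed constant 192, and the MarshalBytes body accounts for it exactly: eight 1-byte fields, twenty-four 4-byte fields through TotalRetrans, four 8-byte pacing/byte counters, six more 4-byte segment counters, and four trailing 8-byte limits. A quick check of that arithmetic:

package main

import "fmt"

func main() {
	// Field counts read off the MarshalBytes body above.
	total := 8*1 + // State .. DeliveryRateAppLimited
		24*4 + // RTO .. TotalRetrans
		4*8 + // PacingRate .. BytesReceived
		6*4 + // SegsOut .. DataSegsOut
		4*8 // DeliveryRate .. SndBufLimited
	fmt.Println(total) // 192
}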
-func (s *SockAddrLink) UnmarshalBytes(src []byte) { - s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2])) - src = src[2:] - s.PacketType = src[0] +func (t *TCPInfo) UnmarshalBytes(src []byte) { + t.State = uint8(src[0]) src = src[1:] - s.HardwareAddrLen = src[0] + t.CaState = uint8(src[0]) src = src[1:] - for idx := 0; idx < 8; idx++ { - s.HardwareAddr[idx] = src[0] - src = src[1:] - } + t.Retransmits = uint8(src[0]) + src = src[1:] + t.Probes = uint8(src[0]) + src = src[1:] + t.Backoff = uint8(src[0]) + src = src[1:] + t.Options = uint8(src[0]) + src = src[1:] + t.WindowScale = uint8(src[0]) + src = src[1:] + t.DeliveryRateAppLimited = uint8(src[0]) + src = src[1:] + t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + 
t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SockAddrLink) Packed() bool { +func (t *TCPInfo) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SockAddrLink) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) +func (t *TCPInfo) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(t)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SockAddrLink) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) +func (t *TCPInfo) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(t), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) +func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return t.CopyOutN(cc, addr, t.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) { +func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (c *ControlMessageCredentials) SizeBytes() int { + return 12 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (c *ControlMessageCredentials) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID)) + dst = dst[4:] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) { + c.PID = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + c.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + c.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (c *ControlMessageCredentials) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(c)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(c), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that c + // must live until the use above. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return c.CopyOutN(cc, addr, c.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that c + // must live until the use above. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. 
+ return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) + hdr.Len = c.SizeBytes() + hdr.Cap = c.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that c + // must live until the use above. + runtime.KeepAlive(c) // escapes: replaced by intrinsic. return int64(length), err } @@ -8662,6 +8910,124 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SockAddrLink) SizeBytes() int { + return 12 + + 1*8 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SockAddrLink) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family)) + dst = dst[2:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol)) + dst = dst[2:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex)) + dst = dst[4:] + usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType)) + dst = dst[2:] + dst[0] = byte(s.PacketType) + dst = dst[1:] + dst[0] = byte(s.HardwareAddrLen) + dst = dst[1:] + for idx := 0; idx < 8; idx++ { + dst[0] = byte(s.HardwareAddr[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SockAddrLink) UnmarshalBytes(src []byte) { + s.Family = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2])) + src = src[2:] + s.PacketType = src[0] + src = src[1:] + s.HardwareAddrLen = src[0] + src = src[1:] + for idx := 0; idx < 8; idx++ { + s.HardwareAddr[idx] = src[0] + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SockAddrLink) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SockAddrLink) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SockAddrLink) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. 
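Every generated type also satisfies io.WriterTo, so its wire bytes can be streamed into any io.Writer without going through a CopyContext. A tiny sketch with an invented two-field type; the generated WriteTo methods write the struct's raw memory rather than re-encoding each field, but the sketch re-encodes for clarity.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

type pair struct{ A, B uint32 }

// WriteTo satisfies io.WriterTo by streaming the 8-byte encoding of p.
func (p *pair) WriteTo(w io.Writer) (int64, error) {
	var buf [8]byte
	binary.LittleEndian.PutUint32(buf[:4], p.A)
	binary.LittleEndian.PutUint32(buf[4:], p.B)
	n, err := w.Write(buf[:])
	return int64(n), err
}

var _ io.WriterTo = (*pair)(nil) // compile-time interface check

func main() {
	var sink bytes.Buffer
	if _, err := (&pair{A: 1, B: 2}).WriteTo(&sink); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", sink.Bytes())
}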
+//go:nosplit +func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SockAddrUnix) SizeBytes() int { return 2 + 1*UnixPathMax @@ -8853,221 +9219,41 @@ func (l *Linger) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (t *TCPInfo) SizeBytes() int { - return 192 +//go:nosplit +func (t *TimeT) SizeBytes() int { + return 8 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (t *TCPInfo) MarshalBytes(dst []byte) { - dst[0] = byte(t.State) - dst = dst[1:] - dst[0] = byte(t.CaState) - dst = dst[1:] - dst[0] = byte(t.Retransmits) - dst = dst[1:] - dst[0] = byte(t.Probes) - dst = dst[1:] - dst[0] = byte(t.Backoff) - dst = dst[1:] - dst[0] = byte(t.Options) - dst = dst[1:] - dst[0] = byte(t.WindowScale) - dst = dst[1:] - dst[0] = byte(t.DeliveryRateAppLimited) - dst = dst[1:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], 
uint32(t.Reordering)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut)) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited)) - dst = dst[8:] +func (t *TimeT) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(*t)) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (t *TCPInfo) UnmarshalBytes(src []byte) { - t.State = uint8(src[0]) - src = src[1:] - t.CaState = uint8(src[0]) - src = src[1:] - t.Retransmits = uint8(src[0]) - src = src[1:] - t.Probes = uint8(src[0]) - src = src[1:] - t.Backoff = uint8(src[0]) - src = src[1:] - t.Options = uint8(src[0]) - src = src[1:] - t.WindowScale = uint8(src[0]) - src = src[1:] - t.DeliveryRateAppLimited = uint8(src[0]) - src = src[1:] - t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.RcvSpace = 
uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] +func (t *TimeT) UnmarshalBytes(src []byte) { + *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8]))) } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (t *TCPInfo) Packed() bool { +func (t *TimeT) Packed() bool { + // Scalar newtypes are always packed. return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (t *TCPInfo) MarshalUnsafe(dst []byte) { +func (t *TimeT) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(t)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (t *TCPInfo) UnmarshalUnsafe(src []byte) { +func (t *TimeT) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(t), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -9084,13 +9270,13 @@ func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return t.CopyOutN(cc, addr, t.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -9106,7 +9292,7 @@ func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) } // WriteTo implements io.WriterTo.WriteTo. -func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) { +func (t *TimeT) WriteTo(w io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -9114,7 +9300,7 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) { hdr.Len = t.SizeBytes() hdr.Cap = t.SizeBytes() - length, err := writer.Write(buf) + length, err := w.Write(buf) // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. runtime.KeepAlive(t) // escapes: replaced by intrinsic. @@ -9122,103 +9308,6 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (c *ControlMessageCredentials) SizeBytes() int { - return 12 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (c *ControlMessageCredentials) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID)) - dst = dst[4:] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) { - c.PID = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - c.UID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - c.GID = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (c *ControlMessageCredentials) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(c)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(c), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return c.CopyOutN(cc, addr, c.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
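TimeT above is a scalar newtype: its whole marshalling story is a single PutUint64/Uint64 pair, and Packed is unconditionally true. A round-trip sketch with an invented timeT type, binary.LittleEndian standing in for usermem.ByteOrder.

package main

import (
	"encoding/binary"
	"fmt"
)

// timeT mirrors the 8-byte scalar newtype marshalled above.
type timeT int64

func (t *timeT) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(*t))
}

func (t *timeT) unmarshalBytes(src []byte) {
	*t = timeT(int64(binary.LittleEndian.Uint64(src[:8])))
}

func main() {
	in := timeT(1609286400) // an arbitrary Unix timestamp
	buf := make([]byte, 8)
	in.marshalBytes(buf)

	var out timeT
	out.unmarshalBytes(buf)
	fmt.Println(in == out, buf)
}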
-func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c))) - hdr.Len = c.SizeBytes() - hdr.Cap = c.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that c - // must live until the use above. - runtime.KeepAlive(c) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (ts *Timespec) SizeBytes() int { return 16 } @@ -9577,117 +9666,127 @@ func UnmarshalUnsafeTimevalSlice(dst []Timeval, src []byte) (int, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (i *ItimerVal) SizeBytes() int { +func (t *Tms) SizeBytes() int { return 0 + - (*Timeval)(nil).SizeBytes() + - (*Timeval)(nil).SizeBytes() + (*ClockT)(nil).SizeBytes() + + (*ClockT)(nil).SizeBytes() + + (*ClockT)(nil).SizeBytes() + + (*ClockT)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *ItimerVal) MarshalBytes(dst []byte) { - i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) - dst = dst[i.Interval.SizeBytes():] - i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) - dst = dst[i.Value.SizeBytes():] +func (t *Tms) MarshalBytes(dst []byte) { + t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()]) + dst = dst[t.UTime.SizeBytes():] + t.STime.MarshalBytes(dst[:t.STime.SizeBytes()]) + dst = dst[t.STime.SizeBytes():] + t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()]) + dst = dst[t.CUTime.SizeBytes():] + t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()]) + dst = dst[t.CSTime.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *ItimerVal) UnmarshalBytes(src []byte) { - i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) - src = src[i.Interval.SizeBytes():] - i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) - src = src[i.Value.SizeBytes():] +func (t *Tms) UnmarshalBytes(src []byte) { + t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()]) + src = src[t.UTime.SizeBytes():] + t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()]) + src = src[t.STime.SizeBytes():] + t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()]) + src = src[t.CUTime.SizeBytes():] + t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()]) + src = src[t.CSTime.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *ItimerVal) Packed() bool { - return i.Interval.Packed() && i.Value.Packed() +func (t *Tms) Packed() bool { + return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *ItimerVal) MarshalUnsafe(dst []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) +func (t *Tms) MarshalUnsafe(dst []byte) { + if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(t)) } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) + // Type Tms doesn't have a packed layout in memory, fallback to MarshalBytes. + t.MarshalBytes(dst) } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. 
-func (i *ItimerVal) UnmarshalUnsafe(src []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) +func (t *Tms) UnmarshalUnsafe(src []byte) { + if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { + safecopy.CopyOut(unsafe.Pointer(t), src) } else { - // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) + // Type Tms doesn't have a packed layout in memory, fallback to UnmarshalBytes. + t.UnmarshalBytes(src) } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. +func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { + // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay. + t.MarshalBytes(buf) // escapes: fallback. return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. } // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) +func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return t.CopyOutN(cc, addr, t.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. +func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { + // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay. length, err := cc.CopyInBytes(addr, buf) // escapes: okay. // Unmarshal unconditionally. If we had a short copy-in, this results in a // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. + t.UnmarshalBytes(buf) // escapes: fallback. return length, err } // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) +func (t *Tms) WriteTo(writer io.Writer) (int64, error) { + if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { + // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, t.SizeBytes()) + t.MarshalBytes(buf) length, err := writer.Write(buf) return int64(length), err } @@ -9695,14 +9794,14 @@ func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) + hdr.Len = t.SizeBytes() + hdr.Cap = t.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that t // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(t) // escapes: replaced by intrinsic. return int64(length), err } @@ -9804,184 +9903,262 @@ func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (u *Utime) SizeBytes() int { - return 16 +func (i *Itimerspec) SizeBytes() int { + return 0 + + (*Timespec)(nil).SizeBytes() + + (*Timespec)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Utime) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) - dst = dst[8:] +func (i *Itimerspec) MarshalBytes(dst []byte) { + i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) + dst = dst[i.Interval.SizeBytes():] + i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) + dst = dst[i.Value.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (u *Utime) UnmarshalBytes(src []byte) { - u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] +func (i *Itimerspec) UnmarshalBytes(src []byte) { + i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) + src = src[i.Interval.SizeBytes():] + i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) + src = src[i.Value.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. 
//go:nosplit -func (u *Utime) Packed() bool { - return true +func (i *Itimerspec) Packed() bool { + return i.Interval.Packed() && i.Value.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Utime) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(u)) +func (i *Itimerspec) MarshalUnsafe(dst []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type Itimerspec doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Utime) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(u), src) +func (i *Itimerspec) UnmarshalUnsafe(src []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type Itimerspec doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) +func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (u *Utime) WriteTo(writer io.Writer) (int64, error) { +func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return int64(length), err } // SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit -func (t *TimeT) SizeBytes() int { - return 8 +func (i *ItimerVal) SizeBytes() int { + return 0 + + (*Timeval)(nil).SizeBytes() + + (*Timeval)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (t *TimeT) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(*t)) +func (i *ItimerVal) MarshalBytes(dst []byte) { + i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) + dst = dst[i.Interval.SizeBytes():] + i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) + dst = dst[i.Value.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (t *TimeT) UnmarshalBytes(src []byte) { - *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8]))) +func (i *ItimerVal) UnmarshalBytes(src []byte) { + i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) + src = src[i.Interval.SizeBytes():] + i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) + src = src[i.Value.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (t *TimeT) Packed() bool { - // Scalar newtypes are always packed. - return true +func (i *ItimerVal) Packed() bool { + return i.Interval.Packed() && i.Value.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (t *TimeT) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(t)) +func (i *ItimerVal) MarshalUnsafe(dst []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(i)) + } else { + // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes. + i.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. 
-func (t *TimeT) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(t), src) +func (i *ItimerVal) UnmarshalUnsafe(src []byte) { + if i.Interval.Packed() && i.Value.Packed() { + safecopy.CopyOut(unsafe.Pointer(i), src) + } else { + // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes. + i.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + i.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return t.CopyOutN(cc, addr, t.SizeBytes()) +func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return i.CopyOutN(cc, addr, i.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + i.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. 
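
// Aside: the CopyOutN/CopyIn bodies above build a []byte that aliases the
// struct's own memory (reflect.SliceHeader plus gohacks.Noescape) so the copy
// needs no intermediate allocation, then call runtime.KeepAlive so the object
// cannot be collected while that raw view is still in use. Below is a minimal
// standalone sketch of the same idea, assuming Go 1.17+ and using
// unsafe.Slice instead of the SliceHeader/Noescape machinery; packedPair is
// purely illustrative.
package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

type packedPair struct {
	A uint64
	B uint64
}

func main() {
	p := &packedPair{A: 1, B: 2}

	// View p's 16 bytes of backing memory directly as a byte slice.
	buf := unsafe.Slice((*byte)(unsafe.Pointer(p)), unsafe.Sizeof(*p))

	// Whatever consumes buf (a copy to user memory, an io.Writer, ...) must
	// finish before p becomes unreachable; KeepAlive pins p until this
	// point, mirroring the generated code.
	fmt.Printf("% x\n", buf)
	runtime.KeepAlive(p)
}
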
-func (t *TimeT) WriteTo(w io.Writer) (int64, error) { +func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) { + if !i.Interval.Packed() && i.Value.Packed() { + // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, i.SizeBytes()) + i.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) + hdr.Len = i.SizeBytes() + hdr.Cap = i.SizeBytes() - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that t + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that i // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. + runtime.KeepAlive(i) // escapes: replaced by intrinsic. return int64(length), err } @@ -10075,146 +10252,6 @@ func (c *ClockT) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (t *Tms) SizeBytes() int { - return 0 + - (*ClockT)(nil).SizeBytes() + - (*ClockT)(nil).SizeBytes() + - (*ClockT)(nil).SizeBytes() + - (*ClockT)(nil).SizeBytes() -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (t *Tms) MarshalBytes(dst []byte) { - t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()]) - dst = dst[t.UTime.SizeBytes():] - t.STime.MarshalBytes(dst[:t.STime.SizeBytes()]) - dst = dst[t.STime.SizeBytes():] - t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()]) - dst = dst[t.CUTime.SizeBytes():] - t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()]) - dst = dst[t.CSTime.SizeBytes():] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (t *Tms) UnmarshalBytes(src []byte) { - t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()]) - src = src[t.UTime.SizeBytes():] - t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()]) - src = src[t.STime.SizeBytes():] - t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()]) - src = src[t.CUTime.SizeBytes():] - t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()]) - src = src[t.CSTime.SizeBytes():] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (t *Tms) Packed() bool { - return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (t *Tms) MarshalUnsafe(dst []byte) { - if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(t)) - } else { - // Type Tms doesn't have a packed layout in memory, fallback to MarshalBytes. - t.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (t *Tms) UnmarshalUnsafe(src []byte) { - if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { - safecopy.CopyOut(unsafe.Pointer(t), src) - } else { - // Type Tms doesn't have a packed layout in memory, fallback to UnmarshalBytes. - t.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. 
-//go:nosplit -func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { - // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay. - t.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return t.CopyOutN(cc, addr, t.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { - // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - t.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (t *Tms) WriteTo(writer io.Writer) (int64, error) { - if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() { - // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, t.SizeBytes()) - t.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t))) - hdr.Len = t.SizeBytes() - hdr.Cap = t.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that t - // must live until the use above. - runtime.KeepAlive(t) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit func (t *TimerID) SizeBytes() int { return 4 @@ -10304,132 +10341,95 @@ func (t *TimerID) WriteTo(w io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. 
-func (i *Itimerspec) SizeBytes() int { - return 0 + - (*Timespec)(nil).SizeBytes() + - (*Timespec)(nil).SizeBytes() +func (u *Utime) SizeBytes() int { + return 16 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (i *Itimerspec) MarshalBytes(dst []byte) { - i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()]) - dst = dst[i.Interval.SizeBytes():] - i.Value.MarshalBytes(dst[:i.Value.SizeBytes()]) - dst = dst[i.Value.SizeBytes():] +func (u *Utime) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime)) + dst = dst[8:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (i *Itimerspec) UnmarshalBytes(src []byte) { - i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()]) - src = src[i.Interval.SizeBytes():] - i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()]) - src = src[i.Value.SizeBytes():] +func (u *Utime) UnmarshalBytes(src []byte) { + u.Actime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (i *Itimerspec) Packed() bool { - return i.Interval.Packed() && i.Value.Packed() +func (u *Utime) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (i *Itimerspec) MarshalUnsafe(dst []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(i)) - } else { - // Type Itimerspec doesn't have a packed layout in memory, fallback to MarshalBytes. - i.MarshalBytes(dst) - } +func (u *Utime) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(u)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (i *Itimerspec) UnmarshalUnsafe(src []byte) { - if i.Interval.Packed() && i.Value.Packed() { - safecopy.CopyOut(unsafe.Pointer(i), src) - } else { - // Type Itimerspec doesn't have a packed layout in memory, fallback to UnmarshalBytes. - i.UnmarshalBytes(src) - } +func (u *Utime) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(u), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - i.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that u // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. return length, err } // CopyOut implements marshal.Marshallable.CopyOut. 
//go:nosplit -func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return i.CopyOutN(cc, addr, i.SizeBytes()) +func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - i.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that u // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. return length, err } // WriteTo implements io.WriterTo.WriteTo. -func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) { - if !i.Interval.Packed() && i.Value.Packed() { - // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, i.SizeBytes()) - i.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (u *Utime) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i))) - hdr.Len = i.SizeBytes() - hdr.Cap = i.SizeBytes() + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that i + // Since we bypassed the compiler's escape analysis, indicate that u // must live until the use above. - runtime.KeepAlive(i) // escapes: replaced by intrinsic. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. return int64(length), err } diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go index 469fb972f..e198e8ac2 100644 --- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go +++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go @@ -25,183 +25,6 @@ var _ marshal.Marshallable = (*Uint8)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. //go:nosplit -func (u *Uint8) SizeBytes() int { - return 1 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *Uint8) MarshalBytes(dst []byte) { - dst[0] = byte(*u) -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
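
// Aside: Utime above is a fixed 16-byte record (Actime, Modtime) marshalled
// field by field, advancing the destination slice after each write. The
// standalone sketch below round-trips the same layout; utimeLike is a
// hypothetical stand-in and binary.LittleEndian stands in for the
// usermem.ByteOrder used by the generated code.
package main

import (
	"encoding/binary"
	"fmt"
)

type utimeLike struct {
	Actime  int64
	Modtime int64
}

func (u *utimeLike) sizeBytes() int { return 16 }

func (u *utimeLike) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(u.Actime))
	dst = dst[8:] // advance past the field just written
	binary.LittleEndian.PutUint64(dst[:8], uint64(u.Modtime))
}

func (u *utimeLike) unmarshalBytes(src []byte) {
	u.Actime = int64(binary.LittleEndian.Uint64(src[:8]))
	src = src[8:]
	u.Modtime = int64(binary.LittleEndian.Uint64(src[:8]))
}

func main() {
	in := utimeLike{Actime: 100, Modtime: 250}
	buf := make([]byte, in.sizeBytes())
	in.marshalBytes(buf)

	var out utimeLike
	out.unmarshalBytes(buf)
	fmt.Println(out.Actime, out.Modtime) // 100 250
}
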
-func (u *Uint8) UnmarshalBytes(src []byte) { - *u = Uint8(uint8(src[0])) -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (u *Uint8) Packed() bool { - // Scalar newtypes are always packed. - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *Uint8) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(u)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *Uint8) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(u), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (u *Uint8) WriteTo(w io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := w.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return int64(length), err -} - -// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. -//go:nosplit -func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { - count := len(dst) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&dst) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(val) - hdr.Len = size * count - hdr.Cap = size * count - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that dst - // must live until the use above. - runtime.KeepAlive(dst) // escapes: replaced by intrinsic. 
- return length, err -} - -// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. -//go:nosplit -func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { - count := len(src) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&src) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(val) - hdr.Len = size * count - hdr.Cap = size * count - - length, err := cc.CopyOutBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that src - // must live until the use above. - runtime.KeepAlive(src) // escapes: replaced by intrinsic. - return length, err -} - -// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. -func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { - count := len(src) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&src) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - length, err := safecopy.CopyIn(dst[:(size*count)], val) - // Since we bypassed the compiler's escape analysis, indicate that src - // must live until the use above. - runtime.KeepAlive(src) // escapes: replaced by intrinsic. - return length, err -} - -// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. -func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { - count := len(dst) - if count == 0 { - return 0, nil - } - size := (*Uint8)(nil).SizeBytes() - - ptr := unsafe.Pointer(&dst) - val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) - - length, err := safecopy.CopyOut(val, src[:(size*count)]) - // Since we bypassed the compiler's escape analysis, indicate that dst - // must live until the use above. - runtime.KeepAlive(dst) // escapes: replaced by intrinsic. - return length, err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. -//go:nosplit func (i *Int16) SizeBytes() int { return 2 } @@ -1439,3 +1262,180 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) { return length, err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +//go:nosplit +func (u *Uint8) SizeBytes() int { + return 1 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *Uint8) MarshalBytes(dst []byte) { + dst[0] = byte(*u) +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *Uint8) UnmarshalBytes(src []byte) { + *u = Uint8(uint8(src[0])) +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *Uint8) Packed() bool { + // Scalar newtypes are always packed. + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (u *Uint8) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(u)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *Uint8) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(u), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *Uint8) WriteTo(w io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := w.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return int64(length), err +} + +// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory. +//go:nosplit +func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) { + count := len(dst) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&dst) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(val) + hdr.Len = size * count + hdr.Cap = size * count + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that dst + // must live until the use above. + runtime.KeepAlive(dst) // escapes: replaced by intrinsic. + return length, err +} + +// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory. +//go:nosplit +func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) { + count := len(src) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&src) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(val) + hdr.Len = size * count + hdr.Cap = size * count + + length, err := cc.CopyOutBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that src + // must live until the use above. + runtime.KeepAlive(src) // escapes: replaced by intrinsic. 
+ return length, err +} + +// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8. +func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) { + count := len(src) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&src) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + length, err := safecopy.CopyIn(dst[:(size*count)], val) + // Since we bypassed the compiler's escape analysis, indicate that src + // must live until the use above. + runtime.KeepAlive(src) // escapes: replaced by intrinsic. + return length, err +} + +// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8. +func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) { + count := len(dst) + if count == 0 { + return 0, nil + } + size := (*Uint8)(nil).SizeBytes() + + ptr := unsafe.Pointer(&dst) + val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data)) + + length, err := safecopy.CopyOut(val, src[:(size*count)]) + // Since we bypassed the compiler's escape analysis, indicate that dst + // must live until the use above. + runtime.KeepAlive(dst) // escapes: replaced by intrinsic. + return length, err +} + diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go index fe68f921d..2ddabb10b 100644 --- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go @@ -23,71 +23,53 @@ var _ marshal.Marshallable = (*SignalStack)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalAct) SizeBytes() int { - return 24 + - (*linux.SignalSet)(nil).SizeBytes() +func (s *SignalStack) SizeBytes() int { + return 24 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalAct) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Handler)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags)) +func (s *SignalStack) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr)) dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer)) + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flags)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size)) dst = dst[8:] - s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()]) - dst = dst[s.Mask.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalAct) UnmarshalBytes(src []byte) { - s.Handler = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) +func (s *SignalStack) UnmarshalBytes(src []byte) { + s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - s.Restorer = uint64(usermem.ByteOrder.Uint64(src[:8])) + s.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + s.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) src = src[8:] - s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()]) - src = src[s.Mask.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SignalAct) Packed() bool { - return s.Mask.Packed() +func (s *SignalStack) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
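
// Aside: CopyUint8SliceOut and MarshalUnsafeUint8Slice above treat the
// slice's whole backing array as one contiguous block of size*count bytes,
// so a single copy moves every element at once. The sketch below applies the
// same idea to a []uint32 using unsafe.Slice (Go 1.17+ assumed); the byte
// order in the output is simply the host's in-memory layout, which is why
// the generated code only takes this path for packed, fixed-layout types.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	src := []uint32{1, 2, 3}
	size := int(unsafe.Sizeof(src[0])) // 4 bytes per element

	// One byte view over the entire backing array: size * count bytes.
	view := unsafe.Slice((*byte)(unsafe.Pointer(&src[0])), size*len(src))

	dst := make([]byte, len(view))
	copy(dst, view) // single bulk copy instead of a per-element loop
	fmt.Printf("% x\n", dst)
}
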
-func (s *SignalAct) MarshalUnsafe(dst []byte) { - if s.Mask.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) - } else { - // Type SignalAct doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) - } +func (s *SignalStack) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalAct) UnmarshalUnsafe(src []byte) { - if s.Mask.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) - } else { - // Type SignalAct doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) - } +func (s *SignalStack) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Mask.Packed() { - // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - s.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - +func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -104,23 +86,13 @@ func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Mask.Packed() { - // Type SignalAct doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -136,15 +108,7 @@ func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, erro } // WriteTo implements io.WriterTo.WriteTo. -func (s *SignalAct) WriteTo(writer io.Writer) (int64, error) { - if !s.Mask.Packed() { - // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -160,53 +124,62 @@ func (s *SignalAct) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalStack) SizeBytes() int { - return 24 +func (s *SignalInfo) SizeBytes() int { + return 16 + + 1*(128-16) } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (s *SignalStack) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flags)) +func (s *SignalInfo) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) dst = dst[4:] // Padding: dst[:sizeof(uint32)] ~= uint32(0) dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size)) - dst = dst[8:] + for idx := 0; idx < (128-16); idx++ { + dst[0] = byte(s.Fields[idx]) + dst = dst[1:] + } } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalStack) UnmarshalBytes(src []byte) { - s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (s *SignalInfo) UnmarshalBytes(src []byte) { + s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ uint32 ~= src[:sizeof(uint32)] src = src[4:] - s.Size = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] + for idx := 0; idx < (128-16); idx++ { + s.Fields[idx] = src[0] + src = src[1:] + } } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SignalStack) Packed() bool { +func (s *SignalInfo) Packed() bool { return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalStack) MarshalUnsafe(dst []byte) { +func (s *SignalInfo) MarshalUnsafe(dst []byte) { safecopy.CopyIn(dst, unsafe.Pointer(s)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalStack) UnmarshalUnsafe(src []byte) { +func (s *SignalInfo) UnmarshalUnsafe(src []byte) { safecopy.CopyOut(unsafe.Pointer(s), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -223,13 +196,13 @@ func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -245,7 +218,7 @@ func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er } // WriteTo implements io.WriterTo.WriteTo. -func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { +func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. 
var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -261,62 +234,71 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalInfo) SizeBytes() int { - return 16 + - 1*(128-16) +func (s *SignalAct) SizeBytes() int { + return 24 + + (*linux.SignalSet)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalInfo) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - for idx := 0; idx < (128-16); idx++ { - dst[0] = byte(s.Fields[idx]) - dst = dst[1:] - } +func (s *SignalAct) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Handler)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer)) + dst = dst[8:] + s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()]) + dst = dst[s.Mask.SizeBytes():] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalInfo) UnmarshalBytes(src []byte) { - s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - for idx := 0; idx < (128-16); idx++ { - s.Fields[idx] = src[0] - src = src[1:] - } +func (s *SignalAct) UnmarshalBytes(src []byte) { + s.Handler = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Restorer = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()]) + src = src[s.Mask.SizeBytes():] } // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (s *SignalInfo) Packed() bool { - return true +func (s *SignalAct) Packed() bool { + return s.Mask.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalInfo) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) +func (s *SignalAct) MarshalUnsafe(dst []byte) { + if s.Mask.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) + } else { + // Type SignalAct doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalInfo) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) +func (s *SignalAct) UnmarshalUnsafe(src []byte) { + if s.Mask.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) + } else { + // Type SignalAct doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Mask.Packed() { + // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. 
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -333,13 +315,23 @@ func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return s.CopyOutN(cc, addr, s.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Mask.Packed() { + // Type SignalAct doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + s.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -355,7 +347,15 @@ func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err } // WriteTo implements io.WriterTo.WriteTo. -func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { +func (s *SignalAct) WriteTo(writer io.Writer) (int64, error) { + if !s.Mask.Packed() { + // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go index 95a73e115..aac25375e 100644 --- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go @@ -27,167 +27,6 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (u *UContext64) SizeBytes() int { - return 16 + - (*SignalStack)(nil).SizeBytes() + - (*linux.SignalSet)(nil).SizeBytes() + - 1*120 + - 1*8 + - (*SignalContext64)(nil).SizeBytes() -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (u *UContext64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link)) - dst = dst[8:] - u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()]) - dst = dst[u.Stack.SizeBytes():] - u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()]) - dst = dst[u.Sigset.SizeBytes():] - for idx := 0; idx < 120; idx++ { - dst[0] = byte(u._pad[idx]) - dst = dst[1:] - } - for idx := 0; idx < 8; idx++ { - dst[0] = byte(u._pad2[idx]) - dst = dst[1:] - } - u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()]) - dst = dst[u.MContext.SizeBytes():] -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
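
// Aside: SignalAct's CopyOutN/CopyIn above choose between two paths: when the
// embedded Mask reports Packed(), the struct's memory is copied as-is;
// otherwise the value is first marshalled field by field into a scratch
// buffer and that buffer is copied instead (and on a short copy-in the buffer
// is still unmarshalled, accepting a partially filled struct). The sketch
// below mirrors only the shape of that decision for a hypothetical type:
// packed() is hard-coded and copyOut just duplicates the bytes it is given,
// standing in for a real marshal.CopyContext.
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

type fakeAct struct {
	Handler uint64
	Flags   uint64
}

func (a *fakeAct) packed() bool { return true } // no padding between fields

func (a *fakeAct) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], a.Handler)
	binary.LittleEndian.PutUint64(dst[8:16], a.Flags)
}

// copyOut stands in for CopyOutBytes: it just returns a copy of buf.
func copyOut(buf []byte) []byte {
	out := make([]byte, len(buf))
	copy(out, buf)
	return out
}

func (a *fakeAct) copyOutAll() []byte {
	if !a.packed() {
		// Fallback: serialize into a scratch buffer first.
		buf := make([]byte, 16)
		a.marshalBytes(buf)
		return copyOut(buf)
	}
	// Fast path: hand over the struct's own memory directly.
	return copyOut(unsafe.Slice((*byte)(unsafe.Pointer(a)), unsafe.Sizeof(*a)))
}

func main() {
	a := &fakeAct{Handler: 0xdeadbeef, Flags: 4}
	fmt.Printf("% x\n", a.copyOutAll())
}
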
-func (u *UContext64) UnmarshalBytes(src []byte) { - u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Link = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()]) - src = src[u.Stack.SizeBytes():] - u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()]) - src = src[u.Sigset.SizeBytes():] - for idx := 0; idx < 120; idx++ { - u._pad[idx] = src[0] - src = src[1:] - } - for idx := 0; idx < 8; idx++ { - u._pad2[idx] = src[0] - src = src[1:] - } - u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()]) - src = src[u.MContext.SizeBytes():] -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (u *UContext64) Packed() bool { - return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (u *UContext64) MarshalUnsafe(dst []byte) { - if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(u)) - } else { - // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. - u.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (u *UContext64) UnmarshalUnsafe(src []byte) { - if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - safecopy.CopyOut(unsafe.Pointer(u), src) - } else { - // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - u.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. - u.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return u.CopyOutN(cc, addr, u.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - u.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. 
- var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { - if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { - // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, u.SizeBytes()) - u.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) - hdr.Len = u.SizeBytes() - hdr.Cap = u.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that u - // must live until the use above. - runtime.KeepAlive(u) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SignalContext64) SizeBytes() int { return 32 + 8*31 + @@ -590,3 +429,164 @@ func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (u *UContext64) SizeBytes() int { + return 16 + + (*SignalStack)(nil).SizeBytes() + + (*linux.SignalSet)(nil).SizeBytes() + + 1*120 + + 1*8 + + (*SignalContext64)(nil).SizeBytes() +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (u *UContext64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link)) + dst = dst[8:] + u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()]) + dst = dst[u.Stack.SizeBytes():] + u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()]) + dst = dst[u.Sigset.SizeBytes():] + for idx := 0; idx < 120; idx++ { + dst[0] = byte(u._pad[idx]) + dst = dst[1:] + } + for idx := 0; idx < 8; idx++ { + dst[0] = byte(u._pad2[idx]) + dst = dst[1:] + } + u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()]) + dst = dst[u.MContext.SizeBytes():] +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (u *UContext64) UnmarshalBytes(src []byte) { + u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Link = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()]) + src = src[u.Stack.SizeBytes():] + u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()]) + src = src[u.Sigset.SizeBytes():] + for idx := 0; idx < 120; idx++ { + u._pad[idx] = src[0] + src = src[1:] + } + for idx := 0; idx < 8; idx++ { + u._pad2[idx] = src[0] + src = src[1:] + } + u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()]) + src = src[u.MContext.SizeBytes():] +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (u *UContext64) Packed() bool { + return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
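The re-added SizeBytes and MarshalBytes above compose the total from scalar fields, nested Marshallable sizes, and the fixed padding arrays (1*120 and 1*8), re-slicing dst after every field. A small sketch of that composition pattern with a hypothetical nested type and an 8-byte pad; names and sizes are illustrative only.

package main

import (
	"encoding/binary"
	"fmt"
)

// inner is a hypothetical nested Marshallable stand-in (think SignalStack).
type inner struct{ A, B uint64 }

func (i *inner) sizeBytes() int { return 16 }

func (i *inner) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], i.A)
	binary.LittleEndian.PutUint64(dst[8:16], i.B)
}

// outer mirrors the UContext64 layout pattern: a scalar field, a nested
// type, then a fixed padding array.
type outer struct {
	Flags uint64
	Sub   inner
	_pad  [8]byte
}

// sizeBytes composes the total the same way the generated SizeBytes
// does: scalars + nested sizeBytes() + 1*len(padding).
func (o *outer) sizeBytes() int { return 8 + o.Sub.sizeBytes() + 1*8 }

// marshalBytes shows the generated pattern of delegating to the nested
// type and re-slicing dst after every field, including the pad bytes.
func (o *outer) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], o.Flags)
	dst = dst[8:]
	o.Sub.marshalBytes(dst[:o.Sub.sizeBytes()])
	dst = dst[o.Sub.sizeBytes():]
	for idx := 0; idx < 8; idx++ {
		dst[0] = o._pad[idx]
		dst = dst[1:]
	}
}

func main() {
	o := outer{Flags: 7, Sub: inner{A: 1, B: 2}}
	buf := make([]byte, o.sizeBytes())
	o.marshalBytes(buf)
	fmt.Printf("%d bytes: % x\n", len(buf), buf)
}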
+func (u *UContext64) MarshalUnsafe(dst []byte) { + if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(u)) + } else { + // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. + u.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (u *UContext64) UnmarshalUnsafe(src []byte) { + if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + safecopy.CopyOut(unsafe.Pointer(u), src) + } else { + // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + u.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. + u.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return u.CopyOutN(cc, addr, u.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + u.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { + if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() { + // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, u.SizeBytes()) + u.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. 
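CopyOut above is simply CopyOutN with limit set to SizeBytes(), and CopyOutN marshals the whole value but copies at most limit bytes out. Below is a standalone sketch of that contract; the real code goes through marshal.CopyContext and user memory, which are stood in for here by a plain byte slice.

package main

import (
	"encoding/binary"
	"fmt"
)

// msg is a hypothetical Marshallable stand-in with three 8-byte fields.
type msg struct{ A, B, C uint64 }

func (m *msg) sizeBytes() int { return 24 }

func (m *msg) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], m.A)
	binary.LittleEndian.PutUint64(dst[8:16], m.B)
	binary.LittleEndian.PutUint64(dst[16:24], m.C)
}

// copyOutN mirrors the CopyOutN contract: marshal the whole value, but
// copy at most limit bytes to the destination ("user memory" is just a
// byte slice in this sketch).
func (m *msg) copyOutN(userMem []byte, limit int) (int, error) {
	buf := make([]byte, m.sizeBytes())
	m.marshalBytes(buf)
	n := copy(userMem, buf[:limit])
	return n, nil
}

// copyOut mirrors CopyOut: the full-size special case of copyOutN.
func (m *msg) copyOut(userMem []byte) (int, error) {
	return m.copyOutN(userMem, m.sizeBytes())
}

func main() {
	m := msg{A: 1, B: 2, C: 3}
	user := make([]byte, m.sizeBytes())

	// Partial copy-out: only the first field reaches "user memory".
	n, _ := m.copyOutN(user, 8)
	fmt.Printf("partial: copied %d bytes: % x\n", n, user)

	// Full copy-out via the CopyOut-style wrapper.
	n, _ = m.copyOut(user)
	fmt.Printf("full: copied %d bytes: % x\n", n, user)
}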
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u))) + hdr.Len = u.SizeBytes() + hdr.Cap = u.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that u + // must live until the use above. + runtime.KeepAlive(u) // escapes: replaced by intrinsic. + return int64(length), err +} +
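The packed WriteTo fast path that closes the diff aliases the struct's memory with a reflect.SliceHeader, writes it, and pins the value with runtime.KeepAlive because the aliasing bypassed escape analysis. A standalone sketch of the same pattern for a hypothetical fixed-layout register struct, using only the standard library (gohacks.Noescape has no stdlib equivalent and is omitted; unsafe.Slice would be the modern alternative to reflect.SliceHeader):

package main

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

// regs is a hypothetical packed, fixed-layout type (think of
// SignalContext64's register array); all fields are uint64, so there is
// no implicit padding.
type regs struct {
	PC, SP uint64
	X      [4]uint64
}

// writeTo mirrors the packed WriteTo fast path: build a []byte header
// that aliases the struct's memory, hand it to the writer, and keep the
// value alive across the call because the aliasing bypassed the
// compiler's escape analysis.
func (r *regs) writeTo(w io.Writer) (int64, error) {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(r))
	hdr.Len = int(unsafe.Sizeof(*r))
	hdr.Cap = int(unsafe.Sizeof(*r))

	n, err := w.Write(buf)
	runtime.KeepAlive(r)
	return int64(n), err
}

func main() {
	r := regs{PC: 0x1000, SP: 0x2000, X: [4]uint64{1, 2, 3, 4}}
	var b bytes.Buffer
	n, err := r.writeTo(&b)
	fmt.Println(n, err, len(b.Bytes()))
}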