Diffstat (limited to 'pkg/abi')
-rw-r--r--  pkg/abi/linux/errqueue.go                     93
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go   4370
-rw-r--r--  pkg/abi/linux/sem.go                           2
3 files changed, 2467 insertions, 1998 deletions
diff --git a/pkg/abi/linux/errqueue.go b/pkg/abi/linux/errqueue.go
new file mode 100644
index 000000000..3905d4222
--- /dev/null
+++ b/pkg/abi/linux/errqueue.go
@@ -0,0 +1,93 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/marshal"
+)
+
+// Socket error origin codes as defined in include/uapi/linux/errqueue.h.
+const (
+ SO_EE_ORIGIN_NONE = 0
+ SO_EE_ORIGIN_LOCAL = 1
+ SO_EE_ORIGIN_ICMP = 2
+ SO_EE_ORIGIN_ICMP6 = 3
+)
+
+// SockExtendedErr represents struct sock_extended_err in Linux defined in
+// include/uapi/linux/errqueue.h.
+//
+// +marshal
+type SockExtendedErr struct {
+ Errno uint32
+ Origin uint8
+ Type uint8
+ Code uint8
+ Pad uint8
+ Info uint32
+ Data uint32
+}
+
+// SockErrCMsg represents the IP*_RECVERR control message.
+type SockErrCMsg interface {
+ marshal.Marshallable
+
+ CMsgLevel() uint32
+ CMsgType() uint32
+}
+
+// SockErrCMsgIPv4 is the IP_RECVERR control message used in
+// recvmsg(MSG_ERRQUEUE) by IPv4 sockets. This is equivalent to `struct errhdr`
+// defined in net/ipv4/ip_sockglue.c:ip_recv_error().
+//
+// +marshal
+type SockErrCMsgIPv4 struct {
+ SockExtendedErr
+ Offender SockAddrInet
+}
+
+var _ SockErrCMsg = (*SockErrCMsgIPv4)(nil)
+
+// CMsgLevel implements SockErrCMsg.CMsgLevel.
+func (*SockErrCMsgIPv4) CMsgLevel() uint32 {
+ return SOL_IP
+}
+
+// CMsgType implements SockErrCMsg.CMsgType.
+func (*SockErrCMsgIPv4) CMsgType() uint32 {
+ return IP_RECVERR
+}
+
+// SockErrCMsgIPv6 is the IPV6_RECVERR control message used in
+// recvmsg(MSG_ERRQUEUE) by IPv6 sockets. This is equivalent to `struct errhdr`
+// defined in net/ipv6/datagram.c:ipv6_recv_error().
+//
+// +marshal
+type SockErrCMsgIPv6 struct {
+ SockExtendedErr
+ Offender SockAddrInet6
+}
+
+var _ SockErrCMsg = (*SockErrCMsgIPv6)(nil)
+
+// CMsgLevel implements SockErrCMsg.CMsgLevel.
+func (*SockErrCMsgIPv6) CMsgLevel() uint32 {
+ return SOL_IPV6
+}
+
+// CMsgType implements SockErrCMsg.CMsgType.
+func (*SockErrCMsgIPv6) CMsgType() uint32 {
+ return IPV6_RECVERR
+}
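
A minimal, hypothetical usage sketch (not part of this change) showing how the new SockErrCMsg types might be used to build an IP_RECVERR control message payload. The SockAddrInet field names (Family, Addr) and the InetAddr type are assumed from the existing linux package; the errno value 111 is Linux's ECONNREFUSED.

// usage_sketch.go: hypothetical example, assumes SockAddrInet{Family, Addr}
// and InetAddr exist in pkg/abi/linux as described above.
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	cmsg := linux.SockErrCMsgIPv4{
		SockExtendedErr: linux.SockExtendedErr{
			Errno:  111,                     // ECONNREFUSED
			Origin: linux.SO_EE_ORIGIN_ICMP, // error reported by ICMP
			Type:   3,                       // ICMP destination unreachable
			Code:   3,                       // port unreachable
		},
		Offender: linux.SockAddrInet{
			Family: linux.AF_INET,
			Addr:   linux.InetAddr{192, 0, 2, 1},
		},
	}

	// The control message payload is the marshalled struct; cmsg_level and
	// cmsg_type come from the SockErrCMsg interface.
	buf := make([]byte, cmsg.SizeBytes())
	cmsg.MarshalBytes(buf)
	fmt.Println(cmsg.CMsgLevel(), cmsg.CMsgType(), len(buf))
}
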
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 11c782ce9..ead4bbcc2 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -80,6 +80,9 @@ var _ marshal.Marshallable = (*SockAddrInet6)(nil)
var _ marshal.Marshallable = (*SockAddrLink)(nil)
var _ marshal.Marshallable = (*SockAddrNetlink)(nil)
var _ marshal.Marshallable = (*SockAddrUnix)(nil)
+var _ marshal.Marshallable = (*SockErrCMsgIPv4)(nil)
+var _ marshal.Marshallable = (*SockErrCMsgIPv6)(nil)
+var _ marshal.Marshallable = (*SockExtendedErr)(nil)
var _ marshal.Marshallable = (*Statfs)(nil)
var _ marshal.Marshallable = (*Statx)(nil)
var _ marshal.Marshallable = (*StatxTimestamp)(nil)
@@ -797,67 +800,418 @@ func UnmarshalUnsafeCapUserDataSlice(dst []CapUserData, src []byte) (int, error)
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *Flock) SizeBytes() int {
- return 24 +
- 1*4 +
- 1*4
+func (s *SockExtendedErr) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *Flock) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Type))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Whence))
- dst = dst[2:]
- // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
- dst = dst[1*(4):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Start))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Len))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Pid))
+func (s *SockExtendedErr) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ dst[0] = byte(s.Origin)
+ dst = dst[1:]
+ dst[0] = byte(s.Type)
+ dst = dst[1:]
+ dst[0] = byte(s.Code)
+ dst = dst[1:]
+ dst[0] = byte(s.Pad)
+ dst = dst[1:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Info))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Data))
dst = dst[4:]
- // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
- dst = dst[1*(4):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *Flock) UnmarshalBytes(src []byte) {
- f.Type = int16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.Whence = int16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
- src = src[1*(4):]
- f.Start = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Len = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Pid = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (s *SockExtendedErr) UnmarshalBytes(src []byte) {
+ s.Errno = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Origin = uint8(src[0])
+ src = src[1:]
+ s.Type = uint8(src[0])
+ src = src[1:]
+ s.Code = uint8(src[0])
+ src = src[1:]
+ s.Pad = uint8(src[0])
+ src = src[1:]
+ s.Info = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Data = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
- src = src[1*(4):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *Flock) Packed() bool {
+func (s *SockExtendedErr) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *Flock) MarshalUnsafe(dst []byte) {
+func (s *SockExtendedErr) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockExtendedErr) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockExtendedErr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockExtendedErr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockExtendedErr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockExtendedErr) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockErrCMsgIPv4) SizeBytes() int {
+ return 0 +
+ (*SockExtendedErr)(nil).SizeBytes() +
+ (*SockAddrInet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockErrCMsgIPv4) MarshalBytes(dst []byte) {
+ s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()])
+ dst = dst[s.SockExtendedErr.SizeBytes():]
+ s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()])
+ dst = dst[s.Offender.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockErrCMsgIPv4) UnmarshalBytes(src []byte) {
+ s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()])
+ src = src[s.SockExtendedErr.SizeBytes():]
+ s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()])
+ src = src[s.Offender.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockErrCMsgIPv4) Packed() bool {
+ return s.Offender.Packed() && s.SockExtendedErr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockErrCMsgIPv4) MarshalUnsafe(dst []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockErrCMsgIPv4) UnmarshalUnsafe(src []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockErrCMsgIPv4) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockErrCMsgIPv4) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv4 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockErrCMsgIPv6) SizeBytes() int {
+ return 0 +
+ (*SockExtendedErr)(nil).SizeBytes() +
+ (*SockAddrInet6)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockErrCMsgIPv6) MarshalBytes(dst []byte) {
+ s.SockExtendedErr.MarshalBytes(dst[:s.SockExtendedErr.SizeBytes()])
+ dst = dst[s.SockExtendedErr.SizeBytes():]
+ s.Offender.MarshalBytes(dst[:s.Offender.SizeBytes()])
+ dst = dst[s.Offender.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockErrCMsgIPv6) UnmarshalBytes(src []byte) {
+ s.SockExtendedErr.UnmarshalBytes(src[:s.SockExtendedErr.SizeBytes()])
+ src = src[s.SockExtendedErr.SizeBytes():]
+ s.Offender.UnmarshalBytes(src[:s.Offender.SizeBytes()])
+ src = src[s.Offender.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockErrCMsgIPv6) Packed() bool {
+ return s.Offender.Packed() && s.SockExtendedErr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockErrCMsgIPv6) MarshalUnsafe(dst []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockErrCMsgIPv6) UnmarshalUnsafe(src []byte) {
+ if s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockErrCMsgIPv6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockErrCMsgIPv6) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Offender.Packed() && s.SockExtendedErr.Packed() {
+ // Type SockErrCMsgIPv6 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FOwnerEx) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FOwnerEx) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FOwnerEx) UnmarshalBytes(src []byte) {
+ f.Type = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FOwnerEx) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FOwnerEx) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *Flock) UnmarshalUnsafe(src []byte) {
+func (f *FOwnerEx) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -874,13 +1228,13 @@ func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *Flock) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -896,7 +1250,7 @@ func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *Flock) WriteTo(writer io.Writer) (int64, error) {
+func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -912,45 +1266,67 @@ func (f *Flock) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FOwnerEx) SizeBytes() int {
- return 8
+func (f *Flock) SizeBytes() int {
+ return 24 +
+ 1*4 +
+ 1*4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FOwnerEx) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+func (f *Flock) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Type))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.Whence))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Start))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Len))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Pid))
dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FOwnerEx) UnmarshalBytes(src []byte) {
- f.Type = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *Flock) UnmarshalBytes(src []byte) {
+ f.Type = int16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.Whence = int16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ f.Start = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Len = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Pid = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ // Padding: ~ copy([4]byte(f._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FOwnerEx) Packed() bool {
+func (f *Flock) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FOwnerEx) MarshalUnsafe(dst []byte) {
+func (f *Flock) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FOwnerEx) UnmarshalUnsafe(src []byte) {
+func (f *Flock) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *Flock) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -967,13 +1343,13 @@ func (f *FOwnerEx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FOwnerEx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *Flock) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *Flock) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -989,7 +1365,7 @@ func (f *FOwnerEx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FOwnerEx) WriteTo(writer io.Writer) (int64, error) {
+func (f *Flock) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1352,301 +1728,19 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderOut) SizeBytes() int {
- return 8 +
- (*FUSEOpID)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
- dst = dst[4:]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (f *FUSEHeaderOut) Packed() bool {
- return f.Unique.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
- if f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
- if f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return f.CopyOutN(cc, addr, f.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEAttr) SizeBytes() int {
- return 88
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEAttr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEAttr) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (f *FUSEAttr) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return f.CopyOutN(cc, addr, f.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReadIn) SizeBytes() int {
+func (f *FUSEWriteIn) SizeBytes() int {
return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReadIn) MarshalBytes(dst []byte) {
+func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
@@ -1657,14 +1751,14 @@ func (f *FUSEReadIn) MarshalBytes(dst []byte) {
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
+func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
@@ -1676,23 +1770,23 @@ func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReadIn) Packed() bool {
+func (f *FUSEWriteIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1709,13 +1803,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1731,7 +1825,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1836,88 +1930,82 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderIn) SizeBytes() int {
- return 28 +
- (*FUSEOpcode)(nil).SizeBytes() +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSEInitOut) SizeBytes() int {
+ return 32 +
+ 4*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
- dst = dst[f.Opcode.SizeBytes():]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
+ dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
- src = src[f.Opcode.SizeBytes():]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
+ src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderIn) Packed() bool {
- return f.Opcode.Packed() && f.Unique.Packed()
+func (f *FUSEInitOut) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1934,23 +2022,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1966,15 +2044,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1990,53 +2060,101 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitIn) SizeBytes() int {
- return 16
+func (f *FUSEAttr) SizeBytes() int {
+ return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
+func (f *FUSEAttr) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEAttr) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitIn) Packed() bool {
+func (f *FUSEAttr) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2053,13 +2171,13 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2075,7 +2193,7 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2240,53 +2358,49 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReleaseIn) SizeBytes() int {
- return 24
+func (f *FUSEOpenOut) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReleaseIn) Packed() bool {
+func (f *FUSEOpenOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2303,13 +2417,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2325,7 +2439,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2341,53 +2455,41 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSECreateMeta) SizeBytes() int {
- return 16
+//go:nosplit
+func (f *FUSEOpcode) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
+func (f *FUSEOpcode) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
+func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
+ *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSECreateMeta) Packed() bool {
+func (f *FUSEOpcode) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2404,13 +2506,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2426,7 +2528,7 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2434,7 +2536,7 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -2442,13 +2544,12 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitOut) SizeBytes() int {
- return 32 +
- 4*8
+func (f *FUSEInitIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+func (f *FUSEInitIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
@@ -2457,24 +2558,10 @@ func (f *FUSEInitOut) MarshalBytes(dst []byte) {
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
- dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
- dst = dst[2:]
- // Padding: dst[:sizeof(uint16)] ~= uint16(0)
- dst = dst[2:]
- // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
- dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
@@ -2483,41 +2570,27 @@ func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
src = src[4:]
f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- // Padding: var _ uint16 ~= src[:sizeof(uint16)]
- src = src[2:]
- // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
- src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitOut) Packed() bool {
+func (f *FUSEInitIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2534,13 +2607,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2556,7 +2629,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2669,116 +2742,19 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenOut) SizeBytes() int {
- return 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (f *FUSEOpenOut) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return f.CopyOutN(cc, addr, f.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
- hdr.Len = f.SizeBytes()
- hdr.Cap = f.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that f
- // must live until the use above.
- runtime.KeepAlive(f) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteIn) SizeBytes() int {
+func (f *FUSEReadIn) SizeBytes() int {
return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
+func (f *FUSEReadIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
@@ -2789,14 +2765,14 @@ func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
+func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
@@ -2808,23 +2784,23 @@ func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteIn) Packed() bool {
+func (f *FUSEReadIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2841,13 +2817,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2863,7 +2839,7 @@ func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2879,29 +2855,21 @@ func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMknodMeta) SizeBytes() int {
- return 16
+func (f *FUSEWriteOut) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2909,23 +2877,23 @@ func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMknodMeta) Packed() bool {
+func (f *FUSEWriteOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2942,13 +2910,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2964,7 +2932,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2980,101 +2948,45 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSESetAttrIn) SizeBytes() int {
- return 88
+func (f *FUSEMkdirMeta) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
- f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSESetAttrIn) Packed() bool {
+func (f *FUSEMkdirMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3091,13 +3003,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3113,7 +3025,7 @@ func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3129,41 +3041,67 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpcode) SizeBytes() int {
- return 4
+func (f *FUSEHeaderOut) SizeBytes() int {
+ return 8 +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpcode) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
+func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ dst = dst[4:]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
- *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpcode) Packed() bool {
- // Scalar newtypes are always packed.
- return true
+func (f *FUSEHeaderOut) Packed() bool {
+ return f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3180,13 +3118,23 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3202,7 +3150,15 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3210,7 +3166,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -3448,21 +3404,130 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteOut) SizeBytes() int {
- return 8
+func (f *FUSEReleaseIn) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEReleaseIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSEMknodMeta) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -3470,23 +3535,23 @@ func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteOut) Packed() bool {
+func (f *FUSEMknodMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3503,13 +3568,13 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3525,7 +3590,7 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3541,45 +3606,207 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMkdirMeta) SizeBytes() int {
- return 8
+func (f *FUSEHeaderIn) SizeBytes() int {
+ return 28 +
+ (*FUSEOpcode)(nil).SizeBytes() +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
+func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
+ dst = dst[f.Opcode.SizeBytes():]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
+ src = src[f.Opcode.SizeBytes():]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSEHeaderIn) Packed() bool {
+ return f.Opcode.Packed() && f.Unique.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSECreateMeta) SizeBytes() int {
+ return 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
+func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMkdirMeta) Packed() bool {
+func (f *FUSECreateMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3596,13 +3823,13 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3618,7 +3845,7 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3735,6 +3962,155 @@ func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FUSESetAttrIn) SizeBytes() int {
+ return 88
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
+ f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FUSESetAttrIn) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (r *RobustListHead) SizeBytes() int {
return 24
}
@@ -4473,295 +4849,6 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTGetinfo) SizeBytes() int {
- return 12 +
- (*TableName)(nil).SizeBytes() +
- 4*NF_INET_NUMHOOKS +
- 4*NF_INET_NUMHOOKS
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTGetinfo) MarshalBytes(dst []byte) {
- i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
- dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
- dst = dst[4:]
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
- dst = dst[4:]
- }
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
- dst = dst[4:]
- }
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
- i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
- src = src[i.Name.SizeBytes():]
- i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- }
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- }
- i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IPTGetinfo) Packed() bool {
- return i.Name.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTGetinfo) MarshalUnsafe(dst []byte) {
- if i.Name.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
- if i.Name.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
- if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTGetEntries) SizeBytes() int {
- return 4 +
- (*TableName)(nil).SizeBytes() +
- 1*4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTGetEntries) MarshalBytes(dst []byte) {
- i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
- dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
- dst = dst[4:]
- // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
- dst = dst[1*(4):]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
- i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
- src = src[i.Name.SizeBytes():]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
- src = src[1*(4):]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IPTGetEntries) Packed() bool {
- return i.Name.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTGetEntries) MarshalUnsafe(dst []byte) {
- if i.Name.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
- if i.Name.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
- if !i.Name.Packed() {
- // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (en *ExtensionName) SizeBytes() int {
return 1 * XT_EXTENSION_MAXNAMELEN
@@ -5508,6 +5595,295 @@ func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetinfo) SizeBytes() int {
+ return 12 +
+ (*TableName)(nil).SizeBytes() +
+ 4*NF_INET_NUMHOOKS +
+ 4*NF_INET_NUMHOOKS
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetinfo) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ dst = dst[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ dst = dst[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ dst = dst[4:]
+ }
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetinfo) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetinfo) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
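The IPTGetinfo MarshalBytes/UnmarshalBytes pair above encodes the struct field by field in wire order, advancing through the buffer as each fixed-width field is written or read, and skipping any explicit padding regions so the byte count matches the C struct exactly. A minimal stand-alone sketch of that layout style, using only encoding/binary (the miniInfo type and its fields are illustrative, not the generated types above):

package main

import (
	"encoding/binary"
	"fmt"
)

// miniInfo is a hypothetical three-field record laid out like the
// fixed-width fields above: each field is little-endian and occupies
// exactly four bytes on the wire.
type miniInfo struct {
	ValidHooks uint32
	NumEntries uint32
	Size       uint32
}

func (m *miniInfo) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], m.ValidHooks)
	binary.LittleEndian.PutUint32(dst[4:8], m.NumEntries)
	binary.LittleEndian.PutUint32(dst[8:12], m.Size)
}

func (m *miniInfo) unmarshalBytes(src []byte) {
	m.ValidHooks = binary.LittleEndian.Uint32(src[0:4])
	m.NumEntries = binary.LittleEndian.Uint32(src[4:8])
	m.Size = binary.LittleEndian.Uint32(src[8:12])
}

func main() {
	in := miniInfo{ValidHooks: 0x1f, NumEntries: 4, Size: 512}
	buf := make([]byte, 12)
	in.marshalBytes(buf)

	var out miniInfo
	out.unmarshalBytes(buf)
	fmt.Printf("%+v\n", out) // prints {ValidHooks:31 NumEntries:4 Size:512}
}
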
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetEntries) SizeBytes() int {
+ return 4 +
+ (*TableName)(nil).SizeBytes() +
+ 1*4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetEntries) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetEntries) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(i._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetEntries) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetEntries) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetEntries) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetEntries) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetEntries) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetEntries) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetEntries) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetEntries doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
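When Packed() reports that the Go layout matches the wire layout byte for byte, the CopyOutN/CopyIn/WriteTo fast paths above skip the intermediate buffer entirely: they alias the struct's own memory as a []byte through a reflect.SliceHeader and pin the struct with runtime.KeepAlive, because the aliasing bypasses escape analysis. A minimal stand-alone sketch of that aliasing trick (standard library only; the pair type and asBytes helper are illustrative and not part of this file):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// pair has no padding, so viewing its memory as bytes matches its wire form.
type pair struct {
	A uint32
	B uint32
}

// asBytes aliases p's memory as a byte slice without copying, mirroring the
// SliceHeader construction in the generated fast paths above.
func asBytes(p *pair) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(p))
	hdr.Len = int(unsafe.Sizeof(*p))
	hdr.Cap = int(unsafe.Sizeof(*p))
	return buf
}

func main() {
	p := &pair{A: 1, B: 2}
	buf := asBytes(p)
	fmt.Println(len(buf), buf[0], buf[4]) // 8 1 2 on a little-endian machine
	// The aliased slice bypasses escape analysis, so the struct must be kept
	// alive until the last use of buf, exactly as the generated code does.
	runtime.KeepAlive(p)
}
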
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (i *IP6TReplace) SizeBytes() int {
return 24 +
(*TableName)(nil).SizeBytes() +
@@ -7730,160 +8106,119 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrInet) SizeBytes() int {
- return 4 +
- (*InetAddr)(nil).SizeBytes() +
- 1*8
+func (c *ControlMessageCredentials) SizeBytes() int {
+ return 12
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrInet) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
- dst = dst[2:]
- s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
- dst = dst[s.Addr.SizeBytes():]
- // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0}
- dst = dst[1*(8):]
+func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrInet) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
- src = src[s.Addr.SizeBytes():]
- // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8])
- src = src[1*(8):]
+func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
+ c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrInet) Packed() bool {
- return s.Addr.Packed()
+func (c *ControlMessageCredentials) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrInet) MarshalUnsafe(dst []byte) {
- if s.Addr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
- // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes.
- s.MarshalBytes(dst)
- }
+func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(c))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
- if s.Addr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
- // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
+func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(c), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Inet6Addr) SizeBytes() int {
- return 1 * 16
+func (i *InetAddr) SizeBytes() int {
+ return 1 * 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Inet6Addr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 16; idx++ {
+func (i *InetAddr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 4; idx++ {
dst[0] = byte(i[idx])
dst = dst[1:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Inet6Addr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 16; idx++ {
+func (i *InetAddr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 4; idx++ {
i[idx] = src[0]
src = src[1:]
}
@@ -7891,24 +8226,24 @@ func (i *Inet6Addr) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Inet6Addr) Packed() bool {
+func (i *InetAddr) Packed() bool {
// Array newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Inet6Addr) MarshalUnsafe(dst []byte) {
+func (i *InetAddr) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
+func (i *InetAddr) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7925,13 +8260,13 @@ func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7947,7 +8282,7 @@ func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, erro
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
+func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7963,291 +8298,22 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *TCPInfo) SizeBytes() int {
- return 192
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TCPInfo) MarshalBytes(dst []byte) {
- dst[0] = byte(t.State)
- dst = dst[1:]
- dst[0] = byte(t.CaState)
- dst = dst[1:]
- dst[0] = byte(t.Retransmits)
- dst = dst[1:]
- dst[0] = byte(t.Probes)
- dst = dst[1:]
- dst[0] = byte(t.Backoff)
- dst = dst[1:]
- dst[0] = byte(t.Options)
- dst = dst[1:]
- dst[0] = byte(t.WindowScale)
- dst = dst[1:]
- dst[0] = byte(t.DeliveryRateAppLimited)
- dst = dst[1:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
- dst = dst[8:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TCPInfo) UnmarshalBytes(src []byte) {
- t.State = uint8(src[0])
- src = src[1:]
- t.CaState = uint8(src[0])
- src = src[1:]
- t.Retransmits = uint8(src[0])
- src = src[1:]
- t.Probes = uint8(src[0])
- src = src[1:]
- t.Backoff = uint8(src[0])
- src = src[1:]
- t.Options = uint8(src[0])
- src = src[1:]
- t.WindowScale = uint8(src[0])
- src = src[1:]
- t.DeliveryRateAppLimited = uint8(src[0])
- src = src[1:]
- t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (t *TCPInfo) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TCPInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *InetAddr) SizeBytes() int {
- return 1 * 4
+func (i *Inet6Addr) SizeBytes() int {
+ return 1 * 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *InetAddr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 4; idx++ {
+func (i *Inet6Addr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 16; idx++ {
dst[0] = byte(i[idx])
dst = dst[1:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *InetAddr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 4; idx++ {
+func (i *Inet6Addr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 16; idx++ {
i[idx] = src[0]
src = src[1:]
}
@@ -8255,24 +8321,24 @@ func (i *InetAddr) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *InetAddr) Packed() bool {
+func (i *Inet6Addr) Packed() bool {
// Array newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *InetAddr) MarshalUnsafe(dst []byte) {
+func (i *Inet6Addr) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *InetAddr) UnmarshalUnsafe(src []byte) {
+func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8289,13 +8355,13 @@ func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8311,7 +8377,7 @@ func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
+func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8555,104 +8621,6 @@ func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrUnix) SizeBytes() int {
- return 2 +
- 1*UnixPathMax
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrUnix) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- for idx := 0; idx < UnixPathMax; idx++ {
- dst[0] = byte(s.Path[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- for idx := 0; idx < UnixPathMax; idx++ {
- s.Path[idx] = int8(src[0])
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SockAddrUnix) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrUnix) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (l *Linger) SizeBytes() int {
return 8
}
@@ -8746,214 +8714,125 @@ func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (c *ControlMessageCredentials) SizeBytes() int {
- return 12
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
- c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (c *ControlMessageCredentials) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(c))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(c), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return c.CopyOutN(cc, addr, c.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *Itimerspec) SizeBytes() int {
- return 0 +
- (*Timespec)(nil).SizeBytes() +
- (*Timespec)(nil).SizeBytes()
+func (s *SockAddrInet) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ 1*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Itimerspec) MarshalBytes(dst []byte) {
- i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
- dst = dst[i.Interval.SizeBytes():]
- i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
- dst = dst[i.Value.SizeBytes():]
+func (s *SockAddrInet) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ dst = dst[2:]
+ s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
+ dst = dst[s.Addr.SizeBytes():]
+ // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0}
+ dst = dst[1*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Itimerspec) UnmarshalBytes(src []byte) {
- i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
- src = src[i.Interval.SizeBytes():]
- i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
- src = src[i.Value.SizeBytes():]
+func (s *SockAddrInet) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
+ src = src[s.Addr.SizeBytes():]
+ // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8])
+ src = src[1*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Itimerspec) Packed() bool {
- return i.Interval.Packed() && i.Value.Packed()
+func (s *SockAddrInet) Packed() bool {
+ return s.Addr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Itimerspec) MarshalUnsafe(dst []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (s *SockAddrInet) MarshalUnsafe(dst []byte) {
+ if s.Addr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
} else {
- // Type Itimerspec doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
+ // Type SockAddrInet doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
}
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Itimerspec) UnmarshalUnsafe(src []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
+ if s.Addr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
- // Type Itimerspec doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
+ // Type SockAddrInet doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
}
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
+func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
+ s.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
+func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}
@@ -8961,183 +8840,331 @@ func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
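For a type such as SockAddrInet, whose packed-ness depends on an embedded field, the fallback CopyIn path above reads into a full-size scratch buffer and then decodes unconditionally, so a short copy-in returns the short length and error alongside a partially populated struct. A stand-alone sketch of that behaviour (the byteDecoder interface and copyInFallback helper are illustrative, not the generated API):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// byteDecoder is a hypothetical stand-in for SizeBytes plus UnmarshalBytes.
type byteDecoder interface {
	SizeBytes() int
	UnmarshalBytes(src []byte)
}

type pairRecord struct {
	A uint32
	B uint32
}

func (p *pairRecord) SizeBytes() int { return 8 }

func (p *pairRecord) UnmarshalBytes(src []byte) {
	p.A = binary.LittleEndian.Uint32(src[0:4])
	p.B = binary.LittleEndian.Uint32(src[4:8])
}

// copyInFallback mirrors the fallback path above: read into a full-size
// scratch buffer, then decode unconditionally, even after a short read.
func copyInFallback(r io.Reader, d byteDecoder) (int, error) {
	buf := make([]byte, d.SizeBytes())
	n, err := io.ReadFull(r, buf)
	d.UnmarshalBytes(buf) // a short read leaves the tail of buf zeroed
	return n, err
}

func main() {
	var p pairRecord
	// Only 6 of the 8 bytes arrive: B's upper bytes come from the zeroed
	// scratch buffer rather than from the reader.
	short := bytes.NewReader([]byte{1, 0, 0, 0, 2, 0})
	n, err := copyInFallback(short, &p)
	fmt.Println(n, err, p.A, p.B) // 6 unexpected EOF 1 2
}
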
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *ItimerVal) SizeBytes() int {
- return 0 +
- (*Timeval)(nil).SizeBytes() +
- (*Timeval)(nil).SizeBytes()
+func (s *SockAddrUnix) SizeBytes() int {
+ return 2 +
+ 1*UnixPathMax
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *ItimerVal) MarshalBytes(dst []byte) {
- i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
- dst = dst[i.Interval.SizeBytes():]
- i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
- dst = dst[i.Value.SizeBytes():]
+func (s *SockAddrUnix) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ dst[0] = byte(s.Path[idx])
+ dst = dst[1:]
+ }
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *ItimerVal) UnmarshalBytes(src []byte) {
- i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
- src = src[i.Interval.SizeBytes():]
- i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
- src = src[i.Value.SizeBytes():]
+func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ s.Path[idx] = int8(src[0])
+ src = src[1:]
+ }
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *ItimerVal) Packed() bool {
- return i.Interval.Packed() && i.Value.Packed()
+func (s *SockAddrUnix) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *ItimerVal) MarshalUnsafe(dst []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
+func (s *SockAddrUnix) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
+func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimerID) SizeBytes() int {
- return 4
+func (t *TCPInfo) SizeBytes() int {
+ return 192
}
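The hard-coded 192 above is the wire size of Linux's struct tcp_info as defined here, and because TCPInfo reports itself as packed, the Go struct's in-memory size must equal that constant as well. A hypothetical compile-time guard for that invariant (not present in this file; it assumes the existing unsafe import) might look like:

// Illustrative only: this declaration fails to compile unless
// unsafe.Sizeof(TCPInfo{}) == 192, since the constant index must fall
// inside the one-element array.
var _ = [1]struct{}{}[unsafe.Sizeof(TCPInfo{})-192]
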
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimerID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
+func (t *TCPInfo) MarshalBytes(dst []byte) {
+ dst[0] = byte(t.State)
+ dst = dst[1:]
+ dst[0] = byte(t.CaState)
+ dst = dst[1:]
+ dst[0] = byte(t.Retransmits)
+ dst = dst[1:]
+ dst[0] = byte(t.Probes)
+ dst = dst[1:]
+ dst[0] = byte(t.Backoff)
+ dst = dst[1:]
+ dst[0] = byte(t.Options)
+ dst = dst[1:]
+ dst[0] = byte(t.WindowScale)
+ dst = dst[1:]
+ dst[0] = byte(t.DeliveryRateAppLimited)
+ dst = dst[1:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTO))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ATO))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndMss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvMss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Unacked))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Sacked))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Lost))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Retrans))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Fackets))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataSent))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckSent))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastDataRecv))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LastAckRecv))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.PMTU))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSsthresh))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RTTVar))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndSsthresh))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SndCwnd))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Advmss))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.Reordering))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvRTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.RcvSpace))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.TotalRetrans))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.PacingRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.MaxPacingRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesAcked))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BytesReceived))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsOut))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.SegsIn))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.NotSentBytes))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.MinRTT))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsIn))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.DataSegsOut))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.DeliveryRate))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.BusyTime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.RwndLimited))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(t.SndBufLimited))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimerID) UnmarshalBytes(src []byte) {
- *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (t *TCPInfo) UnmarshalBytes(src []byte) {
+ t.State = uint8(src[0])
+ src = src[1:]
+ t.CaState = uint8(src[0])
+ src = src[1:]
+ t.Retransmits = uint8(src[0])
+ src = src[1:]
+ t.Probes = uint8(src[0])
+ src = src[1:]
+ t.Backoff = uint8(src[0])
+ src = src[1:]
+ t.Options = uint8(src[0])
+ src = src[1:]
+ t.WindowScale = uint8(src[0])
+ src = src[1:]
+ t.DeliveryRateAppLimited = uint8(src[0])
+ src = src[1:]
+ t.RTO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ATO = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvMss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Unacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Sacked = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Lost = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Retrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Fackets = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckSent = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastDataRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LastAckRecv = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PMTU = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RTTVar = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndSsthresh = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SndCwnd = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Advmss = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.Reordering = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.RcvSpace = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.TotalRetrans = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.PacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.MaxPacingRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesAcked = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BytesReceived = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.SegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.NotSentBytes = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.MinRTT = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsIn = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DataSegsOut = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.DeliveryRate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.BusyTime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.RwndLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ t.SndBufLimited = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *TimerID) Packed() bool {
- // Scalar newtypes are always packed.
+func (t *TCPInfo) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimerID) MarshalUnsafe(dst []byte) {
+func (t *TCPInfo) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(t))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimerID) UnmarshalUnsafe(src []byte) {
+func (t *TCPInfo) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(t), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TCPInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9154,13 +9181,13 @@ func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TCPInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TCPInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9176,7 +9203,7 @@ func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
+func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9184,7 +9211,7 @@ func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
runtime.KeepAlive(t) // escapes: replaced by intrinsic.
@@ -9382,105 +9409,194 @@ func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (tv *Timeval) SizeBytes() int {
+//go:nosplit
+func (t *TimeT) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TimeT) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TimeT) UnmarshalBytes(src []byte) {
+ *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TimeT) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TimeT) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TimeT) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (ts *Timespec) SizeBytes() int {
return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (tv *Timeval) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Sec))
+func (ts *Timespec) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Sec))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Usec))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Nsec))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (tv *Timeval) UnmarshalBytes(src []byte) {
- tv.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+func (ts *Timespec) UnmarshalBytes(src []byte) {
+ ts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- tv.Usec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ ts.Nsec = int64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (tv *Timeval) Packed() bool {
+func (ts *Timespec) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (tv *Timeval) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(tv))
+func (ts *Timespec) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(ts))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (tv *Timeval) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(tv), src)
+func (ts *Timespec) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(ts), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
- hdr.Len = tv.SizeBytes()
- hdr.Cap = tv.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that tv
+ // Since we bypassed the compiler's escape analysis, indicate that ts
// must live until the use above.
- runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (tv *Timeval) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return tv.CopyOutN(cc, addr, tv.SizeBytes())
+func (ts *Timespec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return ts.CopyOutN(cc, addr, ts.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (tv *Timeval) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (ts *Timespec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
- hdr.Len = tv.SizeBytes()
- hdr.Cap = tv.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that tv
+ // Since we bypassed the compiler's escape analysis, indicate that ts
// must live until the use above.
- runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (tv *Timeval) WriteTo(writer io.Writer) (int64, error) {
+func (ts *Timespec) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
- hdr.Len = tv.SizeBytes()
- hdr.Cap = tv.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
+ hdr.Len = ts.SizeBytes()
+ hdr.Cap = ts.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that tv
+ // Since we bypassed the compiler's escape analysis, indicate that ts
// must live until the use above.
- runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyTimevalSliceIn copies in a slice of Timeval objects from the task's memory.
-func CopyTimevalSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timeval) (int, error) {
+// CopyTimespecSliceIn copies in a slice of Timespec objects from the task's memory.
+func CopyTimespecSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timespec) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Timeval)(nil).SizeBytes()
+ size := (*Timespec)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9499,13 +9615,13 @@ func CopyTimevalSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timeval
return length, err
}
-// CopyTimevalSliceOut copies a slice of Timeval objects to the task's memory.
-func CopyTimevalSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timeval) (int, error) {
+// CopyTimespecSliceOut copies a slice of Timespec objects to the task's memory.
+func CopyTimespecSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timespec) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Timeval)(nil).SizeBytes()
+ size := (*Timespec)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9524,13 +9640,13 @@ func CopyTimevalSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timeva
return length, err
}
-// MarshalUnsafeTimevalSlice is like Timeval.MarshalUnsafe, but for a []Timeval.
-func MarshalUnsafeTimevalSlice(src []Timeval, dst []byte) (int, error) {
+// MarshalUnsafeTimespecSlice is like Timespec.MarshalUnsafe, but for a []Timespec.
+func MarshalUnsafeTimespecSlice(src []Timespec, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Timeval)(nil).SizeBytes()
+ size := (*Timespec)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9542,13 +9658,13 @@ func MarshalUnsafeTimevalSlice(src []Timeval, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeTimevalSlice is like Timeval.UnmarshalUnsafe, but for a []Timeval.
-func UnmarshalUnsafeTimevalSlice(dst []Timeval, src []byte) (int, error) {
+// UnmarshalUnsafeTimespecSlice is like Timespec.UnmarshalUnsafe, but for a []Timespec.
+func UnmarshalUnsafeTimespecSlice(dst []Timespec, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Timeval)(nil).SizeBytes()
+ size := (*Timespec)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9561,105 +9677,105 @@ func UnmarshalUnsafeTimevalSlice(dst []Timeval, src []byte) (int, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (ts *Timespec) SizeBytes() int {
+func (tv *Timeval) SizeBytes() int {
return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (ts *Timespec) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Sec))
+func (tv *Timeval) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Sec))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(ts.Nsec))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(tv.Usec))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (ts *Timespec) UnmarshalBytes(src []byte) {
- ts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+func (tv *Timeval) UnmarshalBytes(src []byte) {
+ tv.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- ts.Nsec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ tv.Usec = int64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (ts *Timespec) Packed() bool {
+func (tv *Timeval) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (ts *Timespec) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(ts))
+func (tv *Timeval) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(tv))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (ts *Timespec) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(ts), src)
+func (tv *Timeval) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(tv), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (ts *Timespec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (tv *Timeval) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
- hdr.Len = ts.SizeBytes()
- hdr.Cap = ts.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that ts
+ // Since we bypassed the compiler's escape analysis, indicate that tv
// must live until the use above.
- runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (ts *Timespec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return ts.CopyOutN(cc, addr, ts.SizeBytes())
+func (tv *Timeval) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return tv.CopyOutN(cc, addr, tv.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (ts *Timespec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (tv *Timeval) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
- hdr.Len = ts.SizeBytes()
- hdr.Cap = ts.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that ts
+ // Since we bypassed the compiler's escape analysis, indicate that tv
// must live until the use above.
- runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (ts *Timespec) WriteTo(writer io.Writer) (int64, error) {
+func (tv *Timeval) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(ts)))
- hdr.Len = ts.SizeBytes()
- hdr.Cap = ts.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(tv)))
+ hdr.Len = tv.SizeBytes()
+ hdr.Cap = tv.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that ts
+ // Since we bypassed the compiler's escape analysis, indicate that tv
// must live until the use above.
- runtime.KeepAlive(ts) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(tv) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyTimespecSliceIn copies in a slice of Timespec objects from the task's memory.
-func CopyTimespecSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timespec) (int, error) {
+// CopyTimevalSliceIn copies in a slice of Timeval objects from the task's memory.
+func CopyTimevalSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timeval) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Timespec)(nil).SizeBytes()
+ size := (*Timeval)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9678,13 +9794,13 @@ func CopyTimespecSliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []Timesp
return length, err
}
-// CopyTimespecSliceOut copies a slice of Timespec objects to the task's memory.
-func CopyTimespecSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timespec) (int, error) {
+// CopyTimevalSliceOut copies a slice of Timeval objects to the task's memory.
+func CopyTimevalSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Timeval) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Timespec)(nil).SizeBytes()
+ size := (*Timeval)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9703,13 +9819,13 @@ func CopyTimespecSliceOut(cc marshal.CopyContext, addr usermem.Addr, src []Times
return length, err
}
-// MarshalUnsafeTimespecSlice is like Timespec.MarshalUnsafe, but for a []Timespec.
-func MarshalUnsafeTimespecSlice(src []Timespec, dst []byte) (int, error) {
+// MarshalUnsafeTimevalSlice is like Timeval.MarshalUnsafe, but for a []Timeval.
+func MarshalUnsafeTimevalSlice(src []Timeval, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Timespec)(nil).SizeBytes()
+ size := (*Timeval)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9721,13 +9837,13 @@ func MarshalUnsafeTimespecSlice(src []Timespec, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeTimespecSlice is like Timespec.UnmarshalUnsafe, but for a []Timespec.
-func UnmarshalUnsafeTimespecSlice(dst []Timespec, src []byte) (int, error) {
+// UnmarshalUnsafeTimevalSlice is like Timeval.UnmarshalUnsafe, but for a []Timeval.
+func UnmarshalUnsafeTimevalSlice(dst []Timeval, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Timespec)(nil).SizeBytes()
+ size := (*Timeval)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -9740,6 +9856,136 @@ func UnmarshalUnsafeTimespecSlice(dst []Timespec, src []byte) (int, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *Itimerspec) SizeBytes() int {
+ return 0 +
+ (*Timespec)(nil).SizeBytes() +
+ (*Timespec)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Itimerspec) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Itimerspec) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Itimerspec) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Itimerspec) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type Itimerspec doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Itimerspec) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type Itimerspec doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Itimerspec) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Itimerspec) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Itimerspec) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Itimerspec) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type Itimerspec doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
func (c *ClockT) SizeBytes() int {
return 8
@@ -9829,127 +10075,117 @@ func (c *ClockT) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Tms) SizeBytes() int {
+func (i *ItimerVal) SizeBytes() int {
return 0 +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes()
+ (*Timeval)(nil).SizeBytes() +
+ (*Timeval)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Tms) MarshalBytes(dst []byte) {
- t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()])
- dst = dst[t.UTime.SizeBytes():]
- t.STime.MarshalBytes(dst[:t.STime.SizeBytes()])
- dst = dst[t.STime.SizeBytes():]
- t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()])
- dst = dst[t.CUTime.SizeBytes():]
- t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()])
- dst = dst[t.CSTime.SizeBytes():]
+func (i *ItimerVal) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Tms) UnmarshalBytes(src []byte) {
- t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()])
- src = src[t.UTime.SizeBytes():]
- t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()])
- src = src[t.STime.SizeBytes():]
- t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()])
- src = src[t.CUTime.SizeBytes():]
- t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()])
- src = src[t.CSTime.SizeBytes():]
+func (i *ItimerVal) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *Tms) Packed() bool {
- return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()
+func (i *ItimerVal) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *Tms) MarshalUnsafe(dst []byte) {
- if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
+func (i *ItimerVal) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
- // Type Tms doesn't have a packed layout in memory, fallback to MarshalBytes.
- t.MarshalBytes(dst)
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
}
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *Tms) UnmarshalUnsafe(src []byte) {
- if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- safecopy.CopyOut(unsafe.Pointer(t), src)
+func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
- // Type Tms doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- t.UnmarshalBytes(src)
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
}
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
- t.MarshalBytes(buf) // escapes: fallback.
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
+func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- t.UnmarshalBytes(buf) // escapes: fallback.
+ i.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
- if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, t.SizeBytes())
- t.MarshalBytes(buf)
+func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}
@@ -9957,53 +10193,86 @@ func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimeT) SizeBytes() int {
- return 8
+func (t *Tms) SizeBytes() int {
+ return 0 +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimeT) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+func (t *Tms) MarshalBytes(dst []byte) {
+ t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()])
+ dst = dst[t.UTime.SizeBytes():]
+ t.STime.MarshalBytes(dst[:t.STime.SizeBytes()])
+ dst = dst[t.STime.SizeBytes():]
+ t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()])
+ dst = dst[t.CUTime.SizeBytes():]
+ t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()])
+ dst = dst[t.CSTime.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimeT) UnmarshalBytes(src []byte) {
- *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (t *Tms) UnmarshalBytes(src []byte) {
+ t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()])
+ src = src[t.UTime.SizeBytes():]
+ t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()])
+ src = src[t.STime.SizeBytes():]
+ t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()])
+ src = src[t.CUTime.SizeBytes():]
+ t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()])
+ src = src[t.CSTime.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *TimeT) Packed() bool {
- // Scalar newtypes are always packed.
- return true
+func (t *Tms) Packed() bool {
+ return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimeT) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
+func (t *Tms) MarshalUnsafe(dst []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+ } else {
+ // Type Tms doesn't have a packed layout in memory, fallback to MarshalBytes.
+ t.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimeT) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
+func (t *Tms) UnmarshalUnsafe(src []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+ } else {
+ // Type Tms doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ t.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ t.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10020,13 +10289,23 @@ func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ t.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10042,7 +10321,15 @@ func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
+ if !t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, t.SizeBytes())
+ t.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10050,7 +10337,7 @@ func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
runtime.KeepAlive(t) // escapes: replaced by intrinsic.
@@ -10058,66 +10345,41 @@ func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Termios) SizeBytes() int {
- return 17 +
- 1*NumControlCharacters
+//go:nosplit
+func (t *TimerID) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Termios) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.InputFlags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.OutputFlags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ControlFlags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LocalFlags))
- dst = dst[4:]
- dst[0] = byte(t.LineDiscipline)
- dst = dst[1:]
- for idx := 0; idx < NumControlCharacters; idx++ {
- dst[0] = byte(t.ControlCharacters[idx])
- dst = dst[1:]
- }
+func (t *TimerID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Termios) UnmarshalBytes(src []byte) {
- t.InputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.OutputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.ControlFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LocalFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- t.LineDiscipline = uint8(src[0])
- src = src[1:]
- for idx := 0; idx < NumControlCharacters; idx++ {
- t.ControlCharacters[idx] = uint8(src[0])
- src = src[1:]
- }
+func (t *TimerID) UnmarshalBytes(src []byte) {
+ *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *Termios) Packed() bool {
+func (t *TimerID) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *Termios) MarshalUnsafe(dst []byte) {
+func (t *TimerID) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(t))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *Termios) UnmarshalUnsafe(src []byte) {
+func (t *TimerID) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(t), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *Termios) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10134,13 +10396,13 @@ func (t *Termios) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *Termios) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *Termios) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10156,7 +10418,7 @@ func (t *Termios) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *Termios) WriteTo(writer io.Writer) (int64, error) {
+func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10164,7 +10426,7 @@ func (t *Termios) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
runtime.KeepAlive(t) // escapes: replaced by intrinsic.
@@ -10371,6 +10633,120 @@ func (w *Winsize) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (t *Termios) SizeBytes() int {
+ return 17 +
+ 1*NumControlCharacters
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *Termios) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.InputFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.OutputFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.ControlFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(t.LocalFlags))
+ dst = dst[4:]
+ dst[0] = byte(t.LineDiscipline)
+ dst = dst[1:]
+ for idx := 0; idx < NumControlCharacters; idx++ {
+ dst[0] = byte(t.ControlCharacters[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *Termios) UnmarshalBytes(src []byte) {
+ t.InputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.OutputFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.ControlFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LocalFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ t.LineDiscipline = uint8(src[0])
+ src = src[1:]
+ for idx := 0; idx < NumControlCharacters; idx++ {
+ t.ControlCharacters[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *Termios) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *Termios) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *Termios) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *Termios) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *Termios) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *Termios) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *Termios) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (u *UtsName) SizeBytes() int {
return 0 +
1*(UTSLen+1) +
diff --git a/pkg/abi/linux/sem.go b/pkg/abi/linux/sem.go
index 0adff8dff..2424884c1 100644
--- a/pkg/abi/linux/sem.go
+++ b/pkg/abi/linux/sem.go
@@ -43,10 +43,10 @@ const (
SEMVMX = 32767
SEMAEM = SEMVMX
- // followings are unused in kernel
SEMUME = SEMOPM
SEMMNU = SEMMNS
SEMMAP = SEMMNS
+ SEMUSZ = 20
)
const SEM_UNDO = 0x1000