-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go             | 3676
-rw-r--r--  pkg/marshal/primitive/primitive_abi_autogen_unsafe.go |  400
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go            |  220
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go      |  336
-rw-r--r--  pkg/sentry/fsimpl/devpts/root_inode_refs.go           |    9
-rw-r--r--  pkg/sentry/fsimpl/fuse/inode_refs.go                  |    9
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go     |    9
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go                  |    9
-rw-r--r--  pkg/sentry/fsimpl/kernfs/static_directory_refs.go     |    9
-rw-r--r--  pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go  |    9
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go           |    9
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go      |    9
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks_inode_refs.go         |    9
-rw-r--r--  pkg/sentry/fsimpl/proc/task_inode_refs.go             |    9
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_inode_refs.go            |    9
-rw-r--r--  pkg/sentry/fsimpl/sys/dir_refs.go                     |    9
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go                 |    9
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go                    |    9
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go                  |    9
-rw-r--r--  pkg/sentry/kernel/ipc_namespace_refs.go               |    9
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go               |    9
-rw-r--r--  pkg/sentry/kernel/session_refs.go                     |    9
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go                     |    9
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go                    |    9
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go                |    9
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go                 |    9
-rw-r--r--  pkg/sentry/socket/unix/socket_vfs2_refs.go            |    9
-rw-r--r--  pkg/sentry/socket/unix/transport/queue_refs.go        |    9
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go |  200
-rw-r--r--  pkg/sentry/vfs/file_description_refs.go               |    9
-rw-r--r--  pkg/sentry/vfs/filesystem_refs.go                     |    9
-rw-r--r--  pkg/sentry/vfs/mount_namespace_refs.go                |    9
-rw-r--r--  pkg/tcpip/link/tun/tun_endpoint_refs.go               |    9
33 files changed, 2472 insertions, 2612 deletions
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 04552c4e7..b16994e56 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -521,99 +521,6 @@ func UnmarshalUnsafeBPFInstructionSlice(dst []BPFInstruction, src []byte) (int,
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (c *CapUserHeader) SizeBytes() int {
- return 8
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (c *CapUserHeader) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Version))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Pid))
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (c *CapUserHeader) UnmarshalBytes(src []byte) {
- c.Version = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.Pid = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (c *CapUserHeader) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (c *CapUserHeader) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(c))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (c *CapUserHeader) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(c), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return c.CopyOutN(cc, addr, c.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (c *CapUserHeader) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that c
- // must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (c *CapUserData) SizeBytes() int {
return 12
}
@@ -797,6 +704,99 @@ func UnmarshalUnsafeCapUserDataSlice(dst []CapUserData, src []byte) (int, error)
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (c *CapUserHeader) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (c *CapUserHeader) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Version))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.Pid))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (c *CapUserHeader) UnmarshalBytes(src []byte) {
+ c.Version = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.Pid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (c *CapUserHeader) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (c *CapUserHeader) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(c))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (c *CapUserHeader) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(c), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (c *CapUserHeader) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (c *CapUserHeader) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (c *CapUserHeader) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (c *CapUserHeader) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that c
+ // must live until the use above.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (f *Flock) SizeBytes() int {
return 24 +
1*4 +
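
The relocated CapUserHeader block above is representative of what go_marshal emits for every packed struct: MarshalBytes walks the destination buffer field by field through usermem.ByteOrder (little-endian on the amd64/arm64 targets gVisor supports), re-slicing dst after each field, and UnmarshalBytes mirrors it. A minimal standalone sketch of the same encoding, using encoding/binary directly and a hypothetical capUserHeader mirror type rather than the real gVisor packages:

package main

import (
	"encoding/binary"
	"fmt"
)

// capUserHeader is a hypothetical mirror of linux.CapUserHeader:
// two 4-byte fields, 8 bytes on the wire, no padding.
type capUserHeader struct {
	Version uint32
	Pid     int32
}

// marshalBytes encodes field by field, exactly as the generated
// MarshalBytes does, with binary.LittleEndian standing in for
// usermem.ByteOrder.
func (c *capUserHeader) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], c.Version)
	dst = dst[4:]
	binary.LittleEndian.PutUint32(dst[:4], uint32(c.Pid))
}

func (c *capUserHeader) unmarshalBytes(src []byte) {
	c.Version = binary.LittleEndian.Uint32(src[:4])
	src = src[4:]
	c.Pid = int32(binary.LittleEndian.Uint32(src[:4]))
}

func main() {
	in := capUserHeader{Version: 0x20080522, Pid: 1234}
	buf := make([]byte, 8)
	in.marshalBytes(buf)
	var out capUserHeader
	out.unmarshalBytes(buf)
	fmt.Printf("%#x %d\n", out.Version, out.Pid) // 0x20080522 1234
}
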
@@ -1352,41 +1352,77 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReadIn) SizeBytes() int {
- return 40
+func (f *FUSEAttr) SizeBytes() int {
+ return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReadIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+func (f *FUSEAttr) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEAttr) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -1394,23 +1430,23 @@ func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReadIn) Packed() bool {
+func (f *FUSEAttr) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1427,13 +1463,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1449,7 +1485,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1465,29 +1501,21 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMknodMeta) SizeBytes() int {
- return 16
+func (f *FUSEOpenIn) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -1495,23 +1523,23 @@ func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMknodMeta) Packed() bool {
+func (f *FUSEOpenIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1528,13 +1556,13 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1550,7 +1578,7 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1566,53 +1594,65 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEDirentMeta) SizeBytes() int {
- return 24
+func (f *FUSEReadIn) SizeBytes() int {
+ return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+func (f *FUSEReadIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEDirentMeta) Packed() bool {
+func (f *FUSEReadIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1629,13 +1669,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1651,7 +1691,7 @@ func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
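
Every CopyOutN, CopyIn, and WriteTo body in this file relies on the same zero-copy trick: build a []byte whose backing array is the struct itself (a reflect.SliceHeader populated from gohacks.Noescape(unsafe.Pointer(...))), hand that slice to the copier, and then pin the struct with runtime.KeepAlive because the Noescape call hid it from escape analysis. A self-contained sketch of the aliasing step, written with unsafe.Slice (Go 1.17+) instead of a hand-built reflect.SliceHeader; the pair type and bytesOf helper are hypothetical:

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

type pair struct{ X, Y uint32 }

// bytesOf returns a []byte that aliases p's memory; nothing is copied.
// Callers must keep p alive for as long as the slice is in use.
func bytesOf(p *pair) []byte {
	return unsafe.Slice((*byte)(unsafe.Pointer(p)), unsafe.Sizeof(*p))
}

func main() {
	p := &pair{X: 1, Y: 2}
	buf := bytesOf(p)
	fmt.Println(buf) // the raw bytes of X then Y, in native byte order
	// The generated code ends with this same call: the aliasing above hid
	// p from escape analysis, so p must be pinned explicitly until the
	// last use of buf.
	runtime.KeepAlive(p)
}

The generated code predates unsafe.Slice, which is presumably why it fills in a reflect.SliceHeader by hand; the effect is the same.
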
@@ -1667,45 +1707,65 @@ func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteOut) SizeBytes() int {
- return 8
+func (f *FUSEWriteIn) SizeBytes() int {
+ return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ dst = dst[8:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteOut) Packed() bool {
+func (f *FUSEWriteIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1722,13 +1782,13 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1744,7 +1804,7 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1760,53 +1820,41 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSECreateMeta) SizeBytes() int {
- return 16
+//go:nosplit
+func (f *FUSEOpID) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
+func (f *FUSEOpID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
+func (f *FUSEOpID) UnmarshalBytes(src []byte) {
+ *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSECreateMeta) Packed() bool {
+func (f *FUSEOpID) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
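
FUSEOpID above shows the scalar-newtype shape of the generator's output: the 8-byte value is its own wire image, SizeBytes is a //go:nosplit constant, and Packed is unconditionally true. A minimal mirror using a hypothetical opID type:

package main

import (
	"encoding/binary"
	"fmt"
)

// opID mirrors a scalar newtype such as FUSEOpID: a named uint64 whose
// wire image is just the 8-byte value, so the type is always packed.
type opID uint64

func (f *opID) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(*f))
}

func (f *opID) unmarshalBytes(src []byte) {
	*f = opID(binary.LittleEndian.Uint64(src[:8]))
}

func main() {
	in := opID(42)
	buf := make([]byte, 8)
	in.marshalBytes(buf)
	var out opID
	out.unmarshalBytes(buf)
	fmt.Println(in == out) // true
}
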
@@ -1823,13 +1871,13 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1845,7 +1893,7 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1853,7 +1901,7 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -1861,41 +1909,49 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpID) SizeBytes() int {
- return 8
+func (f *FUSEGetAttrIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
+func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpID) UnmarshalBytes(src []byte) {
- *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
+ f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpID) Packed() bool {
- // Scalar newtypes are always packed.
+func (f *FUSEGetAttrIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
+func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
+func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1912,13 +1968,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1934,7 +1990,7 @@ func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1942,7 +1998,7 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -1950,53 +2006,67 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitIn) SizeBytes() int {
- return 16
+func (f *FUSEHeaderOut) SizeBytes() int {
+ return 8 +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
dst = dst[4:]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitIn) Packed() bool {
- return true
+func (f *FUSEHeaderOut) Packed() bool {
+ return f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
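
FUSEHeaderOut is the first type in this hunk whose layout answer is delegated: Packed() returns f.Unique.Packed(), and MarshalUnsafe/UnmarshalUnsafe branch at runtime between a bulk safecopy of the whole struct and the field-by-field fallback. Since FUSEOpID.Packed() always returns true, the fallback is dead code here, but the generator emits the dispatch uniformly for any type embedding another Marshallable. A self-contained sketch of that dispatch under hypothetical names (headerOut, opID):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

type opID uint64

func (f *opID) packed() bool { return true } // scalar newtypes: always packed

// headerOut is a hypothetical mirror of FUSEHeaderOut: two 4-byte fields
// followed by an embedded marshallable, 16 bytes, no padding.
type headerOut struct {
	Len    uint32
	Error  int32
	Unique opID
}

func (h *headerOut) packed() bool { return h.Unique.packed() }

func (h *headerOut) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], h.Len)
	binary.LittleEndian.PutUint32(dst[4:8], uint32(h.Error))
	binary.LittleEndian.PutUint64(dst[8:16], uint64(h.Unique))
}

// marshalUnsafe mirrors the generated dispatch: one bulk copy when the
// in-memory layout matches the wire layout, field-by-field otherwise.
func (h *headerOut) marshalUnsafe(dst []byte) {
	if h.packed() {
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(h)), unsafe.Sizeof(*h)))
	} else {
		h.marshalBytes(dst) // fallback; dead here, as it is for FUSEHeaderOut
	}
}

func main() {
	h := headerOut{Len: 16, Error: -2, Unique: 7}
	buf := make([]byte, 16)
	h.marshalUnsafe(buf)
	fmt.Println(buf)
}
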
@@ -2013,13 +2083,23 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2035,7 +2115,15 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2051,82 +2139,53 @@ func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitOut) SizeBytes() int {
- return 32 +
- 4*8
+func (f *FUSEMknodMeta) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
- dst = dst[2:]
- // Padding: dst[:sizeof(uint16)] ~= uint16(0)
- dst = dst[2:]
- // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
- dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- // Padding: var _ uint16 ~= src[:sizeof(uint16)]
- src = src[2:]
- // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
- src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitOut) Packed() bool {
+func (f *FUSEMknodMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2143,13 +2202,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2165,7 +2224,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2181,17 +2240,21 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEAttr) SizeBytes() int {
+func (f *FUSESetAttrIn) SizeBytes() int {
return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEAttr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
+func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
dst = dst[8:]
@@ -2207,27 +2270,27 @@ func (f *FUSEAttr) MarshalBytes(dst []byte) {
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
- dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEAttr) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
+ f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
@@ -2243,39 +2306,35 @@ func (f *FUSEAttr) UnmarshalBytes(src []byte) {
src = src[4:]
f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEAttr) Packed() bool {
+func (f *FUSESetAttrIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
+func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
+func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2292,13 +2351,13 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2314,7 +2373,7 @@ func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2330,66 +2389,83 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrOut) SizeBytes() int {
- return 16 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSEHeaderIn) SizeBytes() int {
+ return 28 +
+ (*FUSEOpcode)(nil).SizeBytes() +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
+ dst = dst[f.Opcode.SizeBytes():]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
+ src = src[f.Opcode.SizeBytes():]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEGetAttrOut) Packed() bool {
- return f.Attr.Packed()
+func (f *FUSEHeaderIn) Packed() bool {
+ return f.Opcode.Packed() && f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
+func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(f))
} else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
f.MarshalBytes(dst)
}
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
+func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
safecopy.CopyOut(unsafe.Pointer(f), src)
} else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
f.UnmarshalBytes(src)
}
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
f.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
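
Note how FUSEHeaderIn.SizeBytes composes embedded sizes with expressions like (*FUSEOpcode)(nil).SizeBytes(). That is safe because these SizeBytes implementations return constants without ever dereferencing their receiver, so a typed nil reaches the method and no value needs to be allocated. A small sketch with hypothetical opcode/header types:

package main

import "fmt"

type opcode uint32

// SizeBytes never touches its receiver, so calling it through a typed
// nil pointer is legal; the generator leans on that to compute sizes
// without constructing values.
func (f *opcode) SizeBytes() int { return 4 }

// header is a hypothetical mirror of the FUSEHeaderIn pattern.
type header struct {
	Len    uint32
	Opcode opcode
}

func (h *header) SizeBytes() int {
	return 4 + (*opcode)(nil).SizeBytes()
}

func main() {
	fmt.Println((*header)(nil).SizeBytes()) // 8
}
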
@@ -2411,15 +2487,15 @@ func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
@@ -2443,9 +2519,9 @@ func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, f.SizeBytes())
f.MarshalBytes(buf)
length, err := writer.Write(buf)
@@ -2467,49 +2543,82 @@ func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenOut) SizeBytes() int {
- return 16
+func (f *FUSEInitOut) SizeBytes() int {
+ return 32 +
+ 4*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
+func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
+ dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
+ src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenOut) Packed() bool {
+func (f *FUSEInitOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2526,13 +2635,13 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2548,7 +2657,7 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2564,45 +2673,71 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMkdirMeta) SizeBytes() int {
- return 8
+func (f *FUSEGetAttrOut) SizeBytes() int {
+ return 16 +
+ (*FUSEAttr)(nil).SizeBytes()
}
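// The (*FUSEAttr)(nil).SizeBytes() term above works because SizeBytes for
// these generated types never dereferences its receiver; it returns a
// constant. A minimal sketch of the idiom with a hypothetical type (not part
// of this package):
//
//	type hdr struct{ a uint64; b, _ uint32 }
//
//	func (h *hdr) SizeBytes() int { return 16 } // h is never touched, so nil is fine.
//
//	var total = 16 + (*hdr)(nil).SizeBytes() // safe method call on a nil pointer.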
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMkdirMeta) Packed() bool {
- return true
+func (f *FUSEGetAttrOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
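// The Packed() dispatch above exists because the safecopy fast path copies
// the struct's in-memory bytes verbatim, which is only correct when the Go
// layout of FUSEGetAttrOut and its embedded Attr matches the wire layout
// exactly. Either branch must produce identical bytes; a sketch of that
// invariant (not generated code):
//
//	a := make([]byte, f.SizeBytes())
//	b := make([]byte, f.SizeBytes())
//	f.MarshalBytes(a)  // layout-independent, field by field.
//	f.MarshalUnsafe(b) // memcpy-style only when f.Attr.Packed().
//	// bytes.Equal(a, b) should hold on either path.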
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2619,13 +2754,23 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2641,7 +2786,15 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
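// Note for callers of CopyIn above (a sketch; cc is any marshal.CopyContext
// and the error handling is illustrative): on a short copy-in the struct is
// only partially unmarshalled, so compare the returned length to SizeBytes:
//
//	var attr FUSEGetAttrOut
//	if n, err := attr.CopyIn(cc, addr); err != nil || n != attr.SizeBytes() {
//		// Handle the short or failed copy before using attr.
//	}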
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2657,83 +2810,78 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderIn) SizeBytes() int {
- return 28 +
- (*FUSEOpcode)(nil).SizeBytes() +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSEEntryOut) SizeBytes() int {
+ return 40 +
+ (*FUSEAttr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
- dst = dst[4:]
- f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
- dst = dst[f.Opcode.SizeBytes():]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
+func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
- src = src[f.Opcode.SizeBytes():]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
+func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderIn) Packed() bool {
- return f.Opcode.Packed() && f.Unique.Packed()
+func (f *FUSEEntryOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
+func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(f))
} else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
f.MarshalBytes(dst)
}
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
+func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
safecopy.CopyOut(unsafe.Pointer(f), src)
} else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
f.UnmarshalBytes(src)
}
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
f.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
@@ -2755,15 +2903,15 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
@@ -2787,9 +2935,9 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, f.SizeBytes())
f.MarshalBytes(buf)
length, err := writer.Write(buf)
@@ -2811,67 +2959,49 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderOut) SizeBytes() int {
- return 8 +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSEOpenOut) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderOut) Packed() bool {
- return f.Unique.Packed()
+func (f *FUSEOpenOut) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
- if f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
- if f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2888,23 +3018,13 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2920,15 +3040,7 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2944,53 +3056,45 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReleaseIn) SizeBytes() int {
- return 24
+func (f *FUSEWriteOut) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReleaseIn) Packed() bool {
+func (f *FUSEWriteOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3007,13 +3111,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3029,7 +3133,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3045,101 +3149,53 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSESetAttrIn) SizeBytes() int {
- return 88
+func (f *FUSEReleaseIn) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
+func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
- f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
+func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSESetAttrIn) Packed() bool {
+func (f *FUSEReleaseIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3156,13 +3212,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3178,7 +3234,7 @@ func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3194,41 +3250,53 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpcode) SizeBytes() int {
- return 4
+func (f *FUSECreateMeta) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpcode) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
+func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
- *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpcode) Packed() bool {
- // Scalar newtypes are always packed.
+func (f *FUSECreateMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
+func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
+func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3245,13 +3313,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3267,7 +3335,7 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3275,7 +3343,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -3283,49 +3351,41 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrIn) SizeBytes() int {
- return 16
+//go:nosplit
+func (f *FUSEOpcode) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
+func (f *FUSEOpcode) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
- f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
+func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
+ *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEGetAttrIn) Packed() bool {
+func (f *FUSEOpcode) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
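// "Scalar newtypes are always packed" because a newtype over a fixed-size
// integer has no padding and no nested fields, so its Go layout is exactly
// its 4-byte wire layout:
//
//	type FUSEOpcode uint32 // marshalled as the 4 bytes of the underlying uint32.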
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3342,13 +3402,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3364,7 +3424,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3372,7 +3432,7 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -3380,83 +3440,53 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEEntryOut) SizeBytes() int {
- return 40 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSEInitIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
+func (f *FUSEInitIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEEntryOut) Packed() bool {
- return f.Attr.Packed()
+func (f *FUSEInitIn) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3473,23 +3503,13 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3505,15 +3525,7 @@ func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3529,45 +3541,45 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenIn) SizeBytes() int {
+func (f *FUSEMkdirMeta) SizeBytes() int {
return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenIn) Packed() bool {
+func (f *FUSEMkdirMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3584,13 +3596,13 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3606,7 +3618,7 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3622,65 +3634,53 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteIn) SizeBytes() int {
- return 40
+func (f *FUSEDirentMeta) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteIn) Packed() bool {
+func (f *FUSEDirentMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3697,13 +3697,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3719,7 +3719,7 @@ func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -4473,6 +4473,340 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTEntry) SizeBytes() int {
+ return 12 +
+ (*IPTIP)(nil).SizeBytes() +
+ (*XTCounters)(nil).SizeBytes()
+}
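+
+// A size cross-check for the fixed 12 bytes above (a sketch; the embedded
+// sizes are assumptions consistent with this file, with sizeof(IPTIP) = 84
+// and sizeof(XTCounters) = 16):
+//
+//	const iptEntryFixed = 4 + 2 + 2 + 4          // NFCache, TargetOffset, NextOffset, Comeback.
+//	const iptEntrySize = iptEntryFixed + 84 + 16 // = 112, matching Linux's sizeof(struct ipt_entry).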
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTEntry) MarshalBytes(dst []byte) {
+ i.IP.MarshalBytes(dst[:i.IP.SizeBytes()])
+ dst = dst[i.IP.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
+ dst = dst[4:]
+ i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()])
+ dst = dst[i.Counters.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTEntry) UnmarshalBytes(src []byte) {
+ i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()])
+ src = src[i.IP.SizeBytes():]
+ i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()])
+ src = src[i.Counters.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTEntry) Packed() bool {
+ return i.Counters.Packed() && i.IP.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTEntry) MarshalUnsafe(dst []byte) {
+ if i.Counters.Packed() && i.IP.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
+ if i.Counters.Packed() && i.IP.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
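+
+// The SliceHeader construction above aliases i's memory without copying or
+// allocating; since that bypasses escape analysis, runtime.KeepAlive(i)
+// pins i until the copy has used the aliased bytes. A minimal sketch of the
+// same pattern for an arbitrary pointer p of byte size n (not generated
+// code; consume is a stand-in for any use of the bytes):
+//
+//	var buf []byte
+//	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+//	hdr.Data = uintptr(unsafe.Pointer(p)) // buf now aliases *p.
+//	hdr.Len, hdr.Cap = n, n
+//	consume(buf)
+//	runtime.KeepAlive(p) // keeps *p live until after consume.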
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Counters.Packed() || !i.IP.Packed() {
+ // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTIP) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ (*InetAddr)(nil).SizeBytes() +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ +
+ 1*IFNAMSIZ
+}
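+
+// The leading 4 above covers the scalar tail marshalled below: Protocol (2)
+// + Flags (1) + InverseFlags (1). With InetAddr at 4 bytes and IFNAMSIZ at
+// 16, as in Linux (an assumption, stated for the arithmetic only):
+//
+//	const iptIPSize = 4 + 4*4 + 4*16 // = 84, matching Linux's sizeof(struct ipt_ip).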
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTIP) MarshalBytes(dst []byte) {
+ i.Src.MarshalBytes(dst[:i.Src.SizeBytes()])
+ dst = dst[i.Src.SizeBytes():]
+ i.Dst.MarshalBytes(dst[:i.Dst.SizeBytes()])
+ dst = dst[i.Dst.SizeBytes():]
+ i.SrcMask.MarshalBytes(dst[:i.SrcMask.SizeBytes()])
+ dst = dst[i.SrcMask.SizeBytes():]
+ i.DstMask.MarshalBytes(dst[:i.DstMask.SizeBytes()])
+ dst = dst[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterface[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.InputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ dst[0] = byte(i.OutputInterfaceMask[idx])
+ dst = dst[1:]
+ }
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
+ dst = dst[2:]
+ dst[0] = byte(i.Flags)
+ dst = dst[1:]
+ dst[0] = byte(i.InverseFlags)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTIP) UnmarshalBytes(src []byte) {
+ i.Src.UnmarshalBytes(src[:i.Src.SizeBytes()])
+ src = src[i.Src.SizeBytes():]
+ i.Dst.UnmarshalBytes(src[:i.Dst.SizeBytes()])
+ src = src[i.Dst.SizeBytes():]
+ i.SrcMask.UnmarshalBytes(src[:i.SrcMask.SizeBytes()])
+ src = src[i.SrcMask.SizeBytes():]
+ i.DstMask.UnmarshalBytes(src[:i.DstMask.SizeBytes()])
+ src = src[i.DstMask.SizeBytes():]
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterface[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.InputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < IFNAMSIZ; idx++ {
+ i.OutputInterfaceMask[idx] = src[0]
+ src = src[1:]
+ }
+ i.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ i.Flags = uint8(src[0])
+ src = src[1:]
+ i.InverseFlags = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTIP) Packed() bool {
+ return i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTIP) MarshalUnsafe(dst []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTIP) UnmarshalUnsafe(src []byte) {
+ if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Dst.Packed() || !i.DstMask.Packed() || !i.Src.Packed() || !i.SrcMask.Packed() {
+ // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
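+// A minimal round-trip sketch for the generated IPTIP marshallers; the
+// value and buffer here are illustrative only and not part of this file:
+//
+//	var ip IPTIP
+//	buf := make([]byte, ip.SizeBytes())
+//	ip.MarshalUnsafe(buf)    // one safecopy when Packed(), else MarshalBytes.
+//	var out IPTIP
+//	out.UnmarshalUnsafe(buf) // inverse, with the same fallback rule.
+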
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (x *XTCounters) SizeBytes() int {
return 16
}
@@ -5174,340 +5508,6 @@ func (tn *TableName) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTEntry) SizeBytes() int {
- return 12 +
- (*IPTIP)(nil).SizeBytes() +
- (*XTCounters)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTEntry) MarshalBytes(dst []byte) {
- i.IP.MarshalBytes(dst[:i.IP.SizeBytes()])
- dst = dst[i.IP.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NFCache))
- dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.TargetOffset))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.NextOffset))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Comeback))
- dst = dst[4:]
- i.Counters.MarshalBytes(dst[:i.Counters.SizeBytes()])
- dst = dst[i.Counters.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTEntry) UnmarshalBytes(src []byte) {
- i.IP.UnmarshalBytes(src[:i.IP.SizeBytes()])
- src = src[i.IP.SizeBytes():]
- i.NFCache = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.TargetOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- i.NextOffset = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- i.Comeback = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.Counters.UnmarshalBytes(src[:i.Counters.SizeBytes()])
- src = src[i.Counters.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IPTEntry) Packed() bool {
- return i.Counters.Packed() && i.IP.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTEntry) MarshalUnsafe(dst []byte) {
- if i.Counters.Packed() && i.IP.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTEntry) UnmarshalUnsafe(src []byte) {
- if i.Counters.Packed() && i.IP.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IPTEntry doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IPTEntry) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Counters.Packed() && i.IP.Packed() {
- // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IPTEntry) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IPTEntry) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Counters.Packed() && i.IP.Packed() {
- // Type IPTEntry doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTEntry) WriteTo(writer io.Writer) (int64, error) {
- if !i.Counters.Packed() && i.IP.Packed() {
- // Type IPTEntry doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTIP) SizeBytes() int {
- return 4 +
- (*InetAddr)(nil).SizeBytes() +
- (*InetAddr)(nil).SizeBytes() +
- (*InetAddr)(nil).SizeBytes() +
- (*InetAddr)(nil).SizeBytes() +
- 1*IFNAMSIZ +
- 1*IFNAMSIZ +
- 1*IFNAMSIZ +
- 1*IFNAMSIZ
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTIP) MarshalBytes(dst []byte) {
- i.Src.MarshalBytes(dst[:i.Src.SizeBytes()])
- dst = dst[i.Src.SizeBytes():]
- i.Dst.MarshalBytes(dst[:i.Dst.SizeBytes()])
- dst = dst[i.Dst.SizeBytes():]
- i.SrcMask.MarshalBytes(dst[:i.SrcMask.SizeBytes()])
- dst = dst[i.SrcMask.SizeBytes():]
- i.DstMask.MarshalBytes(dst[:i.DstMask.SizeBytes()])
- dst = dst[i.DstMask.SizeBytes():]
- for idx := 0; idx < IFNAMSIZ; idx++ {
- dst[0] = byte(i.InputInterface[idx])
- dst = dst[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- dst[0] = byte(i.OutputInterface[idx])
- dst = dst[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- dst[0] = byte(i.InputInterfaceMask[idx])
- dst = dst[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- dst[0] = byte(i.OutputInterfaceMask[idx])
- dst = dst[1:]
- }
- usermem.ByteOrder.PutUint16(dst[:2], uint16(i.Protocol))
- dst = dst[2:]
- dst[0] = byte(i.Flags)
- dst = dst[1:]
- dst[0] = byte(i.InverseFlags)
- dst = dst[1:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTIP) UnmarshalBytes(src []byte) {
- i.Src.UnmarshalBytes(src[:i.Src.SizeBytes()])
- src = src[i.Src.SizeBytes():]
- i.Dst.UnmarshalBytes(src[:i.Dst.SizeBytes()])
- src = src[i.Dst.SizeBytes():]
- i.SrcMask.UnmarshalBytes(src[:i.SrcMask.SizeBytes()])
- src = src[i.SrcMask.SizeBytes():]
- i.DstMask.UnmarshalBytes(src[:i.DstMask.SizeBytes()])
- src = src[i.DstMask.SizeBytes():]
- for idx := 0; idx < IFNAMSIZ; idx++ {
- i.InputInterface[idx] = src[0]
- src = src[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- i.OutputInterface[idx] = src[0]
- src = src[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- i.InputInterfaceMask[idx] = src[0]
- src = src[1:]
- }
- for idx := 0; idx < IFNAMSIZ; idx++ {
- i.OutputInterfaceMask[idx] = src[0]
- src = src[1:]
- }
- i.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- i.Flags = uint8(src[0])
- src = src[1:]
- i.InverseFlags = uint8(src[0])
- src = src[1:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IPTIP) Packed() bool {
- return i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTIP) MarshalUnsafe(dst []byte) {
- if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IPTIP doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IPTIP) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
- // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IPTIP) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IPTIP) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
- // Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTIP) WriteTo(writer io.Writer) (int64, error) {
- if !i.Dst.Packed() && i.DstMask.Packed() && i.Src.Packed() && i.SrcMask.Packed() {
- // Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (i *IP6TIP) SizeBytes() int {
return 5 +
(*Inet6Addr)(nil).SizeBytes() +
@@ -7730,6 +7730,239 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrInet) SizeBytes() int {
+ return 4 +
+ (*InetAddr)(nil).SizeBytes() +
+ 1*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrInet) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ dst = dst[2:]
+ s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
+ dst = dst[s.Addr.SizeBytes():]
+ // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0}
+ dst = dst[1*(8):]
+}
+
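+// MarshalBytes above skips the 8 trailing padding bytes without writing
+// them, so callers that need guaranteed zero padding must start from a
+// zeroed buffer; UnmarshalBytes below discards them on the way in.
+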
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrInet) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
+ src = src[s.Addr.SizeBytes():]
+ // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8])
+ src = src[1*(8):]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrInet) Packed() bool {
+ return s.Addr.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrInet) MarshalUnsafe(dst []byte) {
+ if s.Addr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
+ if s.Addr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Addr.Packed() {
+ // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
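+// Inet6Addr is an array newtype ([16]byte), so it is unconditionally packed
+// and every method below takes the zero-copy path; no MarshalBytes fallback
+// branch is generated for it.
+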
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *Inet6Addr) SizeBytes() int {
+ return 1 * 16
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *Inet6Addr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(i[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *Inet6Addr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 16; idx++ {
+ i[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *Inet6Addr) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *Inet6Addr) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SockAddrInet6) SizeBytes() int {
return 12 +
1*16
@@ -7938,6 +8171,312 @@ func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *InetAddr) SizeBytes() int {
+ return 1 * 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetAddr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(i[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *InetAddr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 4; idx++ {
+ i[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *InetAddr) Packed() bool {
+ // Array newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *InetAddr) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *InetAddr) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
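+// The zero-copy pattern shared by the CopyIn/CopyOut/WriteTo methods in this
+// file, shown in isolation (a sketch: ptr and size stand in for the receiver
+// and its SizeBytes result):
+//
+//	var buf []byte
+//	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+//	hdr.Data = uintptr(gohacks.Noescape(ptr)) // hide ptr from escape analysis.
+//	hdr.Len = size
+//	hdr.Cap = size
+//	// ... read or write through buf ...
+//	runtime.KeepAlive(ptr) // keep the object alive past the last use of buf.
+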
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SockAddrLink) SizeBytes() int {
+ return 12 +
+ 1*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SockAddrLink) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
+ dst = dst[2:]
+ dst[0] = byte(s.PacketType)
+ dst = dst[1:]
+ dst[0] = byte(s.HardwareAddrLen)
+ dst = dst[1:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s.HardwareAddr[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SockAddrLink) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.PacketType = src[0]
+ src = src[1:]
+ s.HardwareAddrLen = src[0]
+ src = src[1:]
+ for idx := 0; idx < 8; idx++ {
+ s.HardwareAddr[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SockAddrLink) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SockAddrLink) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
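+// SockAddrLink mirrors Linux's struct sockaddr_ll: HardwareAddr is the fixed
+// 8-byte sll_addr field, of which only the first HardwareAddrLen bytes are
+// meaningful.
+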
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (l *Linger) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (l *Linger) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (l *Linger) UnmarshalBytes(src []byte) {
+ l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ l.Linger = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (l *Linger) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (l *Linger) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(l))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (l *Linger) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(l), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return l.CopyOutN(cc, addr, l.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
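+// Linger (two int32 fields, no padding) is trivially packed, so none of the
+// methods above need a fallback branch.
+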
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (t *TCPInfo) SizeBytes() int {
return 192
}
@@ -8207,638 +8746,378 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (i *InetAddr) SizeBytes() int {
- return 1 * 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *InetAddr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 4; idx++ {
- dst[0] = byte(i[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *InetAddr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 4; idx++ {
- i[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *InetAddr) Packed() bool {
- // Array newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *InetAddr) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *InetAddr) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (i *Inet6Addr) SizeBytes() int {
- return 1 * 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Inet6Addr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 16; idx++ {
- dst[0] = byte(i[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Inet6Addr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 16; idx++ {
- i[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *Inet6Addr) Packed() bool {
- // Array newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Inet6Addr) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Inet6Addr) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *Inet6Addr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *Inet6Addr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *Inet6Addr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrLink) SizeBytes() int {
- return 12 +
- 1*8
+func (c *ControlMessageCredentials) SizeBytes() int {
+ return 12
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrLink) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
+func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
- dst = dst[2:]
- dst[0] = byte(s.PacketType)
- dst = dst[1:]
- dst[0] = byte(s.HardwareAddrLen)
- dst = dst[1:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s.HardwareAddr[idx])
- dst = dst[1:]
- }
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrLink) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
+ c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.PacketType = src[0]
- src = src[1:]
- s.HardwareAddrLen = src[0]
- src = src[1:]
- for idx := 0; idx < 8; idx++ {
- s.HardwareAddr[idx] = src[0]
- src = src[1:]
- }
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrLink) Packed() bool {
+func (c *ControlMessageCredentials) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrLink) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
+func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(c))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
+func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(c), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
+func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (l *Linger) SizeBytes() int {
- return 8
+func (sxts *StatxTimestamp) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (l *Linger) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
+func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (l *Linger) UnmarshalBytes(src []byte) {
- l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
+ sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- l.Linger = int32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (l *Linger) Packed() bool {
+func (sxts *StatxTimestamp) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (l *Linger) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(l))
+func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(sxts))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (l *Linger) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(l), src)
+func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(sxts), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return l.CopyOutN(cc, addr, l.SizeBytes())
+func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
+func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (c *ControlMessageCredentials) SizeBytes() int {
- return 12
+func (u *Utime) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
- dst = dst[4:]
+func (u *Utime) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
- c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (u *Utime) UnmarshalBytes(src []byte) {
+ u.Actime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (c *ControlMessageCredentials) Packed() bool {
+func (u *Utime) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(c))
+func (u *Utime) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(c), src)
+func (u *Utime) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return c.CopyOutN(cc, addr, c.SizeBytes())
+func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
+func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrInet) SizeBytes() int {
- return 4 +
- (*InetAddr)(nil).SizeBytes() +
- 1*8
+//go:nosplit
+func (t *TimeT) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrInet) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
- dst = dst[2:]
- s.Addr.MarshalBytes(dst[:s.Addr.SizeBytes()])
- dst = dst[s.Addr.SizeBytes():]
- // Padding: dst[:sizeof(uint8)*8] ~= [8]uint8{0}
- dst = dst[1*(8):]
+func (t *TimeT) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrInet) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Addr.UnmarshalBytes(src[:s.Addr.SizeBytes()])
- src = src[s.Addr.SizeBytes():]
- // Padding: ~ copy([8]uint8(s._), src[:sizeof(uint8)*8])
- src = src[1*(8):]
+func (t *TimeT) UnmarshalBytes(src []byte) {
+ *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrInet) Packed() bool {
- return s.Addr.Packed()
+func (t *TimeT) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrInet) MarshalUnsafe(dst []byte) {
- if s.Addr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
-		// Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
- s.MarshalBytes(dst)
- }
+func (t *TimeT) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrInet) UnmarshalUnsafe(src []byte) {
- if s.Addr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
-		// Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
+func (t *TimeT) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrInet) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrInet) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
+func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrInet) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrInet) WriteTo(writer io.Writer) (int64, error) {
- if !s.Addr.Packed() {
- // Type SockAddrInet doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -9282,371 +9561,41 @@ func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Tms) SizeBytes() int {
- return 0 +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes() +
- (*ClockT)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Tms) MarshalBytes(dst []byte) {
- t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()])
- dst = dst[t.UTime.SizeBytes():]
- t.STime.MarshalBytes(dst[:t.STime.SizeBytes()])
- dst = dst[t.STime.SizeBytes():]
- t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()])
- dst = dst[t.CUTime.SizeBytes():]
- t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()])
- dst = dst[t.CSTime.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Tms) UnmarshalBytes(src []byte) {
- t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()])
- src = src[t.UTime.SizeBytes():]
- t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()])
- src = src[t.STime.SizeBytes():]
- t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()])
- src = src[t.CUTime.SizeBytes():]
- t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()])
- src = src[t.CSTime.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (t *Tms) Packed() bool {
- return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *Tms) MarshalUnsafe(dst []byte) {
- if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
- } else {
-		// Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
- t.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *Tms) UnmarshalUnsafe(src []byte) {
- if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
- safecopy.CopyOut(unsafe.Pointer(t), src)
- } else {
-		// Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- t.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
-	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
- // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
- t.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
-	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
- // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- t.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
-	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
- // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, t.SizeBytes())
- t.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
- // must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (sxts *StatxTimestamp) SizeBytes() int {
- return 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
- dst = dst[4:]
- // Padding: dst[:sizeof(int32)] ~= int32(0)
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
- sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ int32 ~= src[:sizeof(int32)]
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (sxts *StatxTimestamp) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(sxts))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(sxts), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *Utime) SizeBytes() int {
- return 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Utime) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
- dst = dst[8:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Utime) UnmarshalBytes(src []byte) {
- u.Actime = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *Utime) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Utime) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Utime) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimeT) SizeBytes() int {
- return 8
+func (t *TimerID) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimeT) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+func (t *TimerID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimeT) UnmarshalBytes(src []byte) {
- *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (t *TimerID) UnmarshalBytes(src []byte) {
+ *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *TimeT) Packed() bool {
+func (t *TimerID) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimeT) MarshalUnsafe(dst []byte) {
+func (t *TimerID) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(t))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimeT) UnmarshalUnsafe(src []byte) {
+func (t *TimerID) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(t), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9663,13 +9612,13 @@ func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9685,7 +9634,7 @@ func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -9969,41 +9918,74 @@ func (c *ClockT) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimerID) SizeBytes() int {
- return 4
+func (t *Tms) SizeBytes() int {
+ return 0 +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes() +
+ (*ClockT)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimerID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*t))
+func (t *Tms) MarshalBytes(dst []byte) {
+ t.UTime.MarshalBytes(dst[:t.UTime.SizeBytes()])
+ dst = dst[t.UTime.SizeBytes():]
+ t.STime.MarshalBytes(dst[:t.STime.SizeBytes()])
+ dst = dst[t.STime.SizeBytes():]
+ t.CUTime.MarshalBytes(dst[:t.CUTime.SizeBytes()])
+ dst = dst[t.CUTime.SizeBytes():]
+ t.CSTime.MarshalBytes(dst[:t.CSTime.SizeBytes()])
+ dst = dst[t.CSTime.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimerID) UnmarshalBytes(src []byte) {
- *t = TimerID(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (t *Tms) UnmarshalBytes(src []byte) {
+ t.UTime.UnmarshalBytes(src[:t.UTime.SizeBytes()])
+ src = src[t.UTime.SizeBytes():]
+ t.STime.UnmarshalBytes(src[:t.STime.SizeBytes()])
+ src = src[t.STime.SizeBytes():]
+ t.CUTime.UnmarshalBytes(src[:t.CUTime.SizeBytes()])
+ src = src[t.CUTime.SizeBytes():]
+ t.CSTime.UnmarshalBytes(src[:t.CSTime.SizeBytes()])
+ src = src[t.CSTime.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *TimerID) Packed() bool {
- // Scalar newtypes are always packed.
- return true
+func (t *Tms) Packed() bool {
+ return t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimerID) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
+func (t *Tms) MarshalUnsafe(dst []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+ } else {
+		// Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ t.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimerID) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
+func (t *Tms) UnmarshalUnsafe(src []byte) {
+ if t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+ } else {
+		// Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ t.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (t *Tms) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ t.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10020,13 +10002,23 @@ func (t *TimerID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimerID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return t.CopyOutN(cc, addr, t.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (t *Tms) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
+ // Type Tms doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(t.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ t.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10042,7 +10034,15 @@ func (t *TimerID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
+func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
+	if !(t.CSTime.Packed() && t.CUTime.Packed() && t.STime.Packed() && t.UTime.Packed()) {
+ // Type Tms doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, t.SizeBytes())
+ t.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -10050,7 +10050,7 @@ func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
hdr.Len = t.SizeBytes()
hdr.Cap = t.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that t
// must live until the use above.
runtime.KeepAlive(t) // escapes: replaced by intrinsic.
diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
index 28fcdbaa1..1993ca3ba 100644
--- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
+++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -25,40 +25,40 @@ var _ marshal.Marshallable = (*Uint8)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int64) SizeBytes() int {
- return 8
+func (i *Int8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
+func (i *Int8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*i)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int64) UnmarshalBytes(src []byte) {
- *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (i *Int8) UnmarshalBytes(src []byte) {
+ *i = Int8(int8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int64) Packed() bool {
+func (i *Int8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int64) MarshalUnsafe(dst []byte) {
+func (i *Int8) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int64) UnmarshalUnsafe(src []byte) {
+func (i *Int8) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -75,13 +75,13 @@ func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -97,7 +97,7 @@ func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int64) WriteTo(w io.Writer) (int64, error) {
+func (i *Int8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -112,14 +112,14 @@ func (i *Int64) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
+// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
//go:nosplit
-func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
+func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -138,14 +138,14 @@ func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (i
return length, err
}
-// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
+// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
//go:nosplit
-func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
+func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -164,13 +164,13 @@ func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (
return length, err
}
-// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
-func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
+// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
+func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -182,13 +182,13 @@ func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
-func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
+// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
+func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -202,40 +202,40 @@ func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint64) SizeBytes() int {
- return 8
+func (u *Uint8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
+func (u *Uint8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*u)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint64) UnmarshalBytes(src []byte) {
- *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (u *Uint8) UnmarshalBytes(src []byte) {
+ *u = Uint8(uint8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint64) Packed() bool {
+func (u *Uint8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint64) MarshalUnsafe(dst []byte) {
+func (u *Uint8) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint64) UnmarshalUnsafe(src []byte) {
+func (u *Uint8) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -252,13 +252,13 @@ func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -274,7 +274,7 @@ func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -289,14 +289,14 @@ func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
+// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
//go:nosplit
-func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
+func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -315,14 +315,14 @@ func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64)
return length, err
}
-// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
+// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
//go:nosplit
-func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
+func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -341,13 +341,13 @@ func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64)
return length, err
}
-// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
-func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
+// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
+func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -359,13 +359,13 @@ func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
-func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
+// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
+func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -379,40 +379,40 @@ func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int8) SizeBytes() int {
- return 1
+func (i *Int16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int8) MarshalBytes(dst []byte) {
- dst[0] = byte(*i)
+func (i *Int16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int8) UnmarshalBytes(src []byte) {
- *i = Int8(int8(src[0]))
+func (i *Int16) UnmarshalBytes(src []byte) {
+ *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int8) Packed() bool {
+func (i *Int16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int8) MarshalUnsafe(dst []byte) {
+func (i *Int16) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int8) UnmarshalUnsafe(src []byte) {
+func (i *Int16) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -429,13 +429,13 @@ func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -451,7 +451,7 @@ func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int8) WriteTo(w io.Writer) (int64, error) {
+func (i *Int16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -466,14 +466,14 @@ func (i *Int8) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
+// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
//go:nosplit
-func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
+func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -492,14 +492,14 @@ func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int
return length, err
}
-// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
+// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
//go:nosplit
-func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
+func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -518,13 +518,13 @@ func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (in
return length, err
}
-// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
-func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
+// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
+func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -536,13 +536,13 @@ func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
-func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
+// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
+func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -556,40 +556,40 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint8) SizeBytes() int {
- return 1
+func (u *Uint16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint8) MarshalBytes(dst []byte) {
- dst[0] = byte(*u)
+func (u *Uint16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint8) UnmarshalBytes(src []byte) {
- *u = Uint8(uint8(src[0]))
+func (u *Uint16) UnmarshalBytes(src []byte) {
+ *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint8) Packed() bool {
+func (u *Uint16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint8) MarshalUnsafe(dst []byte) {
+func (u *Uint16) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint8) UnmarshalUnsafe(src []byte) {
+func (u *Uint16) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -606,13 +606,13 @@ func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -628,7 +628,7 @@ func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -643,14 +643,14 @@ func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
+// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
//go:nosplit
-func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
+func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -669,14 +669,14 @@ func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (i
return length, err
}
-// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
+// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
//go:nosplit
-func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
+func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -695,13 +695,13 @@ func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (
return length, err
}
-// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
-func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
+// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
+func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -713,13 +713,13 @@ func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
-func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
+// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
+func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -733,40 +733,40 @@ func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int16) SizeBytes() int {
- return 2
+func (i *Int32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
+func (i *Int32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int16) UnmarshalBytes(src []byte) {
- *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
+func (i *Int32) UnmarshalBytes(src []byte) {
+ *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int16) Packed() bool {
+func (i *Int32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int16) MarshalUnsafe(dst []byte) {
+func (i *Int32) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int16) UnmarshalUnsafe(src []byte) {
+func (i *Int32) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -783,13 +783,13 @@ func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -805,7 +805,7 @@ func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int16) WriteTo(w io.Writer) (int64, error) {
+func (i *Int32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -820,14 +820,14 @@ func (i *Int16) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
+// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
//go:nosplit
-func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
+func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -846,14 +846,14 @@ func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (i
return length, err
}
-// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
+// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
//go:nosplit
-func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
+func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -872,13 +872,13 @@ func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (
return length, err
}
-// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
-func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
+// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
+func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -890,13 +890,13 @@ func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
-func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
+// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
+func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -910,40 +910,40 @@ func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint16) SizeBytes() int {
- return 2
+func (u *Uint32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
+func (u *Uint32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint16) UnmarshalBytes(src []byte) {
- *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
+func (u *Uint32) UnmarshalBytes(src []byte) {
+ *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint16) Packed() bool {
+func (u *Uint32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint16) MarshalUnsafe(dst []byte) {
+func (u *Uint32) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint16) UnmarshalUnsafe(src []byte) {
+func (u *Uint32) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -960,13 +960,13 @@ func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -982,7 +982,7 @@ func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -997,14 +997,14 @@ func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
+// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
//go:nosplit
-func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
+func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1023,14 +1023,14 @@ func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16)
return length, err
}
-// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
+// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
//go:nosplit
-func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
+func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1049,13 +1049,13 @@ func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16)
return length, err
}
-// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
-func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
+// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
+func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1067,13 +1067,13 @@ func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
-func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
+// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
+func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1087,40 +1087,40 @@ func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int32) SizeBytes() int {
- return 4
+func (i *Int64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
+func (i *Int64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int32) UnmarshalBytes(src []byte) {
- *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (i *Int64) UnmarshalBytes(src []byte) {
+ *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int32) Packed() bool {
+func (i *Int64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int32) MarshalUnsafe(dst []byte) {
+func (i *Int64) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int32) UnmarshalUnsafe(src []byte) {
+func (i *Int64) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1137,13 +1137,13 @@ func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1159,7 +1159,7 @@ func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int32) WriteTo(w io.Writer) (int64, error) {
+func (i *Int64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1174,14 +1174,14 @@ func (i *Int32) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
+// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
//go:nosplit
-func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
+func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1200,14 +1200,14 @@ func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (i
return length, err
}
-// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
+// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
//go:nosplit
-func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
+func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1226,13 +1226,13 @@ func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (
return length, err
}
-// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
-func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
+// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
+func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1244,13 +1244,13 @@ func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
-func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
+// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
+func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1264,40 +1264,40 @@ func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint32) SizeBytes() int {
- return 4
+func (u *Uint64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
+func (u *Uint64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint32) UnmarshalBytes(src []byte) {
- *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (u *Uint64) UnmarshalBytes(src []byte) {
+ *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint32) Packed() bool {
+func (u *Uint64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint32) MarshalUnsafe(dst []byte) {
+func (u *Uint64) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint32) UnmarshalUnsafe(src []byte) {
+func (u *Uint64) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1314,13 +1314,13 @@ func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1336,7 +1336,7 @@ func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1351,14 +1351,14 @@ func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
-// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
+// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
//go:nosplit
-func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
+func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1377,14 +1377,14 @@ func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32)
return length, err
}
-// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
+// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
//go:nosplit
-func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
+func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1403,13 +1403,13 @@ func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32)
return length, err
}
-// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
-func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
+// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
+func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1421,13 +1421,13 @@ func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
-func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
+// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
+func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
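A note on the pattern repeated throughout the generated methods above: each CopyOutN/CopyIn/WriteTo aliases the value's own memory as a []byte through reflect.SliceHeader, then pins the value with runtime.KeepAlive because the compiler's escape analysis was bypassed. A minimal, self-contained sketch of that aliasing trick follows; the demo type is hypothetical and gohacks.Noescape is omitted, only the slice-header pattern mirrors the generated code.

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// demo stands in for a generated primitive; 8 bytes, no padding.
type demo struct {
	A uint32
	B uint32
}

func main() {
	d := &demo{A: 1, B: 2}

	// Construct a slice backed by d's underlying memory, as the
	// generated CopyOutN does before calling cc.CopyOutBytes.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(d))
	hdr.Len = int(unsafe.Sizeof(*d))
	hdr.Cap = int(unsafe.Sizeof(*d))

	fmt.Println(buf) // d's 8 bytes, in native byte order.

	// Since escape analysis was bypassed, d must live until the last
	// use of buf above.
	runtime.KeepAlive(d)
}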
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index 1bd458b68..fe68f921d 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -23,116 +23,6 @@ var _ marshal.Marshallable = (*SignalStack)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalInfo) SizeBytes() int {
- return 16 +
- 1*(128-16)
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- for idx := 0; idx < (128-16); idx++ {
- dst[0] = byte(s.Fields[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalInfo) UnmarshalBytes(src []byte) {
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- for idx := 0; idx < (128-16); idx++ {
- s.Fields[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalInfo) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalAct) SizeBytes() int {
return 24 +
(*linux.SignalSet)(nil).SizeBytes()
@@ -370,3 +260,113 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalInfo) SizeBytes() int {
+ return 16 +
+ 1*(128-16)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalInfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ dst[0] = byte(s.Fields[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalInfo) UnmarshalBytes(src []byte) {
+ s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ s.Fields[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalInfo) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
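The SignalInfo methods moved above serialize Linux's 128-byte siginfo: a 16-byte header of three int32 fields plus four explicit padding bytes, then 112 opaque payload bytes, which is exactly what SizeBytes returns as 16 + 1*(128-16). A hedged, standalone sketch of that layout, with encoding/binary standing in for usermem.ByteOrder and a hypothetical struct name:

package main

import (
	"encoding/binary"
	"fmt"
)

// sigInfo mirrors the wire layout only; it is not the sentry type.
type sigInfo struct {
	Signo, Errno, Code int32
	Fields             [112]byte // 128 - 16 header bytes.
}

func (s *sigInfo) marshal(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], uint32(s.Signo))
	binary.LittleEndian.PutUint32(dst[4:8], uint32(s.Errno))
	binary.LittleEndian.PutUint32(dst[8:12], uint32(s.Code))
	// dst[12:16] stays zero: the explicit padding word.
	copy(dst[16:], s.Fields[:])
}

func main() {
	s := sigInfo{Signo: 11, Code: 1} // e.g. SIGSEGV, SEGV_MAPERR.
	buf := make([]byte, 128)
	s.marshal(buf)
	fmt.Println(len(buf), binary.LittleEndian.Uint32(buf[:4])) // 128 11
}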
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index b8667cdb9..aac25375e 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,6 +27,174 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes() +
+ 1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ dst[0] = byte(s.Reserved[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ s.Reserved[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+		// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (a *aarch64Ctx) SizeBytes() int {
return 8
}
@@ -422,171 +590,3 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalContext64) SizeBytes() int {
- return 32 +
- 8*31 +
- 1*8 +
- (*FpsimdContext)(nil).SizeBytes() +
- 1*3568
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
- dst = dst[8:]
- for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
- dst = dst[8:]
- }
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
- dst = dst[8:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s._pad[idx])
- dst = dst[1:]
- }
- s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
- dst = dst[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- dst[0] = byte(s.Reserved[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- }
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 8; idx++ {
- s._pad[idx] = src[0]
- src = src[1:]
- }
- s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
- src = src[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- s.Reserved[idx] = uint8(src[0])
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalContext64) Packed() bool {
- return s.Fpsimd64.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalContext64) MarshalUnsafe(dst []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
-		// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- s.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
-		// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
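Every SignalContext64 method relocated above shares one dispatch: when the embedded Fpsimd64 reports Packed(), the struct is copied as raw memory through safecopy; otherwise it falls back to the field-by-field (Un)MarshalBytes path. A minimal sketch of that dispatch under illustrative names, with the raw-copy branch reduced to a print:

package main

import "fmt"

// marshaller is a cut-down stand-in for marshal.Marshallable.
type marshaller interface {
	Packed() bool
	SizeBytes() int
	MarshalBytes(dst []byte)
}

// marshalUnsafe mirrors the generated MarshalUnsafe shape: packed types
// would take a raw safecopy.CopyIn (elided here); non-packed types fall
// back to explicit per-field encoding.
func marshalUnsafe(m marshaller, dst []byte) {
	if m.Packed() {
		fmt.Println("fast path: whole-struct memory copy")
		return
	}
	m.MarshalBytes(dst) // slow path, taken when Fpsimd64 is not packed.
}

// pair is a hypothetical non-packed two-byte type.
type pair struct{ a, b byte }

func (p *pair) Packed() bool            { return false }
func (p *pair) SizeBytes() int          { return 2 }
func (p *pair) MarshalBytes(dst []byte) { dst[0], dst[1] = p.a, p.b }

func main() {
	p := &pair{a: 1, b: 2}
	buf := make([]byte, p.SizeBytes())
	marshalUnsafe(p, buf)
	fmt.Println(buf) // [1 2]
}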
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
index 9246cf66e..155e7abcb 100644
--- a/pkg/sentry/fsimpl/devpts/root_inode_refs.go
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -54,11 +54,6 @@ func (r *rootInodeRefs) LogRefs() bool {
return rootInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *rootInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *rootInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *rootInodeRefs) TryIncRef() bool {
func (r *rootInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if rootInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *rootInodeRefs) DecRef(destroy func()) {
func (r *rootInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
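The same two-part change recurs in every generated _refs.go file below: the EnableLeakCheck wrapper is deleted, with afterLoad calling refsvfs2.Register directly, and DecRef now logs the value returned by atomic.AddInt64, which is already the post-decrement count, instead of reconstructing the pre-decrement count as v+1. A self-contained sketch of the resulting DecRef shape, using illustrative names and a plain counter rather than the sentry's refcount conventions:

package main

import (
	"fmt"
	"sync/atomic"
)

type refs struct{ refCount int64 }

func (r *refs) IncRef() { atomic.AddInt64(&r.refCount, 1) }

func (r *refs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	fmt.Println("DecRef ->", v) // v is the count after this decrement.
	switch {
	case v < 0:
		panic("DecRef on already-released reference")
	case v == 0:
		destroy()
	}
}

func main() {
	r := &refs{refCount: 1} // one outstanding reference.
	r.IncRef()
	r.DecRef(func() {})                       // DecRef -> 1
	r.DecRef(func() { fmt.Println("freed") }) // DecRef -> 0, then freed
}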
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
index 37a39e976..e221f3b41 100644
--- a/pkg/sentry/fsimpl/fuse/inode_refs.go
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool {
return inodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *inodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool {
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if inodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) {
func (r *inodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index 3f5f4ebc3..d784166ae 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -54,11 +54,6 @@ func (r *ConnectedEndpointRefs) LogRefs() bool {
return ConnectedEndpointenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *ConnectedEndpointRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ConnectedEndpointRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *ConnectedEndpointRefs) TryIncRef() bool {
func (r *ConnectedEndpointRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if ConnectedEndpointenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *ConnectedEndpointRefs) DecRef(destroy func()) {
func (r *ConnectedEndpointRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index 4c850a7ac..8862c4a5e 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool {
return inodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *inodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool {
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if inodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) {
func (r *inodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
index cdf6374a3..88cba9456 100644
--- a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -54,11 +54,6 @@ func (r *StaticDirectoryRefs) LogRefs() bool {
return StaticDirectoryenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *StaticDirectoryRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *StaticDirectoryRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *StaticDirectoryRefs) TryIncRef() bool {
func (r *StaticDirectoryRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if StaticDirectoryenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *StaticDirectoryRefs) DecRef(destroy func()) {
func (r *StaticDirectoryRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
index 69b41668a..4409edd35 100644
--- a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
@@ -54,11 +54,6 @@ func (r *syntheticDirectoryRefs) LogRefs() bool {
return syntheticDirectoryenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *syntheticDirectoryRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *syntheticDirectoryRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *syntheticDirectoryRefs) TryIncRef() bool {
func (r *syntheticDirectoryRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if syntheticDirectoryenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *syntheticDirectoryRefs) DecRef(destroy func()) {
func (r *syntheticDirectoryRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
index 4644809bd..390927c63 100644
--- a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -54,11 +54,6 @@ func (r *fdDirInodeRefs) LogRefs() bool {
return fdDirInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *fdDirInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *fdDirInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *fdDirInodeRefs) TryIncRef() bool {
func (r *fdDirInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if fdDirInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *fdDirInodeRefs) DecRef(destroy func()) {
func (r *fdDirInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
index dbc7e3f5a..5c44e617f 100644
--- a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -54,11 +54,6 @@ func (r *fdInfoDirInodeRefs) LogRefs() bool {
return fdInfoDirInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *fdInfoDirInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *fdInfoDirInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *fdInfoDirInodeRefs) TryIncRef() bool {
func (r *fdInfoDirInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if fdInfoDirInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *fdInfoDirInodeRefs) DecRef(destroy func()) {
func (r *fdInfoDirInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
index 993251646..4b655a383 100644
--- a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -54,11 +54,6 @@ func (r *subtasksInodeRefs) LogRefs() bool {
return subtasksInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *subtasksInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *subtasksInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *subtasksInodeRefs) TryIncRef() bool {
func (r *subtasksInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if subtasksInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *subtasksInodeRefs) DecRef(destroy func()) {
func (r *subtasksInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
index 632251e75..1bc355b12 100644
--- a/pkg/sentry/fsimpl/proc/task_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -54,11 +54,6 @@ func (r *taskInodeRefs) LogRefs() bool {
return taskInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *taskInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *taskInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *taskInodeRefs) TryIncRef() bool {
func (r *taskInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if taskInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *taskInodeRefs) DecRef(destroy func()) {
func (r *taskInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
index 0b2af4269..af0c4f025 100644
--- a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -54,11 +54,6 @@ func (r *tasksInodeRefs) LogRefs() bool {
return tasksInodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *tasksInodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *tasksInodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *tasksInodeRefs) TryIncRef() bool {
func (r *tasksInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if tasksInodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *tasksInodeRefs) DecRef(destroy func()) {
func (r *tasksInodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
index a45aa7f78..0f8a6e0f2 100644
--- a/pkg/sentry/fsimpl/sys/dir_refs.go
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -54,11 +54,6 @@ func (r *dirRefs) LogRefs() bool {
return direnableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *dirRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *dirRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *dirRefs) TryIncRef() bool {
func (r *dirRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if direnableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *dirRefs) DecRef(destroy func()) {
func (r *dirRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index 51ee15409..a30d79603 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool {
return inodeenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *inodeRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool {
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if inodeenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) {
func (r *inodeRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index f540ba371..ddbe50261 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -54,11 +54,6 @@ func (r *FDTableRefs) LogRefs() bool {
return FDTableenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FDTableRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FDTableRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *FDTableRefs) TryIncRef() bool {
func (r *FDTableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if FDTableenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *FDTableRefs) DecRef(destroy func()) {
func (r *FDTableRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index 6510157c7..d97c82fc9 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -54,11 +54,6 @@ func (r *FSContextRefs) LogRefs() bool {
return FSContextenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FSContextRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FSContextRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *FSContextRefs) TryIncRef() bool {
func (r *FSContextRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if FSContextenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *FSContextRefs) DecRef(destroy func()) {
func (r *FSContextRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go
index c0acf2f50..a2a2d3458 100644
--- a/pkg/sentry/kernel/ipc_namespace_refs.go
+++ b/pkg/sentry/kernel/ipc_namespace_refs.go
@@ -54,11 +54,6 @@ func (r *IPCNamespaceRefs) LogRefs() bool {
return IPCNamespaceenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *IPCNamespaceRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *IPCNamespaceRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *IPCNamespaceRefs) TryIncRef() bool {
func (r *IPCNamespaceRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if IPCNamespaceenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *IPCNamespaceRefs) DecRef(destroy func()) {
func (r *IPCNamespaceRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index a9cc69b35..51b0fcd34 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -54,11 +54,6 @@ func (r *ProcessGroupRefs) LogRefs() bool {
return ProcessGroupenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *ProcessGroupRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ProcessGroupRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *ProcessGroupRefs) TryIncRef() bool {
func (r *ProcessGroupRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if ProcessGroupenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *ProcessGroupRefs) DecRef(destroy func()) {
func (r *ProcessGroupRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index 0856ff261..84919943d 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -54,11 +54,6 @@ func (r *SessionRefs) LogRefs() bool {
return SessionenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *SessionRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *SessionRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *SessionRefs) TryIncRef() bool {
func (r *SessionRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if SessionenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *SessionRefs) DecRef(destroy func()) {
func (r *SessionRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 82ca1ed06..f550c7ca1 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -54,11 +54,6 @@ func (r *ShmRefs) LogRefs() bool {
return ShmenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *ShmRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ShmRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *ShmRefs) TryIncRef() bool {
func (r *ShmRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if ShmenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *ShmRefs) DecRef(destroy func()) {
func (r *ShmRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
index 500477c1f..4b87070bf 100644
--- a/pkg/sentry/mm/aio_mappable_refs.go
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -54,11 +54,6 @@ func (r *aioMappableRefs) LogRefs() bool {
return aioMappableenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *aioMappableRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *aioMappableRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *aioMappableRefs) TryIncRef() bool {
func (r *aioMappableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if aioMappableenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *aioMappableRefs) DecRef(destroy func()) {
func (r *aioMappableRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
index 60b4b7e92..6c89e0f7e 100644
--- a/pkg/sentry/mm/special_mappable_refs.go
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -54,11 +54,6 @@ func (r *SpecialMappableRefs) LogRefs() bool {
return SpecialMappableenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *SpecialMappableRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *SpecialMappableRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *SpecialMappableRefs) TryIncRef() bool {
func (r *SpecialMappableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if SpecialMappableenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *SpecialMappableRefs) DecRef(destroy func()) {
func (r *SpecialMappableRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index e69a17ca8..2a7fcb253 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -54,11 +54,6 @@ func (r *socketOperationsRefs) LogRefs() bool {
return socketOperationsenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *socketOperationsRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *socketOperationsRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *socketOperationsRefs) TryIncRef() bool {
func (r *socketOperationsRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if socketOperationsenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *socketOperationsRefs) DecRef(destroy func()) {
func (r *socketOperationsRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/socket/unix/socket_vfs2_refs.go b/pkg/sentry/socket/unix/socket_vfs2_refs.go
index d9bdba0b3..f10033260 100644
--- a/pkg/sentry/socket/unix/socket_vfs2_refs.go
+++ b/pkg/sentry/socket/unix/socket_vfs2_refs.go
@@ -54,11 +54,6 @@ func (r *socketVFS2Refs) LogRefs() bool {
return socketVFS2enableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *socketVFS2Refs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *socketVFS2Refs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *socketVFS2Refs) TryIncRef() bool {
func (r *socketVFS2Refs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if socketVFS2enableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *socketVFS2Refs) DecRef(destroy func()) {
func (r *socketVFS2Refs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
index 679cb40e4..42c5b7ce0 100644
--- a/pkg/sentry/socket/unix/transport/queue_refs.go
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -54,11 +54,6 @@ func (r *queueRefs) LogRefs() bool {
return queueenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *queueRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *queueRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *queueRefs) TryIncRef() bool {
func (r *queueRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if queueenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *queueRefs) DecRef(destroy func()) {
func (r *queueRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
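
afterLoad runs when a saved sandbox is restored; any object that still holds references must be re-registered with the leak checker, which the generated code now does by calling refsvfs2.Register directly rather than through the removed EnableLeakCheck wrapper. A sketch of how such a registry fits together, with Register, Unregister, and reportLeaks as hypothetical stand-ins for the refsvfs2 package:

package main

import (
    "fmt"
    "sync"
)

// registry is a hypothetical stand-in for the refsvfs2 package: it
// tracks live reference-counted objects so leaks can be reported.
var registry = struct {
    sync.Mutex
    live map[interface{}]struct{}
}{live: map[interface{}]struct{}{}}

// Register records an object as live, as refsvfs2.Register does.
func Register(obj interface{}) {
    registry.Lock()
    defer registry.Unlock()
    registry.live[obj] = struct{}{}
}

// Unregister drops an object, typically when its count reaches zero.
func Unregister(obj interface{}) {
    registry.Lock()
    defer registry.Unlock()
    delete(registry.live, obj)
}

// reportLeaks prints whatever is still registered.
func reportLeaks() {
    registry.Lock()
    defer registry.Unlock()
    for obj := range registry.live {
        fmt.Printf("leaked: %T %p\n", obj, obj)
    }
}

type exampleRefs struct{ refCount int64 }

// afterLoad mirrors the updated generated code: after restore, any
// object that still has references is re-registered directly.
func (r *exampleRefs) afterLoad() {
    if r.refCount > 0 {
        Register(r) // was r.EnableLeakCheck(), which only wrapped this call
    }
}

func main() {
    r := &exampleRefs{refCount: 1}
    r.afterLoad()
    reportLeaks() // reports r, since it was never unregistered
}
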
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index 6886c63d1..956643160 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -458,26 +458,49 @@ func (u *userSockFprog) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *multipleMessageHeader64) SizeBytes() int {
- return 8 +
- (*MessageHeader64)(nil).SizeBytes()
+func (m *MessageHeader64) SizeBytes() int {
+ return 56
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
- m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
- dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+func (m *MessageHeader64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
- m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
- src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *MessageHeader64) UnmarshalBytes(src []byte) {
+ m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -485,40 +508,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *multipleMessageHeader64) Packed() bool {
- return m.msgHdr.Packed()
+func (m *MessageHeader64) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- m.MarshalBytes(dst)
- }
+func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(m), src)
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- m.UnmarshalBytes(src)
- }
+func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- m.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -535,23 +541,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- m.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -567,15 +563,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, m.SizeBytes())
- m.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -591,49 +579,26 @@ func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *MessageHeader64) SizeBytes() int {
- return 56
+func (m *multipleMessageHeader64) SizeBytes() int {
+ return 8 +
+ (*MessageHeader64)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
+ m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
+ dst = dst[m.msgHdr.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
+ m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
+ src = src[m.msgHdr.SizeBytes():]
+ m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -641,23 +606,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *MessageHeader64) Packed() bool {
- return true
+func (m *multipleMessageHeader64) Packed() bool {
+ return m.msgHdr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
+func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(m), src)
+func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -674,13 +656,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -696,7 +688,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
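
The churn in this file is mostly a reordering: the generator now emits MessageHeader64 before multipleMessageHeader64, and the moved bodies are otherwise unchanged. MessageHeader64 is a packed 56-byte mirror of the 64-bit struct msghdr, which is why its Packed() returns true and MarshalUnsafe can copy it in one safecopy call, while multipleMessageHeader64 (a header plus a msgLen, the shape of struct mmsghdr) delegates Packed() to its embedded header and falls back to MarshalBytes otherwise. A standalone sketch of the same 56-byte layout, with the struct fields copied from the generated code above and binary.LittleEndian standing in for usermem.ByteOrder:

package main

import (
    "encoding/binary"
    "fmt"
)

// MessageHeader64 is copied from the fields visible in the generated
// marshalling code above; it mirrors the 64-bit struct msghdr.
type MessageHeader64 struct {
    Name       uint64
    NameLen    uint32
    _          uint32 // padding, as in the generated MarshalBytes
    Iov        uint64
    IovLen     uint64
    Control    uint64
    ControlLen uint64
    Flags      int32
    _          int32 // trailing padding
}

// marshal lays the struct out exactly as the generated MarshalBytes
// does, field by field with explicit padding.
func marshal(m *MessageHeader64) []byte {
    dst := make([]byte, 56) // matches SizeBytes() in the diff
    binary.LittleEndian.PutUint64(dst[0:8], m.Name)
    binary.LittleEndian.PutUint32(dst[8:12], m.NameLen)
    // dst[12:16] is padding and stays zero.
    binary.LittleEndian.PutUint64(dst[16:24], m.Iov)
    binary.LittleEndian.PutUint64(dst[24:32], m.IovLen)
    binary.LittleEndian.PutUint64(dst[32:40], m.Control)
    binary.LittleEndian.PutUint64(dst[40:48], m.ControlLen)
    binary.LittleEndian.PutUint32(dst[48:52], uint32(m.Flags))
    // dst[52:56] is padding and stays zero.
    return dst
}

func main() {
    b := marshal(&MessageHeader64{Name: 0x1000, NameLen: 16, Flags: 1})
    fmt.Printf("% x\n", b)
}

Because every field is fixed-size and the padding is explicit, the in-memory layout matches the wire layout, which is exactly the property Packed() asserts.
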
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
index 1e6d3f5af..5d4634ba7 100644
--- a/pkg/sentry/vfs/file_description_refs.go
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -54,11 +54,6 @@ func (r *FileDescriptionRefs) LogRefs() bool {
return FileDescriptionenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FileDescriptionRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FileDescriptionRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *FileDescriptionRefs) TryIncRef() bool {
func (r *FileDescriptionRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if FileDescriptionenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *FileDescriptionRefs) DecRef(destroy func()) {
func (r *FileDescriptionRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
index 75da47bef..2a98a8ead 100644
--- a/pkg/sentry/vfs/filesystem_refs.go
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -54,11 +54,6 @@ func (r *FilesystemRefs) LogRefs() bool {
return FilesystemenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FilesystemRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FilesystemRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *FilesystemRefs) TryIncRef() bool {
func (r *FilesystemRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if FilesystemenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *FilesystemRefs) DecRef(destroy func()) {
func (r *FilesystemRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
index bd79fb8a7..d5d48a7cd 100644
--- a/pkg/sentry/vfs/mount_namespace_refs.go
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -54,11 +54,6 @@ func (r *MountNamespaceRefs) LogRefs() bool {
return MountNamespaceenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *MountNamespaceRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *MountNamespaceRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *MountNamespaceRefs) TryIncRef() bool {
func (r *MountNamespaceRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if MountNamespaceenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *MountNamespaceRefs) DecRef(destroy func()) {
func (r *MountNamespaceRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
diff --git a/pkg/tcpip/link/tun/tun_endpoint_refs.go b/pkg/tcpip/link/tun/tun_endpoint_refs.go
index dc164902b..276cbdb20 100644
--- a/pkg/tcpip/link/tun/tun_endpoint_refs.go
+++ b/pkg/tcpip/link/tun/tun_endpoint_refs.go
@@ -54,11 +54,6 @@ func (r *tunEndpointRefs) LogRefs() bool {
return tunEndpointenableLogging
}
-// EnableLeakCheck enables reference leak checking on r.
-func (r *tunEndpointRefs) EnableLeakCheck() {
- refsvfs2.Register(r)
-}
-
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *tunEndpointRefs) ReadRefs() int64 {
@@ -115,7 +110,7 @@ func (r *tunEndpointRefs) TryIncRef() bool {
func (r *tunEndpointRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
if tunEndpointenableLogging {
- refsvfs2.LogDecRef(r, v+1)
+ refsvfs2.LogDecRef(r, v)
}
switch {
case v < 0:
@@ -132,6 +127,6 @@ func (r *tunEndpointRefs) DecRef(destroy func()) {
func (r *tunEndpointRefs) afterLoad() {
if r.ReadRefs() > 0 {
- r.EnableLeakCheck()
+ refsvfs2.Register(r)
}
}
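
One recurring idiom in the marshalling hunks is worth spelling out: every CopyOutN, CopyIn, and WriteTo body begins with "Construct a slice backed by dst's underlying memory", building a []byte that aliases the struct itself through a reflect.SliceHeader so the copy needs no intermediate buffer. A minimal sketch of that trick, with pair as a hypothetical packed struct; since the aliasing slice bypasses escape analysis, the object must be kept live until the bytes are consumed, and runtime.KeepAlive is the standard tool for that:

package main

import (
    "fmt"
    "reflect"
    "runtime"
    "unsafe"
)

// pair is a hypothetical packed struct with no padding.
type pair struct {
    A uint32
    B uint32
}

// bytesOf builds a []byte aliasing p's memory by filling in a
// reflect.SliceHeader, the same construction the generated
// CopyOutN/CopyIn/WriteTo bodies use.
func bytesOf(p *pair) []byte {
    var buf []byte
    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    hdr.Data = uintptr(unsafe.Pointer(p))
    hdr.Len = int(unsafe.Sizeof(*p))
    hdr.Cap = int(unsafe.Sizeof(*p))
    return buf
}

func main() {
    p := &pair{A: 1, B: 2}
    b := bytesOf(p)
    fmt.Printf("% x\n", b)
    // The slice bypassed escape analysis, so keep p live until the
    // bytes above have been consumed.
    runtime.KeepAlive(p)
}
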