author     gVisor bot <gvisor-bot@google.com>  2020-12-12 23:38:21 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-12-12 23:38:21 +0000
commit     7582c6ce6b2f176657b4f43ecbd437998ed742da (patch)
tree       fb590707a6a266b5ad4952081771c73ab65f891d /pkg
parent     5399efbe29e89cbd2865ace6505adb3d6deee9d5 (diff)
parent     08d36b6c630846c4d9d812c25966639513ecd211 (diff)
Merge release-20201208.0-40-g08d36b6c6 (automated)
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go              | 3398
-rw-r--r--  pkg/marshal/primitive/primitive_abi_autogen_unsafe.go  |  688
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go       |  286
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go       |  336
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go  |  142
-rw-r--r--  pkg/tcpip/network/fragmentation/frag_heap.go           |   77
-rw-r--r--  pkg/tcpip/network/fragmentation/fragmentation.go       |    4
-rw-r--r--  pkg/tcpip/network/fragmentation/reassembler.go         |  127
8 files changed, 2498 insertions(+), 2560 deletions(-)
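
Note: the hunks below swap go_marshal-generated Marshallable implementations between FUSE types (FUSEOpenOut, FUSEReadIn, FUSEWriteIn, and others). As a rough, standalone sketch of the wire layout the generated FUSEOpenOut methods encode in the first hunk — Fh (8 bytes), OpenFlag (4 bytes), then 4 bytes of padding — the following uses encoding/binary with little-endian order as an assumption, instead of gVisor's usermem/safecopy helpers; the fuseOpenOut type and its methods are hypothetical illustrations of the pattern, not the project's API.

package main

import (
	"encoding/binary"
	"fmt"
)

// fuseOpenOut mirrors the 16-byte layout marshalled by the generated
// FUSEOpenOut methods in the first hunk: Fh (8 bytes), OpenFlag (4 bytes),
// then 4 bytes of struct padding.
type fuseOpenOut struct {
	Fh       uint64
	OpenFlag uint32
	_        uint32 // explicit padding, written as zero
}

// sizeBytes corresponds to FUSEOpenOut.SizeBytes() returning 16.
func (f *fuseOpenOut) sizeBytes() int { return 16 }

// marshalBytes walks the buffer field by field, as the generated
// MarshalBytes does with usermem.ByteOrder.
func (f *fuseOpenOut) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], f.Fh)
	dst = dst[8:]
	binary.LittleEndian.PutUint32(dst[:4], f.OpenFlag)
	// dst[4:8] is padding and stays zero.
}

// unmarshalBytes is the inverse walk, as in the generated UnmarshalBytes.
func (f *fuseOpenOut) unmarshalBytes(src []byte) {
	f.Fh = binary.LittleEndian.Uint64(src[:8])
	src = src[8:]
	f.OpenFlag = binary.LittleEndian.Uint32(src[:4])
	// src[4:8] is padding and is ignored.
}

func main() {
	out := fuseOpenOut{Fh: 42, OpenFlag: 1}
	buf := make([]byte, out.sizeBytes())
	out.marshalBytes(buf)

	var back fuseOpenOut
	back.unmarshalBytes(buf)
	fmt.Printf("round-trip: %+v\n", back) // prints the recovered fields
}
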
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 9464dabc7..7950ca774 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -1352,49 +1352,49 @@ func (s *Statfs) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrIn) SizeBytes() int {
+func (f *FUSEOpenOut) SizeBytes() int {
return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
+func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
- f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEGetAttrIn) Packed() bool {
+func (f *FUSEOpenOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1411,13 +1411,13 @@ func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1433,7 +1433,7 @@ func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1449,19 +1449,19 @@ func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteIn) SizeBytes() int {
+func (f *FUSEReadIn) SizeBytes() int {
return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
+func (f *FUSEReadIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
dst = dst[8:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
@@ -1472,14 +1472,14 @@ func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
+func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
@@ -1491,23 +1491,23 @@ func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteIn) Packed() bool {
+func (f *FUSEReadIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1524,13 +1524,13 @@ func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1546,7 +1546,7 @@ func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1562,53 +1562,65 @@ func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReleaseIn) SizeBytes() int {
- return 24
+func (f *FUSEWriteIn) SizeBytes() int {
+ return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+func (f *FUSEWriteIn) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.WriteFlags))
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+func (f *FUSEWriteIn) UnmarshalBytes(src []byte) {
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.WriteFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReleaseIn) Packed() bool {
+func (f *FUSEWriteIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1625,13 +1637,13 @@ func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1647,7 +1659,7 @@ func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1663,41 +1675,45 @@ func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpID) SizeBytes() int {
+func (f *FUSEMkdirMeta) SizeBytes() int {
return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
+func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpID) UnmarshalBytes(src []byte) {
- *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpID) Packed() bool {
- // Scalar newtypes are always packed.
+func (f *FUSEMkdirMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
+func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1714,13 +1730,13 @@ func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1736,7 +1752,7 @@ func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1744,7 +1760,7 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -1752,88 +1768,53 @@ func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderIn) SizeBytes() int {
- return 28 +
- (*FUSEOpcode)(nil).SizeBytes() +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSEDirentMeta) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
- dst = dst[4:]
- f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
- dst = dst[f.Opcode.SizeBytes():]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
- src = src[f.Opcode.SizeBytes():]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
+ f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderIn) Packed() bool {
- return f.Opcode.Packed() && f.Unique.Packed()
+func (f *FUSEDirentMeta) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
- if f.Opcode.Packed() && f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1850,23 +1831,13 @@ func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1882,15 +1853,7 @@ func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
- if !f.Opcode.Packed() && f.Unique.Packed() {
- // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1906,83 +1869,82 @@ func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEEntryOut) SizeBytes() int {
- return 40 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSEInitOut) SizeBytes() int {
+ return 32 +
+ 4*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
+func (f *FUSEInitOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint16)] ~= uint16(0)
+ dst = dst[2:]
+ // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
+ dst = dst[4*(8):]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
- f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ // Padding: var _ uint16 ~= src[:sizeof(uint16)]
+ src = src[2:]
+ // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
+ src = src[4*(8):]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEEntryOut) Packed() bool {
- return f.Attr.Packed()
+func (f *FUSEInitOut) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -1999,23 +1961,13 @@ func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2031,15 +1983,7 @@ func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2055,45 +1999,53 @@ func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEWriteOut) SizeBytes() int {
- return 8
+func (f *FUSEInitIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
+func (f *FUSEInitIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
+ f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEWriteOut) Packed() bool {
+func (f *FUSEInitIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2110,13 +2062,13 @@ func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2132,7 +2084,7 @@ func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2148,101 +2100,49 @@ func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSESetAttrIn) SizeBytes() int {
- return 88
+func (f *FUSEGetAttrIn) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+func (f *FUSEGetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GetAttrFlags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
- f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEGetAttrIn) UnmarshalBytes(src []byte) {
+ f.GetAttrFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSESetAttrIn) Packed() bool {
+func (f *FUSEGetAttrIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEGetAttrIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEGetAttrIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2259,13 +2159,13 @@ func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2281,7 +2181,7 @@ func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEGetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2297,53 +2197,101 @@ func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEDirentMeta) SizeBytes() int {
- return 24
+func (f *FUSEAttr) SizeBytes() int {
+ return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEDirentMeta) MarshalBytes(dst []byte) {
+func (f *FUSEAttr) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Off))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.NameLen))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Type))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEDirentMeta) UnmarshalBytes(src []byte) {
+func (f *FUSEAttr) UnmarshalBytes(src []byte) {
f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Type = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEDirentMeta) Packed() bool {
+func (f *FUSEAttr) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEDirentMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEDirentMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2360,13 +2308,13 @@ func (f *FUSEDirentMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEDirentMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2382,7 +2330,7 @@ func (f *FUSEDirentMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2398,41 +2346,45 @@ func (f *FUSEDirentMeta) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (f *FUSEOpcode) SizeBytes() int {
- return 4
+func (f *FUSEOpenIn) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpcode) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
+func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
- *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpcode) Packed() bool {
- // Scalar newtypes are always packed.
+func (f *FUSEOpenIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2449,13 +2401,13 @@ func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2471,7 +2423,7 @@ func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
+func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2479,7 +2431,7 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -2487,67 +2439,53 @@ func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEHeaderOut) SizeBytes() int {
- return 8 +
- (*FUSEOpID)(nil).SizeBytes()
+func (f *FUSECreateMeta) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
- dst = dst[f.Unique.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
- f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
+ f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
- src = src[f.Unique.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEHeaderOut) Packed() bool {
- return f.Unique.Packed()
+func (f *FUSECreateMeta) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
- if f.Unique.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
- if f.Unique.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2564,23 +2502,13 @@ func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2596,15 +2524,7 @@ func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Unique.Packed() {
- // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2620,77 +2540,29 @@ func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEAttr) SizeBytes() int {
- return 88
+func (f *FUSEMknodMeta) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEAttr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ino))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Blocks))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
- dst = dst[4:]
+func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Nlink))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
- dst = dst[4:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.BlkSize))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEAttr) UnmarshalBytes(src []byte) {
- f.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Blocks = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Nlink = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.BlkSize = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2698,23 +2570,23 @@ func (f *FUSEAttr) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEAttr) Packed() bool {
+func (f *FUSEMknodMeta) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEAttr) MarshalUnsafe(dst []byte) {
+func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEAttr) UnmarshalUnsafe(src []byte) {
+func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2731,13 +2603,13 @@ func (f *FUSEAttr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEAttr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2753,7 +2625,7 @@ func (f *FUSEAttr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2769,21 +2641,47 @@ func (f *FUSEAttr) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenIn) SizeBytes() int {
- return 8
+func (f *FUSEHeaderIn) SizeBytes() int {
+ return 28 +
+ (*FUSEOpcode)(nil).SizeBytes() +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+func (f *FUSEHeaderIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
+ dst = dst[4:]
+ f.Opcode.MarshalBytes(dst[:f.Opcode.SizeBytes()])
+ dst = dst[f.Opcode.SizeBytes():]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.PID))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Opcode.UnmarshalBytes(src[:f.Opcode.SizeBytes()])
+ src = src[f.Opcode.SizeBytes():]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.PID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -2791,23 +2689,40 @@ func (f *FUSEOpenIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenIn) Packed() bool {
- return true
+func (f *FUSEHeaderIn) Packed() bool {
+ return f.Opcode.Packed() && f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenIn) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEHeaderIn) MarshalUnsafe(dst []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenIn) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
+ if f.Opcode.Packed() && f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2824,13 +2739,23 @@ func (f *FUSEOpenIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2846,7 +2771,15 @@ func (f *FUSEOpenIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEHeaderIn) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
+ // Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2862,49 +2795,67 @@ func (f *FUSEOpenIn) WriteTo(writer io.Writer) (int64, error) {
}
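As an aside for readers of the generated code: a minimal round-trip sketch of FUSEHeaderIn's byte-level marshalling, written as if alongside the same package. It is not part of the generated file or of this diff; the field values and the FUSE_GETATTR opcode are only illustrative.

    // Illustrative only. SizeBytes() is 28 fixed bytes plus the embedded
    // FUSEOpcode (4) and FUSEOpID (8), i.e. 40 bytes on the wire.
    in := FUSEHeaderIn{Len: 40, Opcode: FUSE_GETATTR, Unique: 1, NodeID: 1, PID: 1234}
    buf := make([]byte, in.SizeBytes())
    in.MarshalBytes(buf) // fields in declaration order, 4 trailing padding bytes

    var out FUSEHeaderIn
    out.UnmarshalBytes(buf) // out mirrors in; the padding bytes are skipped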
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEOpenOut) SizeBytes() int {
- return 16
+func (f *FUSEHeaderOut) SizeBytes() int {
+ return 8 +
+ (*FUSEOpID)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEOpenOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.OpenFlag))
+func (f *FUSEHeaderOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Len))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Error))
dst = dst[4:]
+ f.Unique.MarshalBytes(dst[:f.Unique.SizeBytes()])
+ dst = dst[f.Unique.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEOpenOut) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.OpenFlag = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEHeaderOut) UnmarshalBytes(src []byte) {
+ f.Len = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.Error = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Unique.UnmarshalBytes(src[:f.Unique.SizeBytes()])
+ src = src[f.Unique.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEOpenOut) Packed() bool {
- return true
+func (f *FUSEHeaderOut) Packed() bool {
+ return f.Unique.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEOpenOut) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEHeaderOut) MarshalUnsafe(dst []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEOpenOut) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEHeaderOut) UnmarshalUnsafe(src []byte) {
+ if f.Unique.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEHeaderOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2921,13 +2872,23 @@ func (f *FUSEOpenOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEOpenOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEHeaderOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2943,7 +2904,15 @@ func (f *FUSEOpenOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEHeaderOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Unique.Packed() {
+ // Type FUSEHeaderOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -2959,53 +2928,83 @@ func (f *FUSEOpenOut) WriteTo(writer io.Writer) (int64, error) {
}
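A compressed sketch of what the generated MarshalUnsafe above does for FUSEHeaderOut, assuming a caller that only needs an in-memory serialization (the values are made up):

    // Illustrative only: FUSEHeaderOut.Packed() delegates to f.Unique.Packed(),
    // which is always true for the scalar FUSEOpID, so MarshalUnsafe takes the
    // single-safecopy path rather than the field-by-field MarshalBytes fallback.
    hdr := FUSEHeaderOut{Len: 16, Error: 0, Unique: 1}
    dst := make([]byte, hdr.SizeBytes()) // 8 + (*FUSEOpID)(nil).SizeBytes()
    hdr.MarshalUnsafe(dst)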
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSECreateMeta) SizeBytes() int {
- return 16
+func (f *FUSEEntryOut) SizeBytes() int {
+ return 40 +
+ (*FUSEAttr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSECreateMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+func (f *FUSEEntryOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.NodeID))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Generation))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.EntryValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.EntryValidNSec))
dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNSec))
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSECreateMeta) UnmarshalBytes(src []byte) {
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEEntryOut) UnmarshalBytes(src []byte) {
+ f.NodeID = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Generation = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.EntryValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ f.AttrValidNSec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSECreateMeta) Packed() bool {
- return true
+func (f *FUSEEntryOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSECreateMeta) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEEntryOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSECreateMeta) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEEntryOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEEntryOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3022,13 +3021,23 @@ func (f *FUSECreateMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSECreateMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEEntryOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3044,7 +3053,15 @@ func (f *FUSECreateMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEEntryOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEEntryOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3060,53 +3077,45 @@ func (f *FUSECreateMeta) WriteTo(writer io.Writer) (int64, error) {
}
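A brief sketch of how the embedded FUSEAttr composes with the helpers generated above; the buffer sizing is the point, and the zero values are placeholders:

    // Illustrative only: SizeBytes sums the 40 fixed bytes with the embedded
    // FUSEAttr, so callers can size buffers without hard-coding FUSEAttr's width.
    var entry FUSEEntryOut
    buf := make([]byte, entry.SizeBytes()) // 40 + (*FUSEAttr)(nil).SizeBytes()
    entry.MarshalBytes(buf)                // trailing bytes are entry.Attr's encoding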
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitIn) SizeBytes() int {
- return 16
+func (f *FUSEWriteOut) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
+func (f *FUSEWriteOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitIn) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEWriteOut) UnmarshalBytes(src []byte) {
+ f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitIn) Packed() bool {
+func (f *FUSEWriteOut) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEWriteOut) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEWriteOut) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEWriteOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3123,13 +3132,13 @@ func (f *FUSEInitIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEWriteOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3145,7 +3154,7 @@ func (f *FUSEInitIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEWriteOut) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3161,82 +3170,53 @@ func (f *FUSEInitIn) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEInitOut) SizeBytes() int {
- return 32 +
- 4*8
+func (f *FUSEReleaseIn) SizeBytes() int {
+ return 24
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEInitOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Major))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Minor))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxReadahead))
- dst = dst[4:]
+func (f *FUSEReleaseIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
+ dst = dst[8:]
usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxBackground))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.CongestionThreshold))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MaxWrite))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.TimeGran))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReleaseFlags))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(f.MaxPages))
- dst = dst[2:]
- // Padding: dst[:sizeof(uint16)] ~= uint16(0)
- dst = dst[2:]
- // Padding: dst[:sizeof(uint32)*8] ~= [8]uint32{0}
- dst = dst[4*(8):]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEInitOut) UnmarshalBytes(src []byte) {
- f.Major = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Minor = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (f *FUSEReleaseIn) UnmarshalBytes(src []byte) {
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- f.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.ReleaseFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- // Padding: var _ uint16 ~= src[:sizeof(uint16)]
- src = src[2:]
- // Padding: ~ copy([8]uint32(f._), src[:sizeof(uint32)*8])
- src = src[4*(8):]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEInitOut) Packed() bool {
+func (f *FUSEReleaseIn) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEInitOut) MarshalUnsafe(dst []byte) {
+func (f *FUSEReleaseIn) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEInitOut) UnmarshalUnsafe(src []byte) {
+func (f *FUSEReleaseIn) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEReleaseIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3253,13 +3233,13 @@ func (f *FUSEInitOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEInitOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEReleaseIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3275,7 +3255,7 @@ func (f *FUSEInitOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, er
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEReleaseIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3291,71 +3271,101 @@ func (f *FUSEInitOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEGetAttrOut) SizeBytes() int {
- return 16 +
- (*FUSEAttr)(nil).SizeBytes()
+func (f *FUSESetAttrIn) SizeBytes() int {
+ return 88
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+func (f *FUSESetAttrIn) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Valid))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Size))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Atime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Mtime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Ctime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.MtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.CtimeNsec))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.GID))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
- dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
- f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (f *FUSESetAttrIn) UnmarshalBytes(src []byte) {
+ f.Valid = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Atime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Mtime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.Ctime = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.MtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.CtimeNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ f.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
- src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEGetAttrOut) Packed() bool {
- return f.Attr.Packed()
+func (f *FUSESetAttrIn) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
- if f.Attr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
- } else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes.
- f.MarshalBytes(dst)
- }
+func (f *FUSESetAttrIn) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
- if f.Attr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(f), src)
- } else {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- f.UnmarshalBytes(src)
- }
+func (f *FUSESetAttrIn) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- f.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (f *FUSESetAttrIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3372,23 +3382,13 @@ func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, lim
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSESetAttrIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- f.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (f *FUSESetAttrIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3404,15 +3404,7 @@ func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
- if !f.Attr.Packed() {
- // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, f.SizeBytes())
- f.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (f *FUSESetAttrIn) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3428,65 +3420,41 @@ func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEReadIn) SizeBytes() int {
- return 40
+//go:nosplit
+func (f *FUSEOpID) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEReadIn) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Fh))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Offset))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Size))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.ReadFlags))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.LockOwner))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Flags))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
+func (f *FUSEOpID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEReadIn) UnmarshalBytes(src []byte) {
- f.Fh = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Offset = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.ReadFlags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.LockOwner = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- f.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
+func (f *FUSEOpID) UnmarshalBytes(src []byte) {
+ *f = FUSEOpID(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEReadIn) Packed() bool {
+func (f *FUSEOpID) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEReadIn) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpID) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEReadIn) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpID) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3503,13 +3471,13 @@ func (f *FUSEReadIn) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEReadIn) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3525,7 +3493,7 @@ func (f *FUSEReadIn) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpID) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3533,7 +3501,7 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -3541,53 +3509,71 @@ func (f *FUSEReadIn) WriteTo(writer io.Writer) (int64, error) {
}
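For contrast with the struct types above, a tiny sketch of the scalar-newtype case; the value 42 is arbitrary:

    // Illustrative only: FUSEOpID marshals as a single 8-byte value in
    // usermem.ByteOrder, with no field walk and no padding.
    id := FUSEOpID(42)
    b := make([]byte, id.SizeBytes()) // 8
    id.MarshalBytes(b)
    var back FUSEOpID
    back.UnmarshalBytes(b) // back == 42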
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMknodMeta) SizeBytes() int {
- return 16
+func (f *FUSEGetAttrOut) SizeBytes() int {
+ return 16 +
+ (*FUSEAttr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMknodMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Rdev))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
+func (f *FUSEGetAttrOut) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.AttrValid))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.AttrValidNsec))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
+ f.Attr.MarshalBytes(dst[:f.Attr.SizeBytes()])
+ dst = dst[f.Attr.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMknodMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Rdev = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (f *FUSEGetAttrOut) UnmarshalBytes(src []byte) {
+ f.AttrValid = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ f.AttrValidNsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
+ f.Attr.UnmarshalBytes(src[:f.Attr.SizeBytes()])
+ src = src[f.Attr.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMknodMeta) Packed() bool {
- return true
+func (f *FUSEGetAttrOut) Packed() bool {
+ return f.Attr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMknodMeta) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(f))
+func (f *FUSEGetAttrOut) MarshalUnsafe(dst []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMknodMeta) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(f), src)
+func (f *FUSEGetAttrOut) UnmarshalUnsafe(src []byte) {
+ if f.Attr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEGetAttrOut) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3604,13 +3590,23 @@ func (f *FUSEMknodMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMknodMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEGetAttrOut) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3626,7 +3622,15 @@ func (f *FUSEMknodMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEGetAttrOut) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Attr.Packed() {
+ // Type FUSEGetAttrOut doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3642,45 +3646,41 @@ func (f *FUSEMknodMeta) WriteTo(writer io.Writer) (int64, error) {
}
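The "partially unmarshalled struct" note in the fallback paths above matters to callers. A hedged sketch of how a caller might guard against it; the function name and the CopyContext/address parameters are assumptions, not part of this file:

    // Illustrative only: on a short copy-in the fallback still unmarshals
    // whatever bytes were read, so only trust the result when err == nil.
    func readGetAttrOut(cc marshal.CopyContext, addr usermem.Addr) (FUSEGetAttrOut, error) {
        var out FUSEGetAttrOut
        if _, err := out.CopyIn(cc, addr); err != nil {
            return FUSEGetAttrOut{}, err // discard the possibly partial struct
        }
        return out, nil
    }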
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (f *FUSEMkdirMeta) SizeBytes() int {
- return 8
+//go:nosplit
+func (f *FUSEOpcode) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (f *FUSEMkdirMeta) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Mode))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Umask))
- dst = dst[4:]
+func (f *FUSEOpcode) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*f))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (f *FUSEMkdirMeta) UnmarshalBytes(src []byte) {
- f.Mode = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- f.Umask = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (f *FUSEOpcode) UnmarshalBytes(src []byte) {
+ *f = FUSEOpcode(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (f *FUSEMkdirMeta) Packed() bool {
+func (f *FUSEOpcode) Packed() bool {
+ // Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (f *FUSEMkdirMeta) MarshalUnsafe(dst []byte) {
+func (f *FUSEOpcode) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(f))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (f *FUSEMkdirMeta) UnmarshalUnsafe(src []byte) {
+func (f *FUSEOpcode) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(f), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FUSEOpcode) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3697,13 +3697,13 @@ func (f *FUSEMkdirMeta) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FUSEOpcode) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3719,7 +3719,7 @@ func (f *FUSEMkdirMeta) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int,
}
// WriteTo implements io.WriterTo.WriteTo.
-func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
+func (f *FUSEOpcode) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -3727,7 +3727,7 @@ func (f *FUSEMkdirMeta) WriteTo(writer io.Writer) (int64, error) {
hdr.Len = f.SizeBytes()
hdr.Cap = f.SizeBytes()
- length, err := writer.Write(buf)
+ length, err := w.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that f
// must live until the use above.
runtime.KeepAlive(f) // escapes: replaced by intrinsic.
@@ -4473,6 +4473,290 @@ func (i *IFConf) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (x *XTGetRevision) SizeBytes() int {
+ return 1 +
+ (*ExtensionName)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (x *XTGetRevision) MarshalBytes(dst []byte) {
+ x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
+ dst = dst[x.Name.SizeBytes():]
+ dst[0] = byte(x.Revision)
+ dst = dst[1:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (x *XTGetRevision) UnmarshalBytes(src []byte) {
+ x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
+ src = src[x.Name.SizeBytes():]
+ x.Revision = uint8(src[0])
+ src = src[1:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (x *XTGetRevision) Packed() bool {
+ return x.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (x *XTGetRevision) MarshalUnsafe(dst []byte) {
+ if x.Name.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(x))
+ } else {
+ // Type XTGetRevision doesn't have a packed layout in memory, fallback to MarshalBytes.
+ x.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (x *XTGetRevision) UnmarshalUnsafe(src []byte) {
+ if x.Name.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(x), src)
+ } else {
+ // Type XTGetRevision doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ x.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ x.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (x *XTGetRevision) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return x.CopyOutN(cc, addr, x.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (x *XTGetRevision) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ x.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) {
+ if !x.Name.Packed() {
+ // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, x.SizeBytes())
+ x.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
+ hdr.Len = x.SizeBytes()
+ hdr.Cap = x.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that x
+ // must live until the use above.
+ runtime.KeepAlive(x) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
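+A short usage sketch for the copy-out helpers just added; the function name and its parameters are hypothetical, and cc/addr would come from the calling task:
+
+    // Illustrative only: CopyOut is CopyOutN with the full SizeBytes(); a
+    // smaller limit truncates the write to the first `limit` marshalled bytes.
+    func writeRevision(cc marshal.CopyContext, addr usermem.Addr, x *XTGetRevision) (int, error) {
+        return x.CopyOutN(cc, addr, x.SizeBytes())
+    }
+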
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (i *IPTGetinfo) SizeBytes() int {
+ return 12 +
+ (*TableName)(nil).SizeBytes() +
+ 4*NF_INET_NUMHOOKS +
+ 4*NF_INET_NUMHOOKS
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *IPTGetinfo) MarshalBytes(dst []byte) {
+ i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
+ dst = dst[i.Name.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
+ dst = dst[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
+ dst = dst[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
+ dst = dst[4:]
+ }
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
+ i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
+ src = src[i.Name.SizeBytes():]
+ i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
+ i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ }
+ i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (i *IPTGetinfo) Packed() bool {
+ return i.Name.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (i *IPTGetinfo) MarshalUnsafe(dst []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
+ if i.Name.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Name.Packed() {
+ // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
+ // must live until the use above.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
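+One arithmetic note on the SizeBytes just added: with Linux's usual values of NF_INET_NUMHOOKS = 5 and a 32-byte TableName (both stated here as assumptions, since neither constant appears in this hunk), the total is 12 + 32 + 20 + 20 = 84 bytes. A one-line check:
+
+    // Illustrative only: size a scratch buffer for IPTGetinfo from the
+    // generated SizeBytes rather than a hand-computed 84.
+    var info IPTGetinfo
+    scratch := make([]byte, info.SizeBytes()) // 12 + TableName + 2*4*NF_INET_NUMHOOKS
+    _ = scratch
+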
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (i *IPTGetEntries) SizeBytes() int {
return 4 +
(*TableName)(nil).SizeBytes() +
@@ -5224,148 +5508,23 @@ func (x *XTCounters) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (x *XTGetRevision) SizeBytes() int {
- return 1 +
- (*ExtensionName)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (x *XTGetRevision) MarshalBytes(dst []byte) {
- x.Name.MarshalBytes(dst[:x.Name.SizeBytes()])
- dst = dst[x.Name.SizeBytes():]
- dst[0] = byte(x.Revision)
- dst = dst[1:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (x *XTGetRevision) UnmarshalBytes(src []byte) {
- x.Name.UnmarshalBytes(src[:x.Name.SizeBytes()])
- src = src[x.Name.SizeBytes():]
- x.Revision = uint8(src[0])
- src = src[1:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (x *XTGetRevision) Packed() bool {
- return x.Name.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (x *XTGetRevision) MarshalUnsafe(dst []byte) {
- if x.Name.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(x))
- } else {
- // Type XTGetRevision doesn't have a packed layout in memory, fallback to MarshalBytes.
- x.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (x *XTGetRevision) UnmarshalUnsafe(src []byte) {
- if x.Name.Packed() {
- safecopy.CopyOut(unsafe.Pointer(x), src)
- } else {
- // Type XTGetRevision doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- x.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (x *XTGetRevision) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !x.Name.Packed() {
- // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
- x.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
- hdr.Len = x.SizeBytes()
- hdr.Cap = x.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that x
- // must live until the use above.
- runtime.KeepAlive(x) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (x *XTGetRevision) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return x.CopyOutN(cc, addr, x.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (x *XTGetRevision) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !x.Name.Packed() {
- // Type XTGetRevision doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(x.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- x.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
- hdr.Len = x.SizeBytes()
- hdr.Cap = x.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that x
- // must live until the use above.
- runtime.KeepAlive(x) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (x *XTGetRevision) WriteTo(writer io.Writer) (int64, error) {
- if !x.Name.Packed() {
- // Type XTGetRevision doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, x.SizeBytes())
- x.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(x)))
- hdr.Len = x.SizeBytes()
- hdr.Cap = x.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that x
- // must live until the use above.
- runtime.KeepAlive(x) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IPTGetinfo) SizeBytes() int {
- return 12 +
+func (i *IP6TReplace) SizeBytes() int {
+ return 24 +
(*TableName)(nil).SizeBytes() +
4*NF_INET_NUMHOOKS +
4*NF_INET_NUMHOOKS
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IPTGetinfo) MarshalBytes(dst []byte) {
+func (i *IP6TReplace) MarshalBytes(dst []byte) {
i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
dst = dst[i.Name.SizeBytes():]
usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ dst = dst[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
dst = dst[4:]
@@ -5374,18 +5533,22 @@ func (i *IPTGetinfo) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
dst = dst[4:]
}
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
+func (i *IP6TReplace) UnmarshalBytes(src []byte) {
i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
src = src[i.Name.SizeBytes():]
i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
@@ -5394,43 +5557,43 @@ func (i *IPTGetinfo) UnmarshalBytes(src []byte) {
i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
- i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *IPTGetinfo) Packed() bool {
+func (i *IP6TReplace) Packed() bool {
return i.Name.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IPTGetinfo) MarshalUnsafe(dst []byte) {
+func (i *IP6TReplace) MarshalUnsafe(dst []byte) {
if i.Name.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
- // Type IPTGetinfo doesn't have a packed layout in memory, fallback to MarshalBytes.
+ // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes.
i.MarshalBytes(dst)
}
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IPTGetinfo) UnmarshalUnsafe(src []byte) {
+func (i *IP6TReplace) UnmarshalUnsafe(src []byte) {
if i.Name.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
- // Type IPTGetinfo doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes.
i.UnmarshalBytes(src)
}
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
@@ -5452,15 +5615,15 @@ func (i *IPTGetinfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *IPTGetinfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
@@ -5484,9 +5647,9 @@ func (i *IPTGetinfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *IPTGetinfo) WriteTo(writer io.Writer) (int64, error) {
+func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) {
if !i.Name.Packed() {
- // Type IPTGetinfo doesn't have a packed layout in memory, fall back to MarshalBytes.
+ // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, i.SizeBytes())
i.MarshalBytes(buf)
length, err := writer.Write(buf)
@@ -5856,169 +6019,6 @@ func (i *IP6TIP) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *IP6TReplace) SizeBytes() int {
- return 24 +
- (*TableName)(nil).SizeBytes() +
- 4*NF_INET_NUMHOOKS +
- 4*NF_INET_NUMHOOKS
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *IP6TReplace) MarshalBytes(dst []byte) {
- i.Name.MarshalBytes(dst[:i.Name.SizeBytes()])
- dst = dst[i.Name.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.ValidHooks))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumEntries))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Size))
- dst = dst[4:]
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.HookEntry[idx]))
- dst = dst[4:]
- }
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.Underflow[idx]))
- dst = dst[4:]
- }
- usermem.ByteOrder.PutUint32(dst[:4], uint32(i.NumCounters))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(i.Counters))
- dst = dst[8:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *IP6TReplace) UnmarshalBytes(src []byte) {
- i.Name.UnmarshalBytes(src[:i.Name.SizeBytes()])
- src = src[i.Name.SizeBytes():]
- i.ValidHooks = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.NumEntries = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.HookEntry[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- }
- for idx := 0; idx < NF_INET_NUMHOOKS; idx++ {
- i.Underflow[idx] = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- }
- i.NumCounters = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- i.Counters = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (i *IP6TReplace) Packed() bool {
- return i.Name.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *IP6TReplace) MarshalUnsafe(dst []byte) {
- if i.Name.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type IP6TReplace doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *IP6TReplace) UnmarshalUnsafe(src []byte) {
- if i.Name.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type IP6TReplace doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (i *IP6TReplace) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Name.Packed() {
- // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (i *IP6TReplace) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (i *IP6TReplace) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Name.Packed() {
- // Type IP6TReplace doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (i *IP6TReplace) WriteTo(writer io.Writer) (int64, error) {
- if !i.Name.Packed() {
- // Type IP6TReplace doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
- // must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SockAddrNetlink) SizeBytes() int {
return 12
}
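
The generated methods above all follow the same split: MarshalBytes/UnmarshalBytes serialize field by field at fixed wire offsets, while MarshalUnsafe/UnmarshalUnsafe copy the struct's memory in one shot whenever Packed() reports that the in-memory layout already matches the wire layout. A minimal standalone sketch of that split, using only the Go standard library; the linger type, the little-endian byte order, and the lower-case method names are illustrative assumptions rather than the gVisor code:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// linger mirrors a two-int32 structure like the Linger type marshalled in
// this file; the name and package are illustrative only.
type linger struct {
	OnOff  int32
	Linger int32
}

// marshalBytes writes each field at its wire offset, the way the generated
// MarshalBytes methods do.
func (l *linger) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], uint32(l.OnOff))
	binary.LittleEndian.PutUint32(dst[4:8], uint32(l.Linger))
}

// marshalUnsafe copies the struct's memory directly; this is only valid
// because the layout is "packed": no implicit padding, and the host byte
// order is taken to be the wire byte order.
func (l *linger) marshalUnsafe(dst []byte) {
	copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(l)), unsafe.Sizeof(*l)))
}

func main() {
	l := linger{OnOff: 1, Linger: 30}
	a := make([]byte, 8)
	b := make([]byte, 8)
	l.marshalBytes(a)
	l.marshalUnsafe(b)
	fmt.Println(a, b) // identical on little-endian hosts
}
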
@@ -7008,98 +7008,57 @@ func (s *SemInfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *ShmidDS) SizeBytes() int {
- return 40 +
- (*IPCPerm)(nil).SizeBytes() +
- (*TimeT)(nil).SizeBytes() +
- (*TimeT)(nil).SizeBytes() +
- (*TimeT)(nil).SizeBytes()
+func (s *ShmParams) SizeBytes() int {
+ return 40
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *ShmidDS) MarshalBytes(dst []byte) {
- s.ShmPerm.MarshalBytes(dst[:s.ShmPerm.SizeBytes()])
- dst = dst[s.ShmPerm.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSegsz))
+func (s *ShmParams) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMax))
dst = dst[8:]
- s.ShmAtime.MarshalBytes(dst[:s.ShmAtime.SizeBytes()])
- dst = dst[s.ShmAtime.SizeBytes():]
- s.ShmDtime.MarshalBytes(dst[:s.ShmDtime.SizeBytes()])
- dst = dst[s.ShmDtime.SizeBytes():]
- s.ShmCtime.MarshalBytes(dst[:s.ShmCtime.SizeBytes()])
- dst = dst[s.ShmCtime.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmCpid))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmLpid))
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmNattach))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMin))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused4))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMni))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused5))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSeg))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmAll))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *ShmidDS) UnmarshalBytes(src []byte) {
- s.ShmPerm.UnmarshalBytes(src[:s.ShmPerm.SizeBytes()])
- src = src[s.ShmPerm.SizeBytes():]
- s.ShmSegsz = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (s *ShmParams) UnmarshalBytes(src []byte) {
+ s.ShmMax = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmAtime.UnmarshalBytes(src[:s.ShmAtime.SizeBytes()])
- src = src[s.ShmAtime.SizeBytes():]
- s.ShmDtime.UnmarshalBytes(src[:s.ShmDtime.SizeBytes()])
- src = src[s.ShmDtime.SizeBytes():]
- s.ShmCtime.UnmarshalBytes(src[:s.ShmCtime.SizeBytes()])
- src = src[s.ShmCtime.SizeBytes():]
- s.ShmCpid = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.ShmLpid = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.ShmNattach = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmMin = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Unused4 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmMni = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Unused5 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmSeg = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.ShmAll = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *ShmidDS) Packed() bool {
- return s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed()
+func (s *ShmParams) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *ShmidDS) MarshalUnsafe(dst []byte) {
- if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
- // Type ShmidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
- s.MarshalBytes(dst)
- }
+func (s *ShmParams) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *ShmidDS) UnmarshalUnsafe(src []byte) {
- if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
- // Type ShmidDS doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
+func (s *ShmParams) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
- // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7116,23 +7075,13 @@ func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmParams) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
- // Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7148,15 +7097,7 @@ func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *ShmidDS) WriteTo(writer io.Writer) (int64, error) {
- if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
- // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (s *ShmParams) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7172,57 +7113,66 @@ func (s *ShmidDS) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *ShmParams) SizeBytes() int {
- return 40
+func (s *ShmInfo) SizeBytes() int {
+ return 44 +
+ 1*4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *ShmParams) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMax))
+func (s *ShmInfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
+ dst = dst[1*(4):]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMin))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmMni))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSeg))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmAll))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *ShmParams) UnmarshalBytes(src []byte) {
- s.ShmMax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (s *ShmInfo) UnmarshalBytes(src []byte) {
+ s.UsedIDs = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4])
+ src = src[1*(4):]
+ s.ShmTot = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmMin = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmRss = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmMni = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmSwp = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmSeg = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SwapAttempts = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmAll = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.SwapSuccesses = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *ShmParams) Packed() bool {
+func (s *ShmInfo) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *ShmParams) MarshalUnsafe(dst []byte) {
+func (s *ShmInfo) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *ShmParams) UnmarshalUnsafe(src []byte) {
+func (s *ShmInfo) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7239,13 +7189,13 @@ func (s *ShmParams) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmParams) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7261,7 +7211,7 @@ func (s *ShmParams) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, erro
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *ShmParams) WriteTo(writer io.Writer) (int64, error) {
+func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7277,66 +7227,98 @@ func (s *ShmParams) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *ShmInfo) SizeBytes() int {
- return 44 +
- 1*4
+func (s *ShmidDS) SizeBytes() int {
+ return 40 +
+ (*IPCPerm)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes() +
+ (*TimeT)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *ShmInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.UsedIDs))
- dst = dst[4:]
- // Padding: dst[:sizeof(byte)*4] ~= [4]byte{0}
- dst = dst[1*(4):]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmTot))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmRss))
+func (s *ShmidDS) MarshalBytes(dst []byte) {
+ s.ShmPerm.MarshalBytes(dst[:s.ShmPerm.SizeBytes()])
+ dst = dst[s.ShmPerm.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSegsz))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmSwp))
+ s.ShmAtime.MarshalBytes(dst[:s.ShmAtime.SizeBytes()])
+ dst = dst[s.ShmAtime.SizeBytes():]
+ s.ShmDtime.MarshalBytes(dst[:s.ShmDtime.SizeBytes()])
+ dst = dst[s.ShmDtime.SizeBytes():]
+ s.ShmCtime.MarshalBytes(dst[:s.ShmCtime.SizeBytes()])
+ dst = dst[s.ShmCtime.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmCpid))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.ShmLpid))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.ShmNattach))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapAttempts))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused4))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.SwapSuccesses))
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Unused5))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *ShmInfo) UnmarshalBytes(src []byte) {
- s.UsedIDs = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: ~ copy([4]byte(s._), src[:sizeof(byte)*4])
- src = src[1*(4):]
- s.ShmTot = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.ShmRss = uint64(usermem.ByteOrder.Uint64(src[:8]))
+func (s *ShmidDS) UnmarshalBytes(src []byte) {
+ s.ShmPerm.UnmarshalBytes(src[:s.ShmPerm.SizeBytes()])
+ src = src[s.ShmPerm.SizeBytes():]
+ s.ShmSegsz = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.ShmSwp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.ShmAtime.UnmarshalBytes(src[:s.ShmAtime.SizeBytes()])
+ src = src[s.ShmAtime.SizeBytes():]
+ s.ShmDtime.UnmarshalBytes(src[:s.ShmDtime.SizeBytes()])
+ src = src[s.ShmDtime.SizeBytes():]
+ s.ShmCtime.UnmarshalBytes(src[:s.ShmCtime.SizeBytes()])
+ src = src[s.ShmCtime.SizeBytes():]
+ s.ShmCpid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ShmLpid = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.ShmNattach = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SwapAttempts = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Unused4 = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.SwapSuccesses = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Unused5 = uint64(usermem.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *ShmInfo) Packed() bool {
- return true
+func (s *ShmidDS) Packed() bool {
+ return s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *ShmInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
+func (s *ShmidDS) MarshalUnsafe(dst []byte) {
+ if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type ShmidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *ShmInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
+func (s *ShmidDS) UnmarshalUnsafe(src []byte) {
+ if s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type ShmidDS doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *ShmidDS) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7353,13 +7335,23 @@ func (s *ShmInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int)
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *ShmInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmidDS) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *ShmidDS) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7375,7 +7367,15 @@ func (s *ShmInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error)
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *ShmInfo) WriteTo(writer io.Writer) (int64, error) {
+func (s *ShmidDS) WriteTo(writer io.Writer) (int64, error) {
+ if !s.ShmAtime.Packed() && s.ShmCtime.Packed() && s.ShmDtime.Packed() && s.ShmPerm.Packed() {
+ // Type ShmidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7730,168 +7730,250 @@ func (s *SignalfdSiginfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrLink) SizeBytes() int {
- return 12 +
- 1*8
+func (l *Linger) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrLink) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
+func (l *Linger) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
dst = dst[4:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
- dst = dst[2:]
- dst[0] = byte(s.PacketType)
- dst = dst[1:]
- dst[0] = byte(s.HardwareAddrLen)
- dst = dst[1:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s.HardwareAddr[idx])
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (l *Linger) UnmarshalBytes(src []byte) {
+ l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ l.Linger = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (l *Linger) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (l *Linger) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(l))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (l *Linger) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(l), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return l.CopyOutN(cc, addr, l.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
+ hdr.Len = l.SizeBytes()
+ hdr.Cap = l.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that l
+ // must live until the use above.
+ runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (i *InetAddr) SizeBytes() int {
+ return 1 * 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (i *InetAddr) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 4; idx++ {
+ dst[0] = byte(i[idx])
dst = dst[1:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrLink) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.PacketType = src[0]
- src = src[1:]
- s.HardwareAddrLen = src[0]
- src = src[1:]
- for idx := 0; idx < 8; idx++ {
- s.HardwareAddr[idx] = src[0]
+func (i *InetAddr) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 4; idx++ {
+ i[idx] = src[0]
src = src[1:]
}
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrLink) Packed() bool {
+func (i *InetAddr) Packed() bool {
+ // Array newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrLink) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
+func (i *InetAddr) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
+func (i *InetAddr) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
+func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
+func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrUnix) SizeBytes() int {
- return 2 +
- 1*UnixPathMax
+func (s *SockAddrInet6) SizeBytes() int {
+ return 12 +
+ 1*16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrUnix) MarshalBytes(dst []byte) {
+func (s *SockAddrInet6) MarshalBytes(dst []byte) {
usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
dst = dst[2:]
- for idx := 0; idx < UnixPathMax; idx++ {
- dst[0] = byte(s.Path[idx])
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo))
+ dst = dst[4:]
+ for idx := 0; idx < 16; idx++ {
+ dst[0] = byte(s.Addr[idx])
dst = dst[1:]
}
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id))
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
+func (s *SockAddrInet6) UnmarshalBytes(src []byte) {
s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
src = src[2:]
- for idx := 0; idx < UnixPathMax; idx++ {
- s.Path[idx] = int8(src[0])
+ s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Flowinfo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 16; idx++ {
+ s.Addr[idx] = src[0]
src = src[1:]
}
+ s.Scope_id = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrUnix) Packed() bool {
+func (s *SockAddrInet6) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrUnix) MarshalUnsafe(dst []byte) {
+func (s *SockAddrInet6) MarshalUnsafe(dst []byte) {
safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
+func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) {
safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7908,13 +7990,13 @@ func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7930,7 +8012,7 @@ func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, e
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
+func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -7946,95 +8028,120 @@ func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (l *Linger) SizeBytes() int {
- return 8
+func (s *SockAddrLink) SizeBytes() int {
+ return 12 +
+ 1*8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (l *Linger) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.OnOff))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(l.Linger))
+func (s *SockAddrLink) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Protocol))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.InterfaceIndex))
dst = dst[4:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.ARPHardwareType))
+ dst = dst[2:]
+ dst[0] = byte(s.PacketType)
+ dst = dst[1:]
+ dst[0] = byte(s.HardwareAddrLen)
+ dst = dst[1:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s.HardwareAddr[idx])
+ dst = dst[1:]
+ }
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (l *Linger) UnmarshalBytes(src []byte) {
- l.OnOff = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- l.Linger = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (s *SockAddrLink) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Protocol = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.InterfaceIndex = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
+ s.ARPHardwareType = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.PacketType = src[0]
+ src = src[1:]
+ s.HardwareAddrLen = src[0]
+ src = src[1:]
+ for idx := 0; idx < 8; idx++ {
+ s.HardwareAddr[idx] = src[0]
+ src = src[1:]
+ }
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (l *Linger) Packed() bool {
+func (s *SockAddrLink) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (l *Linger) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(l))
+func (s *SockAddrLink) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (l *Linger) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(l), src)
+func (s *SockAddrLink) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (l *Linger) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrLink) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (l *Linger) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return l.CopyOutN(cc, addr, l.SizeBytes())
+func (s *SockAddrLink) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (l *Linger) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrLink) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (l *Linger) WriteTo(writer io.Writer) (int64, error) {
+func (s *SockAddrLink) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(l)))
- hdr.Len = l.SizeBytes()
- hdr.Cap = l.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that l
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(l) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
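
Every CopyOutN/CopyIn/WriteTo body in this file aliases the receiver's memory as a []byte through reflect.SliceHeader and gohacks.Noescape, then calls runtime.KeepAlive so the receiver cannot be reclaimed while the aliased slice is still in use by the copy or write. A self-contained sketch of the same pattern built on unsafe.Slice from the standard library; the point type and writeTo helper are assumptions for illustration, not part of the ABI above:

package main

import (
	"bytes"
	"fmt"
	"io"
	"runtime"
	"unsafe"
)

// point stands in for any packed, fixed-size structure.
type point struct {
	X, Y int32
}

// writeTo mirrors the generated WriteTo methods: it views p's memory as a
// byte slice, hands it to the writer, and keeps p alive until the write has
// finished using that aliased memory.
func (p *point) writeTo(w io.Writer) (int64, error) {
	buf := unsafe.Slice((*byte)(unsafe.Pointer(p)), unsafe.Sizeof(*p))
	n, err := w.Write(buf)
	runtime.KeepAlive(p) // p must outlive the use of buf above.
	return int64(n), err
}

func main() {
	var out bytes.Buffer
	p := &point{X: 1, Y: 2}
	if _, err := p.writeTo(&out); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out.Bytes()) // byte order follows the host.
}
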
@@ -8308,112 +8415,99 @@ func (t *TCPInfo) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SockAddrInet6) SizeBytes() int {
- return 12 +
- 1*16
+func (c *ControlMessageCredentials) SizeBytes() int {
+ return 12
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SockAddrInet6) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
- dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Port))
- dst = dst[2:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flowinfo))
+func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
dst = dst[4:]
- for idx := 0; idx < 16; idx++ {
- dst[0] = byte(s.Addr[idx])
- dst = dst[1:]
- }
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Scope_id))
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SockAddrInet6) UnmarshalBytes(src []byte) {
- s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Port = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
- s.Flowinfo = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
+ c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
- for idx := 0; idx < 16; idx++ {
- s.Addr[idx] = src[0]
- src = src[1:]
- }
- s.Scope_id = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (s *SockAddrInet6) Packed() bool {
+func (c *ControlMessageCredentials) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SockAddrInet6) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
+func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(c))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SockAddrInet6) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
+func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(c), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SockAddrInet6) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SockAddrInet6) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
+func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return c.CopyOutN(cc, addr, c.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SockAddrInet6) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (s *SockAddrInet6) WriteTo(writer io.Writer) (int64, error) {
+func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
+ hdr.Len = c.SizeBytes()
+ hdr.Cap = c.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
+ // Since we bypassed the compiler's escape analysis, indicate that c
// must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(c) // escapes: replaced by intrinsic.
return int64(length), err
}
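
Types that embed other marshallable fields, such as ShmidDS earlier in this file, compose Packed() by AND-ing the embedded fields' answers and only take the raw-copy fast path when every field reports a packed layout; otherwise they fall back to the field-by-field routines. A short sketch of that composition under assumed, illustrative type names:

package main

import "fmt"

// marshallable is the subset of the interface relevant here: whether the
// in-memory layout can be copied byte-for-byte.
type marshallable interface {
	Packed() bool
}

// timeT stands in for an embedded fixed-layout field.
type timeT struct{ Sec int64 }

func (timeT) Packed() bool { return true }

// record stands in for an outer type such as ShmidDS: it is packed only if
// every embedded marshallable field is, so the unsafe fast path is taken
// only when a raw copy would be layout-correct.
type record struct {
	Atime, Dtime, Ctime timeT
}

func (r record) Packed() bool {
	return r.Atime.Packed() && r.Dtime.Packed() && r.Ctime.Packed()
}

func marshal(m marshallable) string {
	if m.Packed() {
		return "fast path: raw memory copy"
	}
	return "fallback: field-by-field MarshalBytes"
}

func main() {
	fmt.Println(marshal(record{}))
}
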
@@ -8651,144 +8745,162 @@ func (i *Inet6Addr) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (c *ControlMessageCredentials) SizeBytes() int {
- return 12
+func (s *SockAddrUnix) SizeBytes() int {
+ return 2 +
+ 1*UnixPathMax
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (c *ControlMessageCredentials) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.PID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.UID))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(c.GID))
- dst = dst[4:]
+func (s *SockAddrUnix) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Family))
+ dst = dst[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ dst[0] = byte(s.Path[idx])
+ dst = dst[1:]
+ }
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (c *ControlMessageCredentials) UnmarshalBytes(src []byte) {
- c.PID = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.UID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- c.GID = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
+func (s *SockAddrUnix) UnmarshalBytes(src []byte) {
+ s.Family = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ for idx := 0; idx < UnixPathMax; idx++ {
+ s.Path[idx] = int8(src[0])
+ src = src[1:]
+ }
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (c *ControlMessageCredentials) Packed() bool {
+func (s *SockAddrUnix) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (c *ControlMessageCredentials) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(c))
+func (s *SockAddrUnix) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (c *ControlMessageCredentials) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(c), src)
+func (s *SockAddrUnix) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SockAddrUnix) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (c *ControlMessageCredentials) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return c.CopyOutN(cc, addr, c.SizeBytes())
+func (s *SockAddrUnix) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (c *ControlMessageCredentials) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SockAddrUnix) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (c *ControlMessageCredentials) WriteTo(writer io.Writer) (int64, error) {
+func (s *SockAddrUnix) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(c)))
- hdr.Len = c.SizeBytes()
- hdr.Cap = c.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that c
+ // Since we bypassed the compiler's escape analysis, indicate that s
// must live until the use above.
- runtime.KeepAlive(c) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
return int64(length), err
}
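SockAddrUnix above is marshalled field by field: a 2-byte Family followed by a fixed UnixPathMax-byte Path copied one byte at a time. A self-contained round-trip sketch of that layout, assuming a hypothetical pathMax of 8 in place of UnixPathMax and little-endian order in place of usermem.ByteOrder:

package main

import (
	"encoding/binary"
	"fmt"
)

const pathMax = 8 // stand-in for UnixPathMax (108 in the real ABI).

type sockAddrUnix struct {
	Family uint16
	Path   [pathMax]int8
}

func (s *sockAddrUnix) marshal(dst []byte) {
	binary.LittleEndian.PutUint16(dst[:2], s.Family)
	dst = dst[2:]
	for i := 0; i < pathMax; i++ {
		dst[0] = byte(s.Path[i])
		dst = dst[1:]
	}
}

func (s *sockAddrUnix) unmarshal(src []byte) {
	s.Family = binary.LittleEndian.Uint16(src[:2])
	src = src[2:]
	for i := 0; i < pathMax; i++ {
		s.Path[i] = int8(src[0])
		src = src[1:]
	}
}

func main() {
	in := sockAddrUnix{Family: 1 /* AF_UNIX */}
	copy(in.Path[:], []int8{'/', 't', 'm', 'p'})
	buf := make([]byte, 2+pathMax)
	in.marshal(buf)

	var out sockAddrUnix
	out.unmarshal(buf)
	fmt.Println(out.Family, out.Path == in.Path) // 1 true
}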
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (i *InetAddr) SizeBytes() int {
- return 1 * 4
+func (i *ItimerVal) SizeBytes() int {
+ return 0 +
+ (*Timeval)(nil).SizeBytes() +
+ (*Timeval)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *InetAddr) MarshalBytes(dst []byte) {
- for idx := 0; idx < 4; idx++ {
- dst[0] = byte(i[idx])
- dst = dst[1:]
- }
+func (i *ItimerVal) MarshalBytes(dst []byte) {
+ i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
+ dst = dst[i.Interval.SizeBytes():]
+ i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
+ dst = dst[i.Value.SizeBytes():]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *InetAddr) UnmarshalBytes(src []byte) {
- for idx := 0; idx < 4; idx++ {
- i[idx] = src[0]
- src = src[1:]
- }
+func (i *ItimerVal) UnmarshalBytes(src []byte) {
+ i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
+ src = src[i.Interval.SizeBytes():]
+ i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
+ src = src[i.Value.SizeBytes():]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *InetAddr) Packed() bool {
- // Array newtypes are always packed.
- return true
+func (i *ItimerVal) Packed() bool {
+ return i.Interval.Packed() && i.Value.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *InetAddr) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (i *ItimerVal) MarshalUnsafe(dst []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
+ i.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *InetAddr) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
+ if i.Interval.Packed() && i.Value.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
+ } else {
+ // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ i.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ i.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8805,13 +8917,23 @@ func (i *InetAddr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *InetAddr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ i.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8827,7 +8949,15 @@ func (i *InetAddr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
+func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
+ if !i.Interval.Packed() && i.Value.Packed() {
+ // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, i.SizeBytes())
+ i.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -8835,7 +8965,7 @@ func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
hdr.Len = i.SizeBytes()
hdr.Cap = i.SizeBytes()
- length, err := w.Write(buf)
+ length, err := writer.Write(buf)
// Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
runtime.KeepAlive(i) // escapes: replaced by intrinsic.
@@ -8843,91 +8973,95 @@ func (i *InetAddr) WriteTo(w io.Writer) (int64, error) {
}
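ItimerVal above shows how composite types are handled: SizeBytes, MarshalBytes and UnmarshalBytes delegate to the nested Timeval fields and advance the buffer between them, while the unsafe and copy paths fall back to the byte-wise versions whenever a field is not packed. A simplified sketch of that composition follows; timeval, itimerVal and the lower-case method names are stand-ins, not the generated marshal.Marshallable API.

package main

import (
	"encoding/binary"
	"fmt"
)

// timeval is a stand-in for linux.Timeval: two int64 fields, 16 bytes.
type timeval struct {
	Sec, Usec int64
}

func (t *timeval) sizeBytes() int { return 16 }

func (t *timeval) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(t.Sec))
	binary.LittleEndian.PutUint64(dst[8:16], uint64(t.Usec))
}

// itimerVal composes two timevals the same way the generated
// ItimerVal.MarshalBytes does: marshal one field, advance dst, marshal
// the next.
type itimerVal struct {
	Interval, Value timeval
}

func (i *itimerVal) sizeBytes() int {
	return i.Interval.sizeBytes() + i.Value.sizeBytes()
}

func (i *itimerVal) marshalBytes(dst []byte) {
	i.Interval.marshalBytes(dst[:i.Interval.sizeBytes()])
	dst = dst[i.Interval.sizeBytes():]
	i.Value.marshalBytes(dst[:i.Value.sizeBytes()])
}

func main() {
	it := itimerVal{Interval: timeval{Sec: 1}, Value: timeval{Sec: 2, Usec: 500000}}
	buf := make([]byte, it.sizeBytes())
	it.marshalBytes(buf)
	fmt.Println(len(buf), binary.LittleEndian.Uint64(buf[16:24])) // 32 2
}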
// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (t *TimeT) SizeBytes() int {
- return 8
+func (u *Utime) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *TimeT) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+func (u *Utime) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
+ dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *TimeT) UnmarshalBytes(src []byte) {
- *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (u *Utime) UnmarshalBytes(src []byte) {
+ u.Actime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (t *TimeT) Packed() bool {
- // Scalar newtypes are always packed.
+func (u *Utime) Packed() bool {
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *TimeT) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(t))
+func (u *Utime) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *TimeT) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(t), src)
+func (u *Utime) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return t.CopyOutN(cc, addr, t.SizeBytes())
+func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that t
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
- hdr.Len = t.SizeBytes()
- hdr.Cap = t.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that t
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
@@ -9509,132 +9643,188 @@ func (t *TimerID) WriteTo(w io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (i *ItimerVal) SizeBytes() int {
- return 0 +
- (*Timeval)(nil).SizeBytes() +
- (*Timeval)(nil).SizeBytes()
+func (sxts *StatxTimestamp) SizeBytes() int {
+ return 16
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *ItimerVal) MarshalBytes(dst []byte) {
- i.Interval.MarshalBytes(dst[:i.Interval.SizeBytes()])
- dst = dst[i.Interval.SizeBytes():]
- i.Value.MarshalBytes(dst[:i.Value.SizeBytes()])
- dst = dst[i.Value.SizeBytes():]
+func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(int32)] ~= int32(0)
+ dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *ItimerVal) UnmarshalBytes(src []byte) {
- i.Interval.UnmarshalBytes(src[:i.Interval.SizeBytes()])
- src = src[i.Interval.SizeBytes():]
- i.Value.UnmarshalBytes(src[:i.Value.SizeBytes()])
- src = src[i.Value.SizeBytes():]
+func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
+ sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ int32 ~= src[:sizeof(int32)]
+ src = src[4:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *ItimerVal) Packed() bool {
- return i.Interval.Packed() && i.Value.Packed()
+func (sxts *StatxTimestamp) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *ItimerVal) MarshalUnsafe(dst []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to MarshalBytes.
- i.MarshalBytes(dst)
- }
+func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(sxts))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *ItimerVal) UnmarshalUnsafe(src []byte) {
- if i.Interval.Packed() && i.Value.Packed() {
- safecopy.CopyOut(unsafe.Pointer(i), src)
- } else {
- // Type ItimerVal doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- i.UnmarshalBytes(src)
- }
+func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(sxts), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *ItimerVal) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- i.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *ItimerVal) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *ItimerVal) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- i.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *ItimerVal) WriteTo(writer io.Writer) (int64, error) {
- if !i.Interval.Packed() && i.Value.Packed() {
- // Type ItimerVal doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, i.SizeBytes())
- i.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
+ hdr.Len = sxts.SizeBytes()
+ hdr.Cap = sxts.SizeBytes()
length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that sxts
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
+func (t *TimeT) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (t *TimeT) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*t))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (t *TimeT) UnmarshalBytes(src []byte) {
+ *t = TimeT(int64(usermem.ByteOrder.Uint64(src[:8])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (t *TimeT) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (t *TimeT) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(t))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (t *TimeT) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(t), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (t *TimeT) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (t *TimeT) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return t.CopyOutN(cc, addr, t.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (t *TimeT) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (t *TimeT) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
+ hdr.Len = t.SizeBytes()
+ hdr.Cap = t.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that t
+ // must live until the use above.
+ runtime.KeepAlive(t) // escapes: replaced by intrinsic.
return int64(length), err
}
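TimeT, like the primitive Int*/Uint* newtypes further down in this change, is a scalar newtype: it is always packed, marshals with a single fixed-width store, and unmarshals with the matching load. A tiny round-trip sketch under the same assumptions as before (timeT is a stand-in type, little-endian stands in for usermem.ByteOrder):

package main

import (
	"encoding/binary"
	"fmt"
)

// timeT is a stand-in for linux.TimeT: a scalar newtype over int64.
type timeT int64

func (t *timeT) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(*t))
}

func (t *timeT) unmarshalBytes(src []byte) {
	*t = timeT(int64(binary.LittleEndian.Uint64(src[:8])))
}

func main() {
	in := timeT(1607817600) // 2020-12-13T00:00:00Z
	buf := make([]byte, 8)
	in.marshalBytes(buf)

	var out timeT
	out.unmarshalBytes(buf)
	fmt.Println(out == in) // true
}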
@@ -9868,196 +10058,6 @@ func (t *Tms) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (sxts *StatxTimestamp) SizeBytes() int {
- return 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (sxts *StatxTimestamp) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(sxts.Sec))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(sxts.Nsec))
- dst = dst[4:]
- // Padding: dst[:sizeof(int32)] ~= int32(0)
- dst = dst[4:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (sxts *StatxTimestamp) UnmarshalBytes(src []byte) {
- sxts.Sec = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- sxts.Nsec = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ int32 ~= src[:sizeof(int32)]
- src = src[4:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (sxts *StatxTimestamp) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (sxts *StatxTimestamp) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(sxts))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (sxts *StatxTimestamp) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(sxts), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return sxts.CopyOutN(cc, addr, sxts.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (sxts *StatxTimestamp) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (sxts *StatxTimestamp) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(sxts)))
- hdr.Len = sxts.SizeBytes()
- hdr.Cap = sxts.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that sxts
- // must live until the use above.
- runtime.KeepAlive(sxts) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *Utime) SizeBytes() int {
- return 16
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Utime) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Actime))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Modtime))
- dst = dst[8:]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Utime) UnmarshalBytes(src []byte) {
- u.Actime = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Modtime = int64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *Utime) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Utime) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Utime) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *Utime) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *Utime) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *Utime) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *Utime) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (w *Winsize) SizeBytes() int {
return 8
}
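StatxTimestamp, relocated in this file, is one of the types whose wire layout carries explicit padding: 8 bytes of Sec, 4 bytes of Nsec, then 4 unused bytes so the marshalled size matches the 16-byte in-memory size. A short sketch of that layout; statxTimestamp and marshal are illustrative names, and the padding bytes are simply left zeroed, as the generated MarshalBytes does.

package main

import (
	"encoding/binary"
	"fmt"
)

// statxTimestamp mirrors the 16-byte layout marshalled above: int64 Sec,
// uint32 Nsec, then 4 bytes of trailing padding.
type statxTimestamp struct {
	Sec  int64
	Nsec uint32
	_    int32 // padding
}

func marshal(ts *statxTimestamp, dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], uint64(ts.Sec))
	dst = dst[8:]
	binary.LittleEndian.PutUint32(dst[:4], ts.Nsec)
	dst = dst[4:]
	// Padding: leave dst[:4] zeroed.
}

func main() {
	buf := make([]byte, 16)
	marshal(&statxTimestamp{Sec: 1607817600, Nsec: 42}, buf)
	fmt.Println(binary.LittleEndian.Uint32(buf[8:12]), buf[12:16]) // 42 [0 0 0 0]
}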
diff --git a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
index e198e8ac2..0ddb07673 100644
--- a/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
+++ b/pkg/marshal/primitive/primitive_abi_autogen_unsafe.go
@@ -25,101 +25,101 @@ var _ marshal.Marshallable = (*Uint8)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int16) SizeBytes() int {
- return 2
+func (u *Uint32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
+func (u *Uint32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int16) UnmarshalBytes(src []byte) {
- *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
+func (u *Uint32) UnmarshalBytes(src []byte) {
+ *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int16) Packed() bool {
+func (u *Uint32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int16) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (u *Uint32) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int16) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (u *Uint32) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int16) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
+// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
//go:nosplit
-func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
+func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -138,14 +138,14 @@ func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (i
return length, err
}
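The slice helpers such as CopyUint32SliceIn size their transfer as count * SizeBytes and move the whole run in one call. A safe sketch of the same contract, reading from an io.Reader and decoding element by element rather than aliasing the slice's backing array as the generated code does; copyUint32SliceIn is an illustrative name.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// copyUint32SliceIn reads len(dst)*4 bytes from r and decodes them into
// dst, returning the number of bytes consumed.
func copyUint32SliceIn(r io.Reader, dst []uint32) (int, error) {
	if len(dst) == 0 {
		return 0, nil
	}
	const size = 4 // (*Uint32)(nil).SizeBytes()
	buf := make([]byte, size*len(dst))
	n, err := io.ReadFull(r, buf)
	for i := 0; i < n/size; i++ {
		dst[i] = binary.LittleEndian.Uint32(buf[i*size:])
	}
	return n, err
}

func main() {
	src := bytes.NewReader([]byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0})
	dst := make([]uint32, 3)
	n, err := copyUint32SliceIn(src, dst)
	fmt.Println(n, err, dst) // 12 <nil> [1 2 3]
}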
-// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
+// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
//go:nosplit
-func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
+func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -164,13 +164,13 @@ func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (
return length, err
}
-// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
-func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
+// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
+func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -182,13 +182,13 @@ func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
-func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
+// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
+func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int16)(nil).SizeBytes()
+ size := (*Uint32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -202,101 +202,101 @@ func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint16) SizeBytes() int {
- return 2
+func (i *Int64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint16) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
+func (i *Int64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint16) UnmarshalBytes(src []byte) {
- *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
+func (i *Int64) UnmarshalBytes(src []byte) {
+ *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint16) Packed() bool {
+func (i *Int64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint16) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
+func (i *Int64) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint16) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
+func (i *Int64) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
+func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
+func (i *Int64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
+// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
//go:nosplit
-func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
+func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -315,14 +315,14 @@ func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16)
return length, err
}
-// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
+// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
//go:nosplit
-func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
+func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -341,13 +341,13 @@ func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16)
return length, err
}
-// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
-func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
+// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
+func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -359,13 +359,13 @@ func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
return length, err
}
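MarshalUnsafeInt64Slice and its siblings take the zero-copy route for the whole slice: they view the backing array as size*count raw bytes and copy it in a single step, keeping the slice alive across the aliased use. A sketch of that approach for []uint64; marshalUint64Slice is an illustrative name, and the shortcut is only valid for fixed-size, padding-free element types on a little-endian host.

package main

import (
	"encoding/binary"
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// marshalUint64Slice aliases src's backing array as size*len(src) raw
// bytes and copies it into dst in one go.
func marshalUint64Slice(src []uint64, dst []byte) int {
	if len(src) == 0 {
		return 0
	}
	const size = 8
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&src))

	var raw []byte
	rawHdr := (*reflect.SliceHeader)(unsafe.Pointer(&raw))
	rawHdr.Data = hdr.Data
	rawHdr.Len = size * len(src)
	rawHdr.Cap = size * len(src)

	n := copy(dst, raw)
	runtime.KeepAlive(src) // src must outlive the aliased view above.
	return n
}

func main() {
	dst := make([]byte, 16)
	n := marshalUint64Slice([]uint64{1, 0xdeadbeef}, dst)
	fmt.Println(n, binary.LittleEndian.Uint64(dst[8:])) // 16 3735928559
}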
-// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
-func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
+// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
+func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint16)(nil).SizeBytes()
+ size := (*Int64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -379,101 +379,101 @@ func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int32) SizeBytes() int {
- return 4
+func (u *Uint64) SizeBytes() int {
+ return 8
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
+func (u *Uint64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int32) UnmarshalBytes(src []byte) {
- *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
+func (u *Uint64) UnmarshalBytes(src []byte) {
+ *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int32) Packed() bool {
+func (u *Uint64) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int32) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (u *Uint64) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int32) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (u *Uint64) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int32) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
+// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
//go:nosplit
-func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
+func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -492,14 +492,14 @@ func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (i
return length, err
}
-// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
+// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
//go:nosplit
-func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
+func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -518,13 +518,13 @@ func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (
return length, err
}
-// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
-func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
+// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
+func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -536,13 +536,13 @@ func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
-func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
+// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
+func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int32)(nil).SizeBytes()
+ size := (*Uint64)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -556,101 +556,101 @@ func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint32) SizeBytes() int {
- return 4
+func (i *Int8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint32) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*u))
+func (i *Int8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*i)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint32) UnmarshalBytes(src []byte) {
- *u = Uint32(uint32(usermem.ByteOrder.Uint32(src[:4])))
+func (i *Int8) UnmarshalBytes(src []byte) {
+ *i = Int8(int8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint32) Packed() bool {
+func (i *Int8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint32) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
+func (i *Int8) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint32) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
+func (i *Int8) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
+func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint32) WriteTo(w io.Writer) (int64, error) {
+func (i *Int8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyUint32SliceIn copies in a slice of uint32 objects from the task's memory.
+// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
//go:nosplit
-func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32) (int, error) {
+func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -669,14 +669,14 @@ func CopyUint32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint32)
return length, err
}
-// CopyUint32SliceOut copies a slice of uint32 objects to the task's memory.
+// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
//go:nosplit
-func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32) (int, error) {
+func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -695,13 +695,13 @@ func CopyUint32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint32)
return length, err
}
-// MarshalUnsafeUint32Slice is like Uint32.MarshalUnsafe, but for a []Uint32.
-func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
+// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
+func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -713,13 +713,13 @@ func MarshalUnsafeUint32Slice(src []Uint32, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint32Slice is like Uint32.UnmarshalUnsafe, but for a []Uint32.
-func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
+// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
+func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint32)(nil).SizeBytes()
+ size := (*Int8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
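
The renamed scalar newtypes above all follow the same encode/decode shape. As a standalone illustration (not gVisor code), here is a minimal sketch of what a generated MarshalBytes/UnmarshalBytes pair does for one of these newtypes, with the standard library's binary.LittleEndian standing in for usermem.ByteOrder and the type name chosen only for the example:

package main

import (
	"encoding/binary"
	"fmt"
)

// Int32 stands in for any of the generated scalar newtypes in this file.
type Int32 int32

// SizeBytes reports the fixed wire size of the value.
func (i *Int32) SizeBytes() int { return 4 }

// MarshalBytes writes the value in little-endian order, mirroring the
// generated method's use of usermem.ByteOrder.
func (i *Int32) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], uint32(*i))
}

// UnmarshalBytes reverses MarshalBytes; the uint32/int32 conversions
// round-trip the sign bits unchanged.
func (i *Int32) UnmarshalBytes(src []byte) {
	*i = Int32(int32(binary.LittleEndian.Uint32(src[:4])))
}

func main() {
	v := Int32(-42)
	buf := make([]byte, v.SizeBytes())
	v.MarshalBytes(buf)

	var out Int32
	out.UnmarshalBytes(buf)
	fmt.Println(out) // -42
}

Because these newtypes wrap a single scalar, the encoded form and the in-memory form coincide, which is why the generated Packed() methods can return true unconditionally.
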
@@ -733,101 +733,101 @@ func UnmarshalUnsafeUint32Slice(dst []Uint32, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int64) SizeBytes() int {
- return 8
+func (u *Uint8) SizeBytes() int {
+ return 1
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*i))
+func (u *Uint8) MarshalBytes(dst []byte) {
+ dst[0] = byte(*u)
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int64) UnmarshalBytes(src []byte) {
- *i = Int64(int64(usermem.ByteOrder.Uint64(src[:8])))
+func (u *Uint8) UnmarshalBytes(src []byte) {
+ *u = Uint8(uint8(src[0]))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int64) Packed() bool {
+func (u *Uint8) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int64) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (u *Uint8) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int64) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (u *Uint8) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int64) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyInt64SliceIn copies in a slice of int64 objects from the task's memory.
+// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
//go:nosplit
-func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (int, error) {
+func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -846,14 +846,14 @@ func CopyInt64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int64) (i
return length, err
}
-// CopyInt64SliceOut copies a slice of int64 objects to the task's memory.
+// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
//go:nosplit
-func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (int, error) {
+func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -872,13 +872,13 @@ func CopyInt64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int64) (
return length, err
}
-// MarshalUnsafeInt64Slice is like Int64.MarshalUnsafe, but for a []Int64.
-func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
+// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
+func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -890,13 +890,13 @@ func MarshalUnsafeInt64Slice(src []Int64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt64Slice is like Int64.UnmarshalUnsafe, but for a []Int64.
-func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
+// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
+func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int64)(nil).SizeBytes()
+ size := (*Uint8)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -910,101 +910,101 @@ func UnmarshalUnsafeInt64Slice(dst []Int64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint64) SizeBytes() int {
- return 8
+func (i *Int16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(*u))
+func (i *Int16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint64) UnmarshalBytes(src []byte) {
- *u = Uint64(uint64(usermem.ByteOrder.Uint64(src[:8])))
+func (i *Int16) UnmarshalBytes(src []byte) {
+ *i = Int16(int16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint64) Packed() bool {
+func (i *Int16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint64) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
+func (i *Int16) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint64) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
+func (i *Int16) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
+func (i *Int16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint64) WriteTo(w io.Writer) (int64, error) {
+func (i *Int16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyUint64SliceIn copies in a slice of uint64 objects from the task's memory.
+// CopyInt16SliceIn copies in a slice of int16 objects from the task's memory.
//go:nosplit
-func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64) (int, error) {
+func CopyInt16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1023,14 +1023,14 @@ func CopyUint64SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint64)
return length, err
}
-// CopyUint64SliceOut copies a slice of uint64 objects to the task's memory.
+// CopyInt16SliceOut copies a slice of int16 objects to the task's memory.
//go:nosplit
-func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64) (int, error) {
+func CopyInt16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1049,13 +1049,13 @@ func CopyUint64SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint64)
return length, err
}
-// MarshalUnsafeUint64Slice is like Uint64.MarshalUnsafe, but for a []Uint64.
-func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
+// MarshalUnsafeInt16Slice is like Int16.MarshalUnsafe, but for a []Int16.
+func MarshalUnsafeInt16Slice(src []Int16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1067,13 +1067,13 @@ func MarshalUnsafeUint64Slice(src []Uint64, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint64Slice is like Uint64.UnmarshalUnsafe, but for a []Uint64.
-func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
+// UnmarshalUnsafeInt16Slice is like Int16.UnmarshalUnsafe, but for a []Int16.
+func UnmarshalUnsafeInt16Slice(dst []Int16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint64)(nil).SizeBytes()
+ size := (*Int16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
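
One detail worth noting in the slice helpers above is the `size := (*Int16)(nil).SizeBytes()` call: the method is invoked on a typed nil pointer purely to learn the element size. That is legal because the generated SizeBytes never dereferences its receiver, as this small sketch (illustrative type, not gVisor code) shows:

package main

import "fmt"

// Int16 mirrors the generated scalar newtype; SizeBytes returns a constant
// and never touches the receiver, so a typed nil pointer is a valid callee.
type Int16 int16

func (i *Int16) SizeBytes() int { return 2 }

func main() {
	size := (*Int16)(nil).SizeBytes()
	fmt.Println(size) // 2
}
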
@@ -1087,101 +1087,101 @@ func UnmarshalUnsafeUint64Slice(dst []Uint64, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (i *Int8) SizeBytes() int {
- return 1
+func (u *Uint16) SizeBytes() int {
+ return 2
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (i *Int8) MarshalBytes(dst []byte) {
- dst[0] = byte(*i)
+func (u *Uint16) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(*u))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (i *Int8) UnmarshalBytes(src []byte) {
- *i = Int8(int8(src[0]))
+func (u *Uint16) UnmarshalBytes(src []byte) {
+ *u = Uint16(uint16(usermem.ByteOrder.Uint16(src[:2])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (i *Int8) Packed() bool {
+func (u *Uint16) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (i *Int8) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(i))
+func (u *Uint16) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (i *Int8) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(i), src)
+func (u *Uint16) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (i *Int8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *Uint16) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (i *Int8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return i.CopyOutN(cc, addr, i.SizeBytes())
+func (u *Uint16) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (i *Int8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *Uint16) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (i *Int8) WriteTo(w io.Writer) (int64, error) {
+func (u *Uint16) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
- hdr.Len = i.SizeBytes()
- hdr.Cap = i.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that i
+ // Since we bypassed the compiler's escape analysis, indicate that u
// must live until the use above.
- runtime.KeepAlive(i) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyInt8SliceIn copies in a slice of int8 objects from the task's memory.
+// CopyUint16SliceIn copies in a slice of uint16 objects from the task's memory.
//go:nosplit
-func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int, error) {
+func CopyUint16SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint16) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1200,14 +1200,14 @@ func CopyInt8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int8) (int
return length, err
}
-// CopyInt8SliceOut copies a slice of int8 objects to the task's memory.
+// CopyUint16SliceOut copies a slice of uint16 objects to the task's memory.
//go:nosplit
-func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (int, error) {
+func CopyUint16SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint16) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1226,13 +1226,13 @@ func CopyInt8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int8) (in
return length, err
}
-// MarshalUnsafeInt8Slice is like Int8.MarshalUnsafe, but for a []Int8.
-func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
+// MarshalUnsafeUint16Slice is like Uint16.MarshalUnsafe, but for a []Uint16.
+func MarshalUnsafeUint16Slice(src []Uint16, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1244,13 +1244,13 @@ func MarshalUnsafeInt8Slice(src []Int8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeInt8Slice is like Int8.UnmarshalUnsafe, but for a []Int8.
-func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
+// UnmarshalUnsafeUint16Slice is like Uint16.UnmarshalUnsafe, but for a []Uint16.
+func UnmarshalUnsafeUint16Slice(dst []Uint16, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Int8)(nil).SizeBytes()
+ size := (*Uint16)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1264,101 +1264,101 @@ func UnmarshalUnsafeInt8Slice(dst []Int8, src []byte) (int, error) {
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
-func (u *Uint8) SizeBytes() int {
- return 1
+func (i *Int32) SizeBytes() int {
+ return 4
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *Uint8) MarshalBytes(dst []byte) {
- dst[0] = byte(*u)
+func (i *Int32) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*i))
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *Uint8) UnmarshalBytes(src []byte) {
- *u = Uint8(uint8(src[0]))
+func (i *Int32) UnmarshalBytes(src []byte) {
+ *i = Int32(int32(usermem.ByteOrder.Uint32(src[:4])))
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (u *Uint8) Packed() bool {
+func (i *Int32) Packed() bool {
// Scalar newtypes are always packed.
return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *Uint8) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
+func (i *Int32) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(i))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *Uint8) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(u), src)
+func (i *Int32) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(i), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *Uint8) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (i *Int32) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *Uint8) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
+func (i *Int32) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return i.CopyOutN(cc, addr, i.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *Uint8) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (i *Int32) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (u *Uint8) WriteTo(w io.Writer) (int64, error) {
+func (i *Int32) WriteTo(w io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(i)))
+ hdr.Len = i.SizeBytes()
+ hdr.Cap = i.SizeBytes()
length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
+ // Since we bypassed the compiler's escape analysis, indicate that i
// must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ runtime.KeepAlive(i) // escapes: replaced by intrinsic.
return int64(length), err
}
-// CopyUint8SliceIn copies in a slice of uint8 objects from the task's memory.
+// CopyInt32SliceIn copies in a slice of int32 objects from the task's memory.
//go:nosplit
-func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (int, error) {
+func CopyInt32SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []int32) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1377,14 +1377,14 @@ func CopyUint8SliceIn(cc marshal.CopyContext, addr usermem.Addr, dst []uint8) (i
return length, err
}
-// CopyUint8SliceOut copies a slice of uint8 objects to the task's memory.
+// CopyInt32SliceOut copies a slice of int32 objects to the task's memory.
//go:nosplit
-func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (int, error) {
+func CopyInt32SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []int32) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1403,13 +1403,13 @@ func CopyUint8SliceOut(cc marshal.CopyContext, addr usermem.Addr, src []uint8) (
return length, err
}
-// MarshalUnsafeUint8Slice is like Uint8.MarshalUnsafe, but for a []Uint8.
-func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
+// MarshalUnsafeInt32Slice is like Int32.MarshalUnsafe, but for a []Int32.
+func MarshalUnsafeInt32Slice(src []Int32, dst []byte) (int, error) {
count := len(src)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&src)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
@@ -1421,13 +1421,13 @@ func MarshalUnsafeUint8Slice(src []Uint8, dst []byte) (int, error) {
return length, err
}
-// UnmarshalUnsafeUint8Slice is like Uint8.UnmarshalUnsafe, but for a []Uint8.
-func UnmarshalUnsafeUint8Slice(dst []Uint8, src []byte) (int, error) {
+// UnmarshalUnsafeInt32Slice is like Int32.UnmarshalUnsafe, but for a []Int32.
+func UnmarshalUnsafeInt32Slice(dst []Int32, src []byte) (int, error) {
count := len(dst)
if count == 0 {
return 0, nil
}
- size := (*Uint8)(nil).SizeBytes()
+ size := (*Int32)(nil).SizeBytes()
ptr := unsafe.Pointer(&dst)
val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))
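
The Copy*SliceIn/Out and *UnsafeSlice helpers above all share one step: they alias the typed slice's backing array as a []byte so the whole slice moves in a single copy rather than element by element. Below is a self-contained sketch of just that aliasing step, under the assumption stated in the comments; the real helpers additionally route the pointer through gohacks.Noescape and hand the alias to the task's CopyContext rather than returning it:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// bytesOf builds a []byte header over the backing array of vals, mirroring
// the reflect.SliceHeader construction used by the generated slice helpers.
func bytesOf(vals []int32) []byte {
	size := int(unsafe.Sizeof(int32(0)))
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = (*reflect.SliceHeader)(unsafe.Pointer(&vals)).Data
	hdr.Len = size * len(vals)
	hdr.Cap = size * len(vals)
	return buf
}

func main() {
	vals := []int32{1, 2, 3}
	raw := bytesOf(vals)
	fmt.Println(len(raw)) // 12 bytes backing the three int32 values.
	// Keep vals reachable while raw (which holds only a raw address) is used.
	runtime.KeepAlive(vals)
}
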
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index cdbc88377..466bf889a 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -25,149 +25,6 @@ var _ marshal.Marshallable = (*UContext64)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *UContext64) SizeBytes() int {
- return 16 +
- (*SignalStack)(nil).SizeBytes() +
- (*SignalContext64)(nil).SizeBytes() +
- (*linux.SignalSet)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
- dst = dst[8:]
- u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
- dst = dst[u.Stack.SizeBytes():]
- u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
- dst = dst[u.MContext.SizeBytes():]
- u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
- dst = dst[u.Sigset.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
- src = src[u.Stack.SizeBytes():]
- u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
- src = src[u.MContext.SizeBytes():]
- u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
- src = src[u.Sigset.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *UContext64) Packed() bool {
- return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *UContext64) MarshalUnsafe(dst []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- u.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *UContext64) UnmarshalUnsafe(src []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyOut(unsafe.Pointer(u), src)
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- u.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- u.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- u.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, u.SizeBytes())
- u.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalContext64) SizeBytes() int {
return 184 +
(*linux.SignalSet)(nil).SizeBytes() +
@@ -405,3 +262,146 @@ func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*SignalContext64)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
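
The UContext64 methods moved above branch on Packed(): when every embedded field reports a packed layout, the whole struct is copied as raw memory via safecopy; otherwise the code falls back to field-by-field MarshalBytes. Here is a minimal standalone sketch of that branching, with an illustrative two-field struct and a plain copy standing in for safecopy.CopyIn (names are the example's own, not gVisor's):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// ctx is an illustrative stand-in for a generated struct such as UContext64.
type ctx struct {
	Flags uint64
	Link  uint64
}

// Packed reports whether the in-memory layout matches the ABI layout with no
// padding to skip; for this two-field struct it always does.
func (c *ctx) Packed() bool { return true }

// MarshalBytes is the field-by-field fallback path.
func (c *ctx) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[:8], c.Flags)
	binary.LittleEndian.PutUint64(dst[8:16], c.Link)
}

// MarshalUnsafe mirrors the generated branching: a whole-struct memory copy
// when packed, MarshalBytes otherwise. On a little-endian host both paths
// produce the same bytes.
func (c *ctx) MarshalUnsafe(dst []byte) {
	if c.Packed() {
		copy(dst, (*[16]byte)(unsafe.Pointer(c))[:])
	} else {
		c.MarshalBytes(dst)
	}
}

func main() {
	c := ctx{Flags: 1, Link: 2}
	buf := make([]byte, 16)
	c.MarshalUnsafe(buf)
	fmt.Println(buf)
}
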
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index aac25375e..b8667cdb9 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,174 +27,6 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalContext64) SizeBytes() int {
- return 32 +
- 8*31 +
- 1*8 +
- (*FpsimdContext)(nil).SizeBytes() +
- 1*3568
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
- dst = dst[8:]
- for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
- dst = dst[8:]
- }
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
- dst = dst[8:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s._pad[idx])
- dst = dst[1:]
- }
- s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
- dst = dst[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- dst[0] = byte(s.Reserved[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- }
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 8; idx++ {
- s._pad[idx] = src[0]
- src = src[1:]
- }
- s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
- src = src[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- s.Reserved[idx] = uint8(src[0])
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalContext64) Packed() bool {
- return s.Fpsimd64.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalContext64) MarshalUnsafe(dst []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- s.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (a *aarch64Ctx) SizeBytes() int {
return 8
}
@@ -590,3 +422,171 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes() +
+ 1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ dst[0] = byte(s.Reserved[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ s.Reserved[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
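
Every unsafe path in the generated code, including the SignalContext64 methods moved above, ends with runtime.KeepAlive: the []byte it builds refers to the struct only through a raw uintptr, which the garbage collector does not treat as a reference. A minimal sketch of that pattern follows, with illustrative names and the gohacks.Noescape step omitted:

package main

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

// writeRaw mirrors the shape of the generated WriteTo methods: alias the
// value's memory as a []byte, hand it to the writer, then keep the value
// alive until the write has finished using the raw pointer.
func writeRaw(w io.Writer, v *uint64) (int, error) {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(v))
	hdr.Len = int(unsafe.Sizeof(*v))
	hdr.Cap = int(unsafe.Sizeof(*v))

	n, err := w.Write(buf)
	// Without this, nothing visible to the collector refers to *v once the
	// raw header is built, so v could in principle be reclaimed too early.
	runtime.KeepAlive(v)
	return n, err
}

func main() {
	var sink bytes.Buffer
	x := uint64(0x0102030405060708)
	n, err := writeRaw(&sink, &x)
	fmt.Println(n, err, sink.Len()) // 8 <nil> 8
}
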
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index 956643160..862c286d9 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -23,156 +23,156 @@ var _ marshal.Marshallable = (*rlimit64)(nil)
var _ marshal.Marshallable = (*userSockFprog)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (o *oldDirentHdr) SizeBytes() int {
- return 18
+func (d *direntHdr) SizeBytes() int {
+ return 1 +
+ (*oldDirentHdr)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (o *oldDirentHdr) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
- dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
- dst = dst[2:]
+func (d *direntHdr) MarshalBytes(dst []byte) {
+ d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()])
+ dst = dst[d.OldHdr.SizeBytes():]
+ dst[0] = byte(d.Typ)
+ dst = dst[1:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
- o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
- src = src[2:]
+func (d *direntHdr) UnmarshalBytes(src []byte) {
+ d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()])
+ src = src[d.OldHdr.SizeBytes():]
+ d.Typ = uint8(src[0])
+ src = src[1:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (o *oldDirentHdr) Packed() bool {
+func (d *direntHdr) Packed() bool {
return false
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (o *oldDirentHdr) MarshalUnsafe(dst []byte) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
- o.MarshalBytes(dst)
+func (d *direntHdr) MarshalUnsafe(dst []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ d.MarshalBytes(dst)
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- o.UnmarshalBytes(src)
+func (d *direntHdr) UnmarshalUnsafe(src []byte) {
+ // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ d.UnmarshalBytes(src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
- o.MarshalBytes(buf) // escapes: fallback.
+func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+ d.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return o.CopyOutN(cc, addr, o.SizeBytes())
+func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return d.CopyOutN(cc, addr, d.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- o.UnmarshalBytes(buf) // escapes: fallback.
+ d.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) {
- // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, o.SizeBytes())
- o.MarshalBytes(buf)
+func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, d.SizeBytes())
+ d.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (d *direntHdr) SizeBytes() int {
- return 1 +
- (*oldDirentHdr)(nil).SizeBytes()
+func (o *oldDirentHdr) SizeBytes() int {
+ return 18
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (d *direntHdr) MarshalBytes(dst []byte) {
- d.OldHdr.MarshalBytes(dst[:d.OldHdr.SizeBytes()])
- dst = dst[d.OldHdr.SizeBytes():]
- dst[0] = byte(d.Typ)
- dst = dst[1:]
+func (o *oldDirentHdr) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Ino))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(o.Off))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(o.Reclen))
+ dst = dst[2:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (d *direntHdr) UnmarshalBytes(src []byte) {
- d.OldHdr.UnmarshalBytes(src[:d.OldHdr.SizeBytes()])
- src = src[d.OldHdr.SizeBytes():]
- d.Typ = uint8(src[0])
- src = src[1:]
+func (o *oldDirentHdr) UnmarshalBytes(src []byte) {
+ o.Ino = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Off = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ o.Reclen = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
}
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (d *direntHdr) Packed() bool {
+func (o *oldDirentHdr) Packed() bool {
return false
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (d *direntHdr) MarshalUnsafe(dst []byte) {
- // Type direntHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
- d.MarshalBytes(dst)
+func (o *oldDirentHdr) MarshalUnsafe(dst []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to MarshalBytes.
+ o.MarshalBytes(dst)
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (d *direntHdr) UnmarshalUnsafe(src []byte) {
- // Type direntHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- d.UnmarshalBytes(src)
+func (o *oldDirentHdr) UnmarshalUnsafe(src []byte) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ o.UnmarshalBytes(src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (d *direntHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
- d.MarshalBytes(buf) // escapes: fallback.
+func (o *oldDirentHdr) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
+ o.MarshalBytes(buf) // escapes: fallback.
return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
}
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (d *direntHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return d.CopyOutN(cc, addr, d.SizeBytes())
+func (o *oldDirentHdr) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return o.CopyOutN(cc, addr, o.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (d *direntHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(d.SizeBytes()) // escapes: okay.
+func (o *oldDirentHdr) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(o.SizeBytes()) // escapes: okay.
length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
// Unmarshal unconditionally. If we had a short copy-in, this results in a
// partially unmarshalled struct.
- d.UnmarshalBytes(buf) // escapes: fallback.
+ o.UnmarshalBytes(buf) // escapes: fallback.
return length, err
}
// WriteTo implements io.WriterTo.WriteTo.
-func (d *direntHdr) WriteTo(writer io.Writer) (int64, error) {
- // Type direntHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, d.SizeBytes())
- d.MarshalBytes(buf)
+func (o *oldDirentHdr) WriteTo(writer io.Writer) (int64, error) {
+ // Type oldDirentHdr doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, o.SizeBytes())
+ o.MarshalBytes(buf)
length, err := writer.Write(buf)
return int64(length), err
}
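
The hunk above appears only to swap the order in which the generated direntHdr and oldDirentHdr helpers are emitted; the wire layout itself is unchanged: oldDirentHdr serializes Ino (8 bytes), Off (8 bytes) and Reclen (2 bytes) for 18 bytes, and direntHdr appends a single Typ byte for 19. A hypothetical, self-contained sketch of that same layout using encoding/binary — little-endian stands in for usermem.ByteOrder, and the struct names only mirror the generated types:

package main

import (
	"encoding/binary"
	"fmt"
)

// Hypothetical mirrors of the generated structs; field names follow the hunk
// above. oldDirentHdr occupies 18 bytes on the wire, direntHdr adds one Typ byte.
type oldDirentHdr struct {
	Ino    uint64
	Off    uint64
	Reclen uint16
}

type direntHdr struct {
	OldHdr oldDirentHdr
	Typ    uint8
}

// marshal mimics the generated MarshalBytes: fixed-width fields written in
// declaration order with no padding. The in-memory Go structs carry alignment
// padding, which is why the generated Packed() returns false and the unsafe
// fast paths fall back to MarshalBytes/UnmarshalBytes.
func (d *direntHdr) marshal(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], d.OldHdr.Ino)
	binary.LittleEndian.PutUint64(dst[8:16], d.OldHdr.Off)
	binary.LittleEndian.PutUint16(dst[16:18], d.OldHdr.Reclen)
	dst[18] = d.Typ
}

func main() {
	d := direntHdr{OldHdr: oldDirentHdr{Ino: 1, Off: 2, Reclen: 19}, Typ: 8}
	buf := make([]byte, 19)
	d.marshal(buf)
	fmt.Printf("% x\n", buf)
}
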
diff --git a/pkg/tcpip/network/fragmentation/frag_heap.go b/pkg/tcpip/network/fragmentation/frag_heap.go
deleted file mode 100644
index 0b570d25a..000000000
--- a/pkg/tcpip/network/fragmentation/frag_heap.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fragmentation
-
-import (
- "container/heap"
- "fmt"
-
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
-)
-
-type fragment struct {
- offset uint16
- vv buffer.VectorisedView
-}
-
-type fragHeap []fragment
-
-func (h *fragHeap) Len() int {
- return len(*h)
-}
-
-func (h *fragHeap) Less(i, j int) bool {
- return (*h)[i].offset < (*h)[j].offset
-}
-
-func (h *fragHeap) Swap(i, j int) {
- (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
-}
-
-func (h *fragHeap) Push(x interface{}) {
- *h = append(*h, x.(fragment))
-}
-
-func (h *fragHeap) Pop() interface{} {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[:n-1]
- return x
-}
-
-// reassamble empties the heap and returns a VectorisedView
-// containing a reassambled version of the fragments inside the heap.
-func (h *fragHeap) reassemble() (buffer.VectorisedView, error) {
- curr := heap.Pop(h).(fragment)
- views := curr.vv.Views()
- size := curr.vv.Size()
-
- if curr.offset != 0 {
- return buffer.VectorisedView{}, fmt.Errorf("offset of the first packet is != 0 (%d)", curr.offset)
- }
-
- for h.Len() > 0 {
- curr := heap.Pop(h).(fragment)
- if int(curr.offset) < size {
- curr.vv.TrimFront(size - int(curr.offset))
- } else if int(curr.offset) > size {
- return buffer.VectorisedView{}, fmt.Errorf("packet has a hole, expected offset %d, got %d", size, curr.offset)
- }
- size += curr.vv.Size()
- views = append(views, curr.vv.Views()...)
- }
- return buffer.NewVectorisedView(size, views), nil
-}
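
The deleted fragHeap existed only to satisfy container/heap's interface so that reassemble could drain fragments in ascending offset order. A minimal, self-contained sketch of that pattern with the standard library, using a plain []byte payload in place of buffer.VectorisedView:

package main

import (
	"container/heap"
	"fmt"
)

// Simplified stand-in for the deleted type: a min-heap ordered by fragment offset.
type fragment struct {
	offset uint16
	data   []byte
}

type fragHeap []fragment

func (h fragHeap) Len() int            { return len(h) }
func (h fragHeap) Less(i, j int) bool  { return h[i].offset < h[j].offset }
func (h fragHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *fragHeap) Push(x interface{}) { *h = append(*h, x.(fragment)) }
func (h *fragHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := &fragHeap{}
	heap.Init(h)
	// Fragments arrive out of order; heap.Pop returns them by ascending offset.
	heap.Push(h, fragment{offset: 4, data: []byte("5678")})
	heap.Push(h, fragment{offset: 0, data: []byte("1234")})
	var payload []byte
	for h.Len() > 0 {
		payload = append(payload, heap.Pop(h).(fragment).data...)
	}
	fmt.Println(string(payload)) // 12345678
}
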
diff --git a/pkg/tcpip/network/fragmentation/fragmentation.go b/pkg/tcpip/network/fragmentation/fragmentation.go
index d31296a41..1af87d713 100644
--- a/pkg/tcpip/network/fragmentation/fragmentation.go
+++ b/pkg/tcpip/network/fragmentation/fragmentation.go
@@ -53,6 +53,10 @@ var (
// ErrFragmentOverlap indicates that, during reassembly, a fragment overlaps
// with another one.
ErrFragmentOverlap = errors.New("overlapping fragments")
+
+ // ErrFragmentConflict indicates that, during reassembly, some fragments are
+ // in conflict with one another.
+ ErrFragmentConflict = errors.New("conflicting fragments")
)
// FragmentID is the identifier for a fragment.
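
The new ErrFragmentConflict sentinel above pairs with the reassembler.go rewrite below: instead of cloning each fragment onto a fragHeap, every filled hole now keeps its payload, incoming fragments split the hole they land in, and reassembly becomes a sort-and-concatenate over the holes. A minimal, single-threaded sketch of that hole-splitting scheme under simplified assumptions — plain []byte payloads stand in for buffer.VectorisedView, and the error variables are local stand-ins for the package sentinels:

package main

import (
	"errors"
	"fmt"
	"sort"
)

var (
	errFragmentOverlap  = errors.New("overlapping fragments")
	errFragmentConflict = errors.New("conflicting fragments")
)

// hole mirrors the new bookkeeping: a byte range that is either still missing
// or, once filled, carries its fragment payload directly.
type hole struct {
	first, last uint16
	filled      bool
	final       bool
	data        []byte
}

type reassembler struct {
	holes  []hole
	filled int
}

func newReassembler() *reassembler {
	// One hole covering the whole possible payload; it stays "final" until a
	// non-terminal fragment splits a tail hole off it.
	return &reassembler{holes: []hole{{first: 0, last: 65535, final: true}}}
}

// process inserts one fragment and reports (payload, done, error).
func (r *reassembler) process(first, last uint16, more bool, data []byte) ([]byte, bool, error) {
	holeFound := false
	for i := range r.holes {
		h := r.holes[i]
		if last < h.first || h.last < first {
			continue // fragment does not touch this hole
		}
		if first < h.first || h.last < last {
			return nil, false, errFragmentOverlap // only partially fits the hole
		}
		if !more && (!h.final || (h.filled && h.last != last)) {
			return nil, false, errFragmentConflict // a second, different final fragment
		}
		holeFound = true
		if h.filled {
			continue // exact duplicate, nothing to do
		}
		// Carve new holes for any uncovered range on either side of the fragment.
		if first > h.first {
			r.holes = append(r.holes, hole{first: h.first, last: first - 1})
		}
		final := h.final
		if last < h.last && more {
			r.holes = append(r.holes, hole{first: last + 1, last: h.last, final: final})
			final = false
		}
		r.holes[i] = hole{first: first, last: last, filled: true, final: final, data: data}
		r.filled++
		break
	}
	if !holeFound {
		return nil, false, errFragmentConflict // starts beyond the final fragment
	}
	if r.filled < len(r.holes) {
		return nil, false, nil
	}
	// Every hole is filled: order by offset and concatenate the payloads.
	sort.Slice(r.holes, func(i, j int) bool { return r.holes[i].first < r.holes[j].first })
	var out []byte
	for _, h := range r.holes {
		out = append(out, h.data...)
	}
	return out, true, nil
}

func main() {
	r := newReassembler()
	if _, done, err := r.process(6, 11, false, []byte(" world")); done || err != nil {
		fmt.Println(done, err)
	}
	payload, done, err := r.process(0, 5, true, []byte("hello,"))
	fmt.Printf("%q done=%v err=%v\n", payload, done, err) // "hello, world" done=true err=<nil>
}
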
diff --git a/pkg/tcpip/network/fragmentation/reassembler.go b/pkg/tcpip/network/fragmentation/reassembler.go
index 04072d966..9b20bb1d8 100644
--- a/pkg/tcpip/network/fragmentation/reassembler.go
+++ b/pkg/tcpip/network/fragmentation/reassembler.go
@@ -15,9 +15,8 @@
package fragmentation
import (
- "container/heap"
- "fmt"
"math"
+ "sort"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
@@ -29,6 +28,8 @@ type hole struct {
first uint16
last uint16
filled bool
+ final bool
+ data buffer.View
}
type reassembler struct {
@@ -39,7 +40,6 @@ type reassembler struct {
mu sync.Mutex
holes []hole
filled int
- heap fragHeap
done bool
creationTime int64
pkt *stack.PacketBuffer
@@ -48,51 +48,71 @@ type reassembler struct {
func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
r := &reassembler{
id: id,
- holes: make([]hole, 0, 16),
- heap: make(fragHeap, 0, 8),
creationTime: clock.NowMonotonic(),
}
r.holes = append(r.holes, hole{
first: 0,
last: math.MaxUint16,
filled: false,
+ final: true,
})
return r
}
-// updateHoles updates the list of holes for an incoming fragment. It returns
-// true if the fragment fits, it is not a duplicate and it does not overlap with
-// another fragment.
-//
-// For IPv6, overlaps with an existing fragment are explicitly forbidden by
-// RFC 8200 section 4.5:
-// If any of the fragments being reassembled overlap with any other fragments
-// being reassembled for the same packet, reassembly of that packet must be
-// abandoned and all the fragments that have been received for that packet
-// must be discarded, and no ICMP error messages should be sent.
-//
-// It is not explicitly forbidden for IPv4, but to keep parity with Linux we
-// disallow it as well:
-// https://github.com/torvalds/linux/blob/38525c6/net/ipv4/inet_fragment.c#L349
-func (r *reassembler) updateHoles(first, last uint16, more bool) (bool, error) {
+func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (buffer.VectorisedView, uint8, bool, int, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.done {
+ // A concurrent goroutine might have already reassembled
+ // the packet and emptied the heap while this goroutine
+ // was waiting on the mutex. We don't have to do anything in this case.
+ return buffer.VectorisedView{}, 0, false, 0, nil
+ }
+
+ var holeFound bool
+ var consumed int
for i := range r.holes {
currentHole := &r.holes[i]
- if currentHole.filled || last < currentHole.first || currentHole.last < first {
+ if last < currentHole.first || currentHole.last < first {
continue
}
-
+ // For IPv6, overlaps with an existing fragment are explicitly forbidden by
+ // RFC 8200 section 4.5:
+ // If any of the fragments being reassembled overlap with any other
+ // fragments being reassembled for the same packet, reassembly of that
+ // packet must be abandoned and all the fragments that have been received
+ // for that packet must be discarded, and no ICMP error messages should be
+ // sent.
+ //
+ // It is not explicitly forbidden for IPv4, but to keep parity with Linux we
+ // disallow it as well:
+ // https://github.com/torvalds/linux/blob/38525c6/net/ipv4/inet_fragment.c#L349
if first < currentHole.first || currentHole.last < last {
// Incoming fragment only partially fits in the free hole.
- return false, ErrFragmentOverlap
+ return buffer.VectorisedView{}, 0, false, 0, ErrFragmentOverlap
+ }
+ if !more {
+ if !currentHole.final || currentHole.filled && currentHole.last != last {
+ // We have another final fragment, which does not perfectly overlap.
+ return buffer.VectorisedView{}, 0, false, 0, ErrFragmentConflict
+ }
}
- r.filled++
+ holeFound = true
+ if currentHole.filled {
+ // Incoming fragment is a duplicate.
+ continue
+ }
+
+ // We are populating the current hole with the payload and creating a new
+ // hole for any unfilled ranges on either end.
if first > currentHole.first {
r.holes = append(r.holes, hole{
first: currentHole.first,
last: first - 1,
filled: false,
+ final: false,
})
}
if last < currentHole.last && more {
@@ -100,39 +120,22 @@ func (r *reassembler) updateHoles(first, last uint16, more bool) (bool, error) {
first: last + 1,
last: currentHole.last,
filled: false,
+ final: currentHole.final,
})
+ currentHole.final = false
}
+ v := pkt.Data.ToOwnedView()
+ consumed = v.Size()
+ r.size += consumed
// Update the current hole to precisely match the incoming fragment.
r.holes[i] = hole{
first: first,
last: last,
filled: true,
+ final: currentHole.final,
+ data: v,
}
- return true, nil
- }
-
- // Incoming fragment is a duplicate/subset, or its offset comes after the end
- // of the reassembled payload.
- return false, nil
-}
-
-func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (buffer.VectorisedView, uint8, bool, int, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.done {
- // A concurrent goroutine might have already reassembled
- // the packet and emptied the heap while this goroutine
- // was waiting on the mutex. We don't have to do anything in this case.
- return buffer.VectorisedView{}, 0, false, 0, nil
- }
-
- used, err := r.updateHoles(first, last, more)
- if err != nil {
- return buffer.VectorisedView{}, 0, false, 0, fmt.Errorf("fragment reassembly failed: %w", err)
- }
-
- var consumed int
- if used {
+ r.filled++
// For IPv6, it is possible to have different Protocol values between
// fragments of a packet (because, unlike IPv4, the Protocol is not used to
// identify a fragment). In this case, only the Protocol of the first
@@ -145,22 +148,30 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s
r.pkt = pkt
r.proto = proto
}
- vv := pkt.Data
- // We store the incoming packet only if it filled some holes.
- heap.Push(&r.heap, fragment{offset: first, vv: vv.Clone(nil)})
- consumed = vv.Size()
- r.size += consumed
+
+ break
+ }
+ if !holeFound {
+ // Incoming fragment is beyond end.
+ return buffer.VectorisedView{}, 0, false, 0, ErrFragmentConflict
}
// Check if all the holes have been filled and we are ready to reassemble.
if r.filled < len(r.holes) {
return buffer.VectorisedView{}, 0, false, consumed, nil
}
- res, err := r.heap.reassemble()
- if err != nil {
- return buffer.VectorisedView{}, 0, false, 0, fmt.Errorf("fragment reassembly failed: %w", err)
+
+ sort.Slice(r.holes, func(i, j int) bool {
+ return r.holes[i].first < r.holes[j].first
+ })
+
+ var size int
+ views := make([]buffer.View, 0, len(r.holes))
+ for _, hole := range r.holes {
+ views = append(views, hole.data)
+ size += hole.data.Size()
}
- return res, r.proto, true, consumed, nil
+ return buffer.NewVectorisedView(size, views), r.proto, true, consumed, nil
}
func (r *reassembler) checkDoneOrMark() bool {