path: root/pkg/sentry
author    gVisor bot <gvisor-bot@google.com>  2020-12-12 17:18:09 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-12-12 17:18:09 +0000
commit    5399efbe29e89cbd2865ace6505adb3d6deee9d5 (patch)
tree      f640c5bb6586239f3c290e2f06b2454a6e319dcd /pkg/sentry
parent    a49eb922dea2363b731c91f556d6c21abd344dce (diff)
parent    4aef908c92c8530222fe547c154e2ee45a130b1b (diff)
Merge release-20201208.0-39-g4aef908c9 (automated)
Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go                 | 220
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go           | 286
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go           | 336
-rw-r--r--  pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go          | 178
-rw-r--r--  pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go  | 200
5 files changed, 610 insertions(+), 610 deletions(-)
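
The diff below only shuffles these generated routines within each file: every block removed from one position reappears verbatim further down (610 lines removed, 610 added). The first hunk moves SignalInfo's marshalling, which emits the fixed 128-byte siginfo layout: three 32-bit fields, four bytes of padding, then 112 opaque Fields bytes. A minimal standalone sketch of that layout, assuming little-endian host order (what usermem.ByteOrder resolves to on amd64 and arm64) and a hypothetical signalInfo type that is not the gVisor one:

package main

import (
	"encoding/binary"
	"fmt"
)

// signalInfo mirrors the 128-byte layout that the generated
// SignalInfo.MarshalBytes in the diff below emits: Signo, Errno, Code,
// a 4-byte padding slot, then 112 opaque Fields bytes. Hypothetical
// type, for illustration only.
type signalInfo struct {
	Signo  int32
	Errno  int32
	Code   int32
	Fields [112]byte
}

func marshalSignalInfo(s *signalInfo) []byte {
	dst := make([]byte, 128) // 16 + (128-16), matching SizeBytes below
	binary.LittleEndian.PutUint32(dst[0:4], uint32(s.Signo))
	binary.LittleEndian.PutUint32(dst[4:8], uint32(s.Errno))
	binary.LittleEndian.PutUint32(dst[8:12], uint32(s.Code))
	// dst[12:16] stays zero: the explicit padding word.
	copy(dst[16:], s.Fields[:])
	return dst
}

func main() {
	si := signalInfo{Signo: 11, Code: 1} // e.g. a SIGSEGV-shaped record
	fmt.Println(len(marshalSignalInfo(&si))) // 128
}

The generated code writes into a caller-supplied dst rather than allocating; the sketch allocates only to stay self-contained.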
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index 1bd458b68..fe68f921d 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -23,116 +23,6 @@ var _ marshal.Marshallable = (*SignalStack)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalInfo) SizeBytes() int {
- return 16 +
- 1*(128-16)
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- for idx := 0; idx < (128-16); idx++ {
- dst[0] = byte(s.Fields[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalInfo) UnmarshalBytes(src []byte) {
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- for idx := 0; idx < (128-16); idx++ {
- s.Fields[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalInfo) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalAct) SizeBytes() int {
return 24 +
(*linux.SignalSet)(nil).SizeBytes()
@@ -370,3 +260,113 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalInfo) SizeBytes() int {
+ return 16 +
+ 1*(128-16)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalInfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ dst[0] = byte(s.Fields[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalInfo) UnmarshalBytes(src []byte) {
+ s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ s.Fields[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalInfo) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
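
The CopyOutN, CopyIn, and WriteTo bodies above avoid an intermediate buffer by aliasing the struct's own memory as a []byte through a reflect.SliceHeader, then pinning the receiver with runtime.KeepAlive because escape analysis was bypassed. A standalone sketch of that aliasing pattern, using a hypothetical header type rather than any gVisor type:

package main

import (
	"fmt"
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

type header struct {
	A uint32
	B uint32
}

// writeRaw writes h's in-memory representation directly, mirroring the
// shape of the generated WriteTo: build a slice header over the struct,
// write it, then keep the struct alive past the write.
func writeRaw(h *header, w io.Writer) (int, error) {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(h))
	hdr.Len = int(unsafe.Sizeof(*h))
	hdr.Cap = int(unsafe.Sizeof(*h))

	n, err := w.Write(buf)
	// Escape analysis was bypassed, so pin h until after the write.
	runtime.KeepAlive(h)
	return n, err
}

func main() {
	h := header{A: 1, B: 2}
	n, err := writeRaw(&h, io.Discard)
	fmt.Println(n, err) // 8 <nil>
}

The KeepAlive call matters: without it the garbage collector could, in principle, reclaim the struct while the write still references its memory through the raw slice header.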
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index 466bf889a..cdbc88377 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -25,6 +25,149 @@ var _ marshal.Marshallable = (*UContext64)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*SignalContext64)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalContext64) SizeBytes() int {
return 184 +
(*linux.SignalSet)(nil).SizeBytes() +
@@ -262,146 +405,3 @@ func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *UContext64) SizeBytes() int {
- return 16 +
- (*SignalStack)(nil).SizeBytes() +
- (*SignalContext64)(nil).SizeBytes() +
- (*linux.SignalSet)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
- dst = dst[8:]
- u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
- dst = dst[u.Stack.SizeBytes():]
- u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
- dst = dst[u.MContext.SizeBytes():]
- u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
- dst = dst[u.Sigset.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
- src = src[u.Stack.SizeBytes():]
- u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
- src = src[u.MContext.SizeBytes():]
- u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
- src = src[u.Sigset.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *UContext64) Packed() bool {
- return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *UContext64) MarshalUnsafe(dst []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- u.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *UContext64) UnmarshalUnsafe(src []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyOut(unsafe.Pointer(u), src)
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- u.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- u.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- u.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, u.SizeBytes())
- u.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
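
UContext64's routines above gate the raw-memory path on Packed(): only when every embedded field (MContext, Sigset, Stack) reports a packed layout can the whole struct be copied with a single safecopy call, otherwise the code falls back to the field-by-field MarshalBytes path. A small runnable sketch of that dispatch shape, with a toy packable interface standing in for marshal.Marshallable:

package main

import (
	"encoding/binary"
	"fmt"
)

// packable is a toy subset of the Marshallable contract seen above:
// Packed reports whether the in-memory layout already matches the ABI
// layout, and MarshalBytes is the safe field-by-field encoder.
type packable interface {
	SizeBytes() int
	Packed() bool
	MarshalBytes(dst []byte)
}

type pair struct{ A, B uint32 }

func (p *pair) SizeBytes() int { return 8 }
func (p *pair) Packed() bool   { return true }
func (p *pair) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], p.A)
	binary.LittleEndian.PutUint32(dst[4:8], p.B)
}

// marshalUnsafe mirrors the dispatch in the generated MarshalUnsafe:
// take the single raw copy only when the type is packed, otherwise fall
// back to MarshalBytes. rawCopy stands in for safecopy.CopyIn.
func marshalUnsafe(v packable, rawCopy func([]byte)) []byte {
	dst := make([]byte, v.SizeBytes())
	if v.Packed() {
		rawCopy(dst)
	} else {
		v.MarshalBytes(dst)
	}
	return dst
}

func main() {
	p := &pair{A: 1, B: 2}
	fmt.Println(marshalUnsafe(p, p.MarshalBytes)) // [1 0 0 0 2 0 0 0]
}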
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index b8667cdb9..aac25375e 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,6 +27,174 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes() +
+ 1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ dst[0] = byte(s.Reserved[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ s.Reserved[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (a *aarch64Ctx) SizeBytes() int {
return 8
}
@@ -422,171 +590,3 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalContext64) SizeBytes() int {
- return 32 +
- 8*31 +
- 1*8 +
- (*FpsimdContext)(nil).SizeBytes() +
- 1*3568
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
- dst = dst[8:]
- for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
- dst = dst[8:]
- }
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
- dst = dst[8:]
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(s._pad[idx])
- dst = dst[1:]
- }
- s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
- dst = dst[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- dst[0] = byte(s.Reserved[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- }
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- for idx := 0; idx < 8; idx++ {
- s._pad[idx] = src[0]
- src = src[1:]
- }
- s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
- src = src[s.Fpsimd64.SizeBytes():]
- for idx := 0; idx < 3568; idx++ {
- s.Reserved[idx] = uint8(src[0])
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalContext64) Packed() bool {
- return s.Fpsimd64.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalContext64) MarshalUnsafe(dst []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- s.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
- if s.Fpsimd64.Packed() {
- safecopy.CopyOut(unsafe.Pointer(s), src)
- } else {
- // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- s.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- s.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- s.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
- if !s.Fpsimd64.Packed() {
- // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, s.SizeBytes())
- s.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
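
SignalContext64's SizeBytes above expands to the arm64 sigcontext layout: 32 bytes of scalar fields (FaultAddr, Sp, Pc, Pstate), 31 general-purpose registers, 8 bytes of padding, the embedded FPSIMD frame, and a 3568-byte reserved tail. Assuming FpsimdContext is the usual 528-byte fpsimd_context (an assumption; its SizeBytes is not part of this diff), the total comes to the kernel's 4384-byte struct sigcontext. A quick check of that arithmetic:

package main

import "fmt"

// Sizes taken from the SizeBytes expression above; fpsimdSize is an
// assumption based on the Linux fpsimd_context layout, since
// FpsimdContext.SizeBytes is not shown in this diff.
const (
	scalarFields = 32       // FaultAddr + Sp + Pc + Pstate
	gpRegs       = 8 * 31   // Regs[31]
	pad          = 1 * 8    // _pad[8]
	fpsimdSize   = 528      // hypothetical: 8-byte header + fpsr + fpcr + 512 bytes of vector registers
	reserved     = 1 * 3568 // Reserved[3568]
)

func main() {
	fmt.Println(scalarFields + gpRegs + pad + fpsimdSize + reserved) // 4384
}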
diff --git a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
index d0c9ec652..da6aad4a5 100644
--- a/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
+++ b/pkg/sentry/kernel/auth/auth_abi_autogen_unsafe.go
@@ -19,6 +19,95 @@ var _ marshal.Marshallable = (*UID)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
//go:nosplit
+func (uid *UID) SizeBytes() int {
+ return 4
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (uid *UID) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid))
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (uid *UID) UnmarshalBytes(src []byte) {
+ *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4])))
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (uid *UID) Packed() bool {
+ // Scalar newtypes are always packed.
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (uid *UID) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(uid))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (uid *UID) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(uid), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return uid.CopyOutN(cc, addr, uid.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (uid *UID) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
+ hdr.Len = uid.SizeBytes()
+ hdr.Cap = uid.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that uid
+ // must live until the use above.
+ runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+//go:nosplit
func (gid *GID) SizeBytes() int {
return 4
}
@@ -194,92 +283,3 @@ func UnmarshalUnsafeGIDSlice(dst []GID, src []byte) (int, error) {
return length, err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-//go:nosplit
-func (uid *UID) SizeBytes() int {
- return 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (uid *UID) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(*uid))
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (uid *UID) UnmarshalBytes(src []byte) {
- *uid = UID(uint32(usermem.ByteOrder.Uint32(src[:4])))
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (uid *UID) Packed() bool {
- // Scalar newtypes are always packed.
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (uid *UID) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(uid))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (uid *UID) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(uid), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (uid *UID) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
- hdr.Len = uid.SizeBytes()
- hdr.Cap = uid.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that uid
- // must live until the use above.
- runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (uid *UID) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return uid.CopyOutN(cc, addr, uid.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (uid *UID) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
- hdr.Len = uid.SizeBytes()
- hdr.Cap = uid.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that uid
- // must live until the use above.
- runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (uid *UID) WriteTo(w io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(uid)))
- hdr.Len = uid.SizeBytes()
- hdr.Cap = uid.SizeBytes()
-
- length, err := w.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that uid
- // must live until the use above.
- runtime.KeepAlive(uid) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
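
The auth methods above marshal scalar newtypes: a UID is a bare uint32, so SizeBytes is 4, Packed is unconditionally true, and MarshalBytes is a single PutUint32. A self-contained sketch of the same round trip, using a hypothetical uid type rather than the auth package's:

package main

import (
	"encoding/binary"
	"fmt"
)

// uid mirrors the scalar-newtype pattern from the generated auth code:
// a 4-byte value with a trivial, always-packed layout.
type uid uint32

func (u uid) sizeBytes() int { return 4 }

func (u uid) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[:4], uint32(u))
}

func unmarshalUID(src []byte) uid {
	return uid(binary.LittleEndian.Uint32(src[:4]))
}

func main() {
	var buf [4]byte
	uid(1000).marshalBytes(buf[:])
	fmt.Println(unmarshalUID(buf[:])) // 1000
}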
diff --git a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
index 516ea1842..ccd56285a 100644
--- a/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/vfs2_abi_autogen_unsafe.go
@@ -112,26 +112,49 @@ func (s *sigSetWithSize) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *multipleMessageHeader64) SizeBytes() int {
- return 8 +
- (*MessageHeader64)(nil).SizeBytes()
+func (m *MessageHeader64) SizeBytes() int {
+ return 56
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
- m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
- dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+func (m *MessageHeader64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
- m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
- src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *MessageHeader64) UnmarshalBytes(src []byte) {
+ m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -139,40 +162,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *multipleMessageHeader64) Packed() bool {
- return m.msgHdr.Packed()
+func (m *MessageHeader64) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- m.MarshalBytes(dst)
- }
+func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(m), src)
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- m.UnmarshalBytes(src)
- }
+func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- m.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -189,23 +195,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- m.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -221,15 +217,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, m.SizeBytes())
- m.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -245,49 +233,26 @@ func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *MessageHeader64) SizeBytes() int {
- return 56
+func (m *multipleMessageHeader64) SizeBytes() int {
+ return 8 +
+ (*MessageHeader64)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
+ m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
+ dst = dst[m.msgHdr.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
+ m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
+ src = src[m.msgHdr.SizeBytes():]
+ m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -295,23 +260,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *MessageHeader64) Packed() bool {
- return true
+func (m *multipleMessageHeader64) Packed() bool {
+ return m.msgHdr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
+func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(m), src)
+func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -328,13 +310,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -350,7 +342,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))