path: root/pkg/sentry/arch
Diffstat (limited to 'pkg/sentry/arch')
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go    396
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go    582
-rw-r--r--  pkg/sentry/arch/arch_arm64_state_autogen.go            1
-rw-r--r--  pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go      7
-rw-r--r--  pkg/sentry/arch/arch_unsafe_state_autogen.go           3
-rw-r--r--  pkg/sentry/arch/signal_amd64.go                       27
-rw-r--r--  pkg/sentry/arch/signal_arm64.go                       30
-rw-r--r--  pkg/sentry/arch/stack.go                             179
-rw-r--r--  pkg/sentry/arch/stack_unsafe.go                       69
9 files changed, 1163 insertions, 131 deletions
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index 8eadb6e22..466bf889a 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -7,5 +7,401 @@
package arch
import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
)
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*SignalContext64)(nil)
+var _ marshal.Marshallable = (*SignalStack)(nil)
+var _ marshal.Marshallable = (*UContext64)(nil)
+var _ marshal.Marshallable = (*linux.SignalSet)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 184 +
+ (*linux.SignalSet)(nil).SizeBytes() +
+ 8*8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
+ dst = dst[2:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
+ dst = dst[8:]
+ s.Oldmask.MarshalBytes(dst[:s.Oldmask.SizeBytes()])
+ dst = dst[s.Oldmask.SizeBytes():]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.R8 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R9 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R10 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R11 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R12 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R13 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R14 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.R15 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rdi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rsi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rbp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rbx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rdx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rcx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rsp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Rip = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Eflags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Cs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Gs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Fs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Ss = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ src = src[2:]
+ s.Err = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Trapno = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Oldmask.UnmarshalBytes(src[:s.Oldmask.SizeBytes()])
+ src = src[s.Oldmask.SizeBytes():]
+ s.Cr2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Fpstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s.Reserved[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Oldmask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Oldmask.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Oldmask.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Oldmask.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*SignalContext64)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
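A minimal sketch, not part of the patch, assuming only the SizeBytes/MarshalBytes/UnmarshalBytes implementations generated in this file; signalContextRoundTrip is a hypothetical helper in package arch:

// signalContextRoundTrip serializes src into a freshly allocated buffer and
// deserializes it into a new value, so dst mirrors src field for field.
func signalContextRoundTrip(src *SignalContext64) SignalContext64 {
	buf := make([]byte, src.SizeBytes()) // exact size of the fixed ABI layout
	src.MarshalBytes(buf)                // write each field in wire order
	var dst SignalContext64
	dst.UnmarshalBytes(buf)              // read the fields back
	return dst
}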
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index c8e4c7c24..aac25375e 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -2,9 +2,591 @@
// +build arm64
// +build arm64
+// +build arm64
package arch
import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/marshal"
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
)
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*FpsimdContext)(nil)
+var _ marshal.Marshallable = (*SignalContext64)(nil)
+var _ marshal.Marshallable = (*SignalStack)(nil)
+var _ marshal.Marshallable = (*UContext64)(nil)
+var _ marshal.Marshallable = (*aarch64Ctx)(nil)
+var _ marshal.Marshallable = (*linux.SignalSet)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalContext64) SizeBytes() int {
+ return 32 +
+ 8*31 +
+ 1*8 +
+ (*FpsimdContext)(nil).SizeBytes() +
+ 1*3568
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ dst = dst[8:]
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ dst = dst[8:]
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(s._pad[idx])
+ dst = dst[1:]
+ }
+ s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()])
+ dst = dst[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ dst[0] = byte(s.Reserved[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalContext64) UnmarshalBytes(src []byte) {
+ s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 31; idx++ {
+ s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ for idx := 0; idx < 8; idx++ {
+ s._pad[idx] = src[0]
+ src = src[1:]
+ }
+ s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()])
+ src = src[s.Fpsimd64.SizeBytes():]
+ for idx := 0; idx < 3568; idx++ {
+ s.Reserved[idx] = uint8(src[0])
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalContext64) Packed() bool {
+ return s.Fpsimd64.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalContext64) MarshalUnsafe(dst []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
+ if s.Fpsimd64.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !s.Fpsimd64.Packed() {
+ // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (a *aarch64Ctx) SizeBytes() int {
+ return 8
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (a *aarch64Ctx) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
+ dst = dst[4:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (a *aarch64Ctx) UnmarshalBytes(src []byte) {
+ a.Magic = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ a.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (a *aarch64Ctx) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (a *aarch64Ctx) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(a))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(a), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return a.CopyOutN(cc, addr, a.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (a *aarch64Ctx) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(a)))
+ hdr.Len = a.SizeBytes()
+ hdr.Cap = a.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that a
+ // must live until the use above.
+ runtime.KeepAlive(a) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (f *FpsimdContext) SizeBytes() int {
+ return 8 +
+ (*aarch64Ctx)(nil).SizeBytes() +
+ 8*64
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (f *FpsimdContext) MarshalBytes(dst []byte) {
+ f.Head.MarshalBytes(dst[:f.Head.SizeBytes()])
+ dst = dst[f.Head.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
+ dst = dst[4:]
+ for idx := 0; idx < 64; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
+ dst = dst[8:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (f *FpsimdContext) UnmarshalBytes(src []byte) {
+ f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()])
+ src = src[f.Head.SizeBytes():]
+ f.Fpsr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ f.Fpcr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ for idx := 0; idx < 64; idx++ {
+ f.Vregs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (f *FpsimdContext) Packed() bool {
+ return f.Head.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (f *FpsimdContext) MarshalUnsafe(dst []byte) {
+ if f.Head.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(f))
+ } else {
+ // Type FpsimdContext doesn't have a packed layout in memory, fallback to MarshalBytes.
+ f.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (f *FpsimdContext) UnmarshalUnsafe(src []byte) {
+ if f.Head.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(f), src)
+ } else {
+ // Type FpsimdContext doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ f.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ f.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return f.CopyOutN(cc, addr, f.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ f.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) {
+ if !f.Head.Packed() {
+ // Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, f.SizeBytes())
+ f.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(f)))
+ hdr.Len = f.SizeBytes()
+ hdr.Cap = f.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that f
+ // must live until the use above.
+ runtime.KeepAlive(f) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes() +
+ 1*120 +
+ 1*8 +
+ (*SignalContext64)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ dst[0] = byte(u._pad[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(u._pad2[idx])
+ dst = dst[1:]
+ }
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ u._pad[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ u._pad2[idx] = src[0]
+ src = src[1:]
+ }
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
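A standalone sketch of the reflect.SliceHeader pattern used by the generated CopyOutN/CopyIn/WriteTo methods above, assuming package arch; ctxBytes is a hypothetical helper, not part of the patch.

import (
	"reflect"
	"runtime"
	"unsafe"
)

// ctxBytes copies the raw in-memory representation of an aarch64Ctx into an
// ordinary byte slice. It first builds a slice header aliasing the struct's
// memory, just as the generated code does, then copies out of that view.
func ctxBytes(a *aarch64Ctx) []byte {
	var view []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&view))
	hdr.Data = uintptr(unsafe.Pointer(a))
	hdr.Len = a.SizeBytes()
	hdr.Cap = a.SizeBytes()

	out := make([]byte, len(view))
	copy(out, view)

	// The slice header bypasses escape analysis, so keep a reachable until
	// the copy above is done.
	runtime.KeepAlive(a)
	return out
}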
diff --git a/pkg/sentry/arch/arch_arm64_state_autogen.go b/pkg/sentry/arch/arch_arm64_state_autogen.go
index a88aef9d0..4e69512bb 100644
--- a/pkg/sentry/arch/arch_arm64_state_autogen.go
+++ b/pkg/sentry/arch/arch_arm64_state_autogen.go
@@ -2,6 +2,7 @@
// +build arm64
// +build arm64
+// +build arm64
package arch
diff --git a/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go
new file mode 100644
index 000000000..2167ed9c9
--- /dev/null
+++ b/pkg/sentry/arch/arch_unsafe_abi_autogen_unsafe.go
@@ -0,0 +1,7 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_unsafe_state_autogen.go b/pkg/sentry/arch/arch_unsafe_state_autogen.go
new file mode 100644
index 000000000..9a45071b9
--- /dev/null
+++ b/pkg/sentry/arch/arch_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package arch
diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go
index 6fb756f0e..72e07a988 100644
--- a/pkg/sentry/arch/signal_amd64.go
+++ b/pkg/sentry/arch/signal_amd64.go
@@ -17,17 +17,19 @@
package arch
import (
- "encoding/binary"
"math"
"syscall"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/log"
+ "gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/usermem"
)
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
// second argument to signal handlers set by signal(2).
+//
+// +marshal
type SignalContext64 struct {
R8 uint64
R9 uint64
@@ -68,6 +70,8 @@ const (
)
// UContext64 is equivalent to ucontext_t on 64-bit x86.
+//
+// +marshal
type UContext64 struct {
Flags uint64
Link uint64
@@ -172,12 +176,7 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
// "... the value (%rsp+8) is always a multiple of 16 (...) when
// control is transferred to the function entry point." - AMD64 ABI
- ucSize := binary.Size(uc)
- if ucSize < 0 {
- // This can only happen if we've screwed up the definition of
- // UContext64.
- panic("can't get size of UContext64")
- }
+ ucSize := uc.SizeBytes()
// st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128.
frameSize := int(st.Arch.Width()) + ucSize + 128
frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8
@@ -195,18 +194,18 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
info.FixSignalCodeForUser()
// Set up the stack frame.
- infoAddr, err := st.Push(info)
- if err != nil {
+ if _, err := info.CopyOut(st, StackBottomMagic); err != nil {
return err
}
- ucAddr, err := st.Push(uc)
- if err != nil {
+ infoAddr := st.Bottom
+ if _, err := uc.CopyOut(st, StackBottomMagic); err != nil {
return err
}
+ ucAddr := st.Bottom
if act.HasRestorer() {
// Push the restorer return address.
// Note that this doesn't need to be popped.
- if _, err := st.Push(usermem.Addr(act.Restorer)); err != nil {
+ if _, err := primitive.CopyUint64Out(st, StackBottomMagic, act.Restorer); err != nil {
return err
}
} else {
@@ -240,11 +239,11 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error) {
// Copy out the stack frame.
var uc UContext64
- if _, err := st.Pop(&uc); err != nil {
+ if _, err := uc.CopyIn(st, StackBottomMagic); err != nil {
return 0, SignalStack{}, err
}
var info SignalInfo
- if _, err := st.Pop(&info); err != nil {
+ if _, err := info.CopyIn(st, StackBottomMagic); err != nil {
return 0, SignalStack{}, err
}
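A brief sketch of the push pattern that replaces st.Push in SignalSetup above, assuming the surrounding package arch and its usermem import; pushUContext is a hypothetical name, not part of the patch.

// pushUContext marshals uc directly onto the signal stack. The
// StackBottomMagic sentinel tells the Stack to compute the destination from
// its current bottom; after a successful copy the new bottom is the address
// of the pushed ucontext, which is what SignalSetup records as ucAddr.
func pushUContext(st *Stack, uc *UContext64) (usermem.Addr, error) {
	if _, err := uc.CopyOut(st, StackBottomMagic); err != nil {
		return 0, err
	}
	return st.Bottom, nil
}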
diff --git a/pkg/sentry/arch/signal_arm64.go b/pkg/sentry/arch/signal_arm64.go
index 642c79dda..7fde5d34e 100644
--- a/pkg/sentry/arch/signal_arm64.go
+++ b/pkg/sentry/arch/signal_arm64.go
@@ -12,10 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// +build arm64
+
package arch
import (
- "encoding/binary"
"syscall"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -25,6 +26,8 @@ import (
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
// second argument to signal handlers set by signal(2).
+//
+// +marshal
type SignalContext64 struct {
FaultAddr uint64
Regs [31]uint64
@@ -36,6 +39,7 @@ type SignalContext64 struct {
Reserved [3568]uint8
}
+// +marshal
type aarch64Ctx struct {
Magic uint32
Size uint32
@@ -43,6 +47,8 @@ type aarch64Ctx struct {
// FpsimdContext is equivalent to struct fpsimd_context on arm64
// (arch/arm64/include/uapi/asm/sigcontext.h).
+//
+// +marshal
type FpsimdContext struct {
Head aarch64Ctx
Fpsr uint32
@@ -51,13 +57,15 @@ type FpsimdContext struct {
}
// UContext64 is equivalent to ucontext on arm64(arch/arm64/include/uapi/asm/ucontext.h).
+//
+// +marshal
type UContext64 struct {
Flags uint64
Link uint64
Stack SignalStack
Sigset linux.SignalSet
// glibc uses a 1024-bit sigset_t
- _pad [(1024 - 64) / 8]byte
+ _pad [120]byte // (1024 - 64) / 8 = 120
// sigcontext must be aligned to 16-byte
_pad2 [8]byte
// last for future expansion
@@ -94,11 +102,7 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
},
Sigset: sigset,
}
-
- ucSize := binary.Size(uc)
- if ucSize < 0 {
- panic("can't get size of UContext64")
- }
+ ucSize := uc.SizeBytes()
// frameSize = ucSize + sizeof(siginfo).
// sizeof(siginfo) == 128.
@@ -119,14 +123,14 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
info.FixSignalCodeForUser()
// Set up the stack frame.
- infoAddr, err := st.Push(info)
- if err != nil {
+ if _, err := info.CopyOut(st, StackBottomMagic); err != nil {
return err
}
- ucAddr, err := st.Push(uc)
- if err != nil {
+ infoAddr := st.Bottom
+ if _, err := uc.CopyOut(st, StackBottomMagic); err != nil {
return err
}
+ ucAddr := st.Bottom
// Set up registers.
c.Regs.Sp = uint64(st.Bottom)
@@ -147,11 +151,11 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error) {
// Copy out the stack frame.
var uc UContext64
- if _, err := st.Pop(&uc); err != nil {
+ if _, err := uc.CopyIn(st, StackBottomMagic); err != nil {
return 0, SignalStack{}, err
}
var info SignalInfo
- if _, err := st.Pop(&info); err != nil {
+ if _, err := info.CopyIn(st, StackBottomMagic); err != nil {
return 0, SignalStack{}, err
}
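A small illustrative block, not part of the patch, restating the padding arithmetic from the UContext64 comment above; the constant names are hypothetical.

const (
	glibcSigsetBits  = 1024 // glibc reserves a 1024-bit sigset_t,
	kernelSigsetBits = 64   // but only the first 64 bits are populated here,
	ucPadBytes       = (glibcSigsetBits - kernelSigsetBits) / 8 // so _pad is 120 bytes.
)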
diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go
index 1108fa0bd..5f06c751d 100644
--- a/pkg/sentry/arch/stack.go
+++ b/pkg/sentry/arch/stack.go
@@ -15,14 +15,16 @@
package arch
import (
- "encoding/binary"
- "fmt"
-
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/usermem"
)
-// Stack is a simple wrapper around a usermem.IO and an address.
+// Stack is a simple wrapper around a usermem.IO and an address. Stack
+// implements marshal.CopyContext, and marshallable values can be pushed or
+// popped from the stack through the marshal.Marshallable interface.
+//
+// Stack is not thread-safe.
type Stack struct {
// Our arch info.
// We use this for automatic Native conversion of usermem.Addrs during
@@ -34,105 +36,60 @@ type Stack struct {
// Our current stack bottom.
Bottom usermem.Addr
-}
-// Push pushes the given values on to the stack.
-//
-// (This method supports Addrs and treats them as native types.)
-func (s *Stack) Push(vals ...interface{}) (usermem.Addr, error) {
- for _, v := range vals {
-
- // We convert some types to well-known serializable quanities.
- var norm interface{}
-
- // For array types, we will automatically add an appropriate
- // terminal value. This is done simply to make the interface
- // easier to use.
- var term interface{}
-
- switch v.(type) {
- case string:
- norm = []byte(v.(string))
- term = byte(0)
- case []int8, []uint8:
- norm = v
- term = byte(0)
- case []int16, []uint16:
- norm = v
- term = uint16(0)
- case []int32, []uint32:
- norm = v
- term = uint32(0)
- case []int64, []uint64:
- norm = v
- term = uint64(0)
- case []usermem.Addr:
- // Special case: simply push recursively.
- _, err := s.Push(s.Arch.Native(uintptr(0)))
- if err != nil {
- return 0, err
- }
- varr := v.([]usermem.Addr)
- for i := len(varr) - 1; i >= 0; i-- {
- _, err := s.Push(varr[i])
- if err != nil {
- return 0, err
- }
- }
- continue
- case usermem.Addr:
- norm = s.Arch.Native(uintptr(v.(usermem.Addr)))
- default:
- norm = v
- }
+ // Scratch buffer used for marshalling to avoid having to repeatedly
+ // allocate scratch memory.
+ scratchBuf []byte
+}
- if term != nil {
- _, err := s.Push(term)
- if err != nil {
- return 0, err
- }
- }
+// scratchBufLen is the default length of Stack.scratchBuf. The
+// largest structs the stack regularly serializes are arch.SignalInfo
+// and arch.UContext64. We'll set the default size as the larger of
+// the two, arch.UContext64.
+var scratchBufLen = (*UContext64)(nil).SizeBytes()
- c := binary.Size(norm)
- if c < 0 {
- return 0, fmt.Errorf("bad binary.Size for %T", v)
- }
- n, err := usermem.CopyObjectOut(context.Background(), s.IO, s.Bottom-usermem.Addr(c), norm, usermem.IOOpts{})
- if err != nil || c != n {
- return 0, err
- }
+// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.
+func (s *Stack) CopyScratchBuffer(size int) []byte {
+ if len(s.scratchBuf) < size {
+ s.scratchBuf = make([]byte, size)
+ }
+ return s.scratchBuf[:size]
+}
+// StackBottomMagic is the special address callers must pass to all stack
+// marshalling operations to cause the src/dst address to be computed based on
+// the current end of the stack.
+const StackBottomMagic = ^usermem.Addr(0) // usermem.Addr(-1)
+
+// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. CopyOutBytes
+// computes an appropriate address based on the current end of the stack.
+// Callers pass the sentinel address StackBottomMagic to marshalling methods
+// to indicate this.
+func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) {
+ if sentinel != StackBottomMagic {
+ panic("Attempted to copy out to stack with absolute address")
+ }
+ c := len(b)
+ n, err := s.IO.CopyOut(context.Background(), s.Bottom-usermem.Addr(c), b, usermem.IOOpts{})
+ if err == nil && n == c {
s.Bottom -= usermem.Addr(n)
}
-
- return s.Bottom, nil
+ return n, err
}
-// Pop pops the given values off the stack.
-//
-// (This method supports Addrs and treats them as native types.)
-func (s *Stack) Pop(vals ...interface{}) (usermem.Addr, error) {
- for _, v := range vals {
-
- vaddr, isVaddr := v.(*usermem.Addr)
-
- var n int
- var err error
- if isVaddr {
- value := s.Arch.Native(uintptr(0))
- n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, value, usermem.IOOpts{})
- *vaddr = usermem.Addr(s.Arch.Value(value))
- } else {
- n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, v, usermem.IOOpts{})
- }
- if err != nil {
- return 0, err
- }
-
+// CopyInBytes implements marshal.CopyContext.CopyInBytes. CopyInBytes computes
+// an appropriate address based on the current end of the stack. Callers must
+// pass the sentinel address StackBottomMagic to marshalling methods to
+// indicate this.
+func (s *Stack) CopyInBytes(sentinel usermem.Addr, b []byte) (int, error) {
+ if sentinel != StackBottomMagic {
+ panic("Attempted to copy in from stack with absolute address")
+ }
+ n, err := s.IO.CopyIn(context.Background(), s.Bottom, b, usermem.IOOpts{})
+ if err == nil {
s.Bottom += usermem.Addr(n)
}
-
- return s.Bottom, nil
+ return n, err
}
// Align aligns the stack to the given offset.
@@ -142,6 +99,22 @@ func (s *Stack) Align(offset int) {
}
}
+// PushNullTerminatedByteSlice writes bs to the stack, followed by an extra null
+// byte at the end. On error, the contents of the stack and the bottom cursor
+// are undefined.
+func (s *Stack) PushNullTerminatedByteSlice(bs []byte) (int, error) {
+ // Note: Stack grows up, so write the terminal null byte first.
+ nNull, err := primitive.CopyUint8Out(s, StackBottomMagic, 0)
+ if err != nil {
+ return 0, err
+ }
+ n, err := primitive.CopyByteSliceOut(s, StackBottomMagic, bs)
+ if err != nil {
+ return 0, err
+ }
+ return n + nNull, nil
+}
+
// StackLayout describes the location of the arguments and environment on the
// stack.
type StackLayout struct {
@@ -177,11 +150,10 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
l.EnvvEnd = s.Bottom
envAddrs := make([]usermem.Addr, len(env))
for i := len(env) - 1; i >= 0; i-- {
- addr, err := s.Push(env[i])
- if err != nil {
+ if _, err := s.PushNullTerminatedByteSlice([]byte(env[i])); err != nil {
return StackLayout{}, err
}
- envAddrs[i] = addr
+ envAddrs[i] = s.Bottom
}
l.EnvvStart = s.Bottom
@@ -189,11 +161,10 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
l.ArgvEnd = s.Bottom
argAddrs := make([]usermem.Addr, len(args))
for i := len(args) - 1; i >= 0; i-- {
- addr, err := s.Push(args[i])
- if err != nil {
+ if _, err := s.PushNullTerminatedByteSlice([]byte(args[i])); err != nil {
return StackLayout{}, err
}
- argAddrs[i] = addr
+ argAddrs[i] = s.Bottom
}
l.ArgvStart = s.Bottom
@@ -222,26 +193,26 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
auxv = append(auxv, usermem.Addr(a.Key), a.Value)
}
auxv = append(auxv, usermem.Addr(0))
- _, err := s.Push(auxv)
+ _, err := s.pushAddrSliceAndTerminator(auxv)
if err != nil {
return StackLayout{}, err
}
// Push environment.
- _, err = s.Push(envAddrs)
+ _, err = s.pushAddrSliceAndTerminator(envAddrs)
if err != nil {
return StackLayout{}, err
}
// Push args.
- _, err = s.Push(argAddrs)
+ _, err = s.pushAddrSliceAndTerminator(argAddrs)
if err != nil {
return StackLayout{}, err
}
// Push arg count.
- _, err = s.Push(usermem.Addr(len(args)))
- if err != nil {
+ lenP := s.Arch.Native(uintptr(len(args)))
+ if _, err = lenP.CopyOut(s, StackBottomMagic); err != nil {
return StackLayout{}, err
}
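A usage sketch of the new PushNullTerminatedByteSlice helper as Load employs it above, assuming package arch; pushCString is a hypothetical name, not part of the patch.

// pushCString writes str onto the stack followed by a terminating NUL byte
// and returns the address of the pushed string, which after the push is
// simply the current stack bottom.
func pushCString(s *Stack, str string) (usermem.Addr, error) {
	if _, err := s.PushNullTerminatedByteSlice([]byte(str)); err != nil {
		return 0, err
	}
	return s.Bottom, nil
}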
diff --git a/pkg/sentry/arch/stack_unsafe.go b/pkg/sentry/arch/stack_unsafe.go
new file mode 100644
index 000000000..a90d297ee
--- /dev/null
+++ b/pkg/sentry/arch/stack_unsafe.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+import (
+ "reflect"
+ "runtime"
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/marshal/primitive"
+ "gvisor.dev/gvisor/pkg/usermem"
+)
+
+// pushAddrSliceAndTerminator copies a slice of addresses to the stack, and
+// also pushes an extra null address element at the end of the slice.
+//
+// Internally, we unsafely transmute the slice type from the arch-dependent
+// []usermem.Addr type, to a slice of fixed-sized ints so that we can pass it to
+// go-marshal.
+//
+// On error, the contents of the stack and the bottom cursor are undefined.
+func (s *Stack) pushAddrSliceAndTerminator(src []usermem.Addr) (int, error) {
+ // Note: Stack grows upwards, so push the terminator first.
+ srcHdr := (*reflect.SliceHeader)(unsafe.Pointer(&src))
+ switch s.Arch.Width() {
+ case 8:
+ nNull, err := primitive.CopyUint64Out(s, StackBottomMagic, 0)
+ if err != nil {
+ return 0, err
+ }
+ var dst []uint64
+ dstHdr := (*reflect.SliceHeader)(unsafe.Pointer(&dst))
+ dstHdr.Data = srcHdr.Data
+ dstHdr.Len = srcHdr.Len
+ dstHdr.Cap = srcHdr.Cap
+ n, err := primitive.CopyUint64SliceOut(s, StackBottomMagic, dst)
+ // Ensures src doesn't get GCed until we're done using it through dst.
+ runtime.KeepAlive(src)
+ return n + nNull, err
+ case 4:
+ nNull, err := primitive.CopyUint32Out(s, StackBottomMagic, 0)
+ if err != nil {
+ return 0, err
+ }
+ var dst []uint32
+ dstHdr := (*reflect.SliceHeader)(unsafe.Pointer(&dst))
+ dstHdr.Data = srcHdr.Data
+ dstHdr.Len = srcHdr.Len
+ dstHdr.Cap = srcHdr.Cap
+ n, err := primitive.CopyUint32SliceOut(s, StackBottomMagic, dst)
+ // Ensure src doesn't get GCed until we're done using it through dst.
+ runtime.KeepAlive(src)
+ return n + nNull, err
+ default:
+ panic("Unsupported arch width")
+ }
+}
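For comparison, a conservative alternative to the reflect.SliceHeader transmute above would be to copy the addresses into a plain []uint64 before handing them to go-marshal; copyAddrsToUint64 is a hypothetical helper, not part of the patch. It avoids unsafe entirely at the cost of an allocation and copy per call, which is presumably why the in-tree code prefers the zero-copy view for potentially large argv/envp/auxv arrays.

// copyAddrsToUint64 converts the arch-dependent []usermem.Addr into a plain
// []uint64 by copying each element.
func copyAddrsToUint64(src []usermem.Addr) []uint64 {
	dst := make([]uint64, len(src))
	for i, a := range src {
		dst[i] = uint64(a)
	}
	return dst
}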