author    gVisor bot <gvisor-bot@google.com>  2020-04-26 06:58:43 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-04-26 06:58:43 +0000
commit    607f2aa5140945ba6a3b0b2af8f769afff3deb85 (patch)
tree      c61fb8a65812dd90ddb57c0884c37553f182d323
parent    f37d337a7047c32a3a7d35f6248bd72710cb6270 (diff)
parent    3c67754663f424f2ebbc0ff2a4c80e30618d5355 (diff)
Merge release-20200323.0-251-g3c67754 (automated)
-rwxr-xr-x  pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go   199
-rwxr-xr-x  pkg/abi/linux/linux_amd64_state_autogen.go         72
-rwxr-xr-x  pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go   112
-rwxr-xr-x  pkg/abi/linux/linux_arm64_state_autogen.go         26
-rwxr-xr-x  pkg/abi/linux/ptrace_amd64.go                      52
-rwxr-xr-x  pkg/abi/linux/ptrace_arm64.go (renamed from pkg/sentry/arch/arch_state_aarch64.go)  25
-rwxr-xr-x  pkg/sentry/arch/arch_aarch64.go                    26
-rwxr-xr-x  pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go  9
-rwxr-xr-x  pkg/sentry/arch/arch_aarch64_state_autogen.go       1
-rwxr-xr-x  pkg/sentry/arch/arch_abi_autogen_unsafe.go        372
-rw-r--r--  pkg/sentry/arch/arch_amd64.go                      13
-rwxr-xr-x  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go   11
-rwxr-xr-x  pkg/sentry/arch/arch_arm64.go                       2
-rwxr-xr-x  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go   10
-rwxr-xr-x  pkg/sentry/arch/arch_arm64_state_autogen.go         1
-rwxr-xr-x  pkg/sentry/arch/arch_impl_abi_autogen_unsafe.go     9
-rwxr-xr-x  pkg/sentry/arch/arch_impl_state_autogen.go          5
-rwxr-xr-x  pkg/sentry/arch/arch_state_autogen.go              64
-rw-r--r--  pkg/sentry/arch/arch_state_x86.go                  42
-rw-r--r--  pkg/sentry/arch/arch_x86.go                        25
-rwxr-xr-x  pkg/sentry/arch/arch_x86_impl.go                    4
-rwxr-xr-x  pkg/sentry/arch/signal.go                           3
-rw-r--r--  pkg/sentry/arch/signal_act.go                       4
-rw-r--r--  pkg/sentry/arch/signal_stack.go                     3
-rw-r--r--  pkg/sentry/kernel/task_signals.go                   8
-rwxr-xr-x  pkg/sentry/platform/kvm/kvm_arm64.go                5
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_amd64.go          7
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_arm64.go          5
-rw-r--r--  pkg/sentry/platform/ptrace/ptrace_unsafe.go         4
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess.go            8
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_amd64.go     16
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess_arm64.go     16
-rwxr-xr-x  pkg/sentry/platform/ring0/defs_impl_amd64.go       16
-rwxr-xr-x  pkg/sentry/platform/ring0/defs_impl_arm64.go       14
-rw-r--r--  pkg/sentry/platform/ring0/entry_amd64.go            6
-rw-r--r--  pkg/sentry/syscalls/linux/sys_signal.go            10
36 files changed, 986 insertions(+), 219 deletions(-)
diff --git a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
index 3553526b7..1ab6ec3f5 100755
--- a/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_amd64_abi_autogen_unsafe.go
@@ -3,6 +3,7 @@
// +build amd64
// +build amd64
// +build amd64
+// +build amd64
package linux
@@ -19,6 +20,7 @@ import (
// Marshallable types used by this file.
var _ marshal.Marshallable = (*EpollEvent)(nil)
+var _ marshal.Marshallable = (*PtraceRegs)(nil)
var _ marshal.Marshallable = (*Stat)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)
@@ -300,7 +302,7 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
- if s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
s.UnmarshalBytes(src)
@@ -310,7 +312,7 @@ func (s *Stat) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (s *Stat) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
+ if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
s.MarshalBytes(buf) // escapes: fallback.
@@ -388,3 +390,196 @@ func (s *Stat) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (p *PtraceRegs) SizeBytes() int {
+ return 216
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PtraceRegs) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R15))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R14))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R13))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R12))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rbp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rbx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R11))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R10))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R9))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.R8))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rax))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rcx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rdx))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rsi))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rdi))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Orig_rax))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rip))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Cs))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Eflags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Rsp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Ss))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Fs_base))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Gs_base))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Ds))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Es))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Fs))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Gs))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PtraceRegs) UnmarshalBytes(src []byte) {
+ p.R15 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R14 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R13 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R12 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rbp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rbx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R11 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R10 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R9 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.R8 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rcx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rdx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rsi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rdi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Orig_rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rip = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Cs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Eflags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Rsp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Ss = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Fs_base = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Gs_base = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Ds = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Es = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Fs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Gs = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (p *PtraceRegs) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (p *PtraceRegs) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(p))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(p), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (p *PtraceRegs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (p *PtraceRegs) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return p.CopyOutN(task, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (p *PtraceRegs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (p *PtraceRegs) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return int64(length), err
+}
+
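The methods above give PtraceRegs the full marshal.Marshallable surface. A minimal round-trip sketch of the byte-level API on amd64 (the register values are illustrative, not from this commit):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

func main() {
	src := linux.PtraceRegs{Rip: 0x401000, Rsp: 0x7fffffffe000}

	// MarshalBytes emits the 216-byte, field-by-field encoding.
	buf := make([]byte, src.SizeBytes())
	src.MarshalBytes(buf)

	// UnmarshalBytes reverses it. Since Packed() returns true, the
	// *Unsafe variants would instead take the single-safecopy fast path.
	var dst linux.PtraceRegs
	dst.UnmarshalBytes(buf)
	fmt.Println(dst.Rip == src.Rip, dst.Rsp == src.Rsp) // true true
}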
diff --git a/pkg/abi/linux/linux_amd64_state_autogen.go b/pkg/abi/linux/linux_amd64_state_autogen.go
index b3e3be0b6..58cabce4c 100755
--- a/pkg/abi/linux/linux_amd64_state_autogen.go
+++ b/pkg/abi/linux/linux_amd64_state_autogen.go
@@ -3,5 +3,77 @@
// +build amd64
// +build amd64
// +build amd64
+// +build amd64
package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (x *PtraceRegs) beforeSave() {}
+func (x *PtraceRegs) save(m state.Map) {
+ x.beforeSave()
+ m.Save("R15", &x.R15)
+ m.Save("R14", &x.R14)
+ m.Save("R13", &x.R13)
+ m.Save("R12", &x.R12)
+ m.Save("Rbp", &x.Rbp)
+ m.Save("Rbx", &x.Rbx)
+ m.Save("R11", &x.R11)
+ m.Save("R10", &x.R10)
+ m.Save("R9", &x.R9)
+ m.Save("R8", &x.R8)
+ m.Save("Rax", &x.Rax)
+ m.Save("Rcx", &x.Rcx)
+ m.Save("Rdx", &x.Rdx)
+ m.Save("Rsi", &x.Rsi)
+ m.Save("Rdi", &x.Rdi)
+ m.Save("Orig_rax", &x.Orig_rax)
+ m.Save("Rip", &x.Rip)
+ m.Save("Cs", &x.Cs)
+ m.Save("Eflags", &x.Eflags)
+ m.Save("Rsp", &x.Rsp)
+ m.Save("Ss", &x.Ss)
+ m.Save("Fs_base", &x.Fs_base)
+ m.Save("Gs_base", &x.Gs_base)
+ m.Save("Ds", &x.Ds)
+ m.Save("Es", &x.Es)
+ m.Save("Fs", &x.Fs)
+ m.Save("Gs", &x.Gs)
+}
+
+func (x *PtraceRegs) afterLoad() {}
+func (x *PtraceRegs) load(m state.Map) {
+ m.Load("R15", &x.R15)
+ m.Load("R14", &x.R14)
+ m.Load("R13", &x.R13)
+ m.Load("R12", &x.R12)
+ m.Load("Rbp", &x.Rbp)
+ m.Load("Rbx", &x.Rbx)
+ m.Load("R11", &x.R11)
+ m.Load("R10", &x.R10)
+ m.Load("R9", &x.R9)
+ m.Load("R8", &x.R8)
+ m.Load("Rax", &x.Rax)
+ m.Load("Rcx", &x.Rcx)
+ m.Load("Rdx", &x.Rdx)
+ m.Load("Rsi", &x.Rsi)
+ m.Load("Rdi", &x.Rdi)
+ m.Load("Orig_rax", &x.Orig_rax)
+ m.Load("Rip", &x.Rip)
+ m.Load("Cs", &x.Cs)
+ m.Load("Eflags", &x.Eflags)
+ m.Load("Rsp", &x.Rsp)
+ m.Load("Ss", &x.Ss)
+ m.Load("Fs_base", &x.Fs_base)
+ m.Load("Gs_base", &x.Gs_base)
+ m.Load("Ds", &x.Ds)
+ m.Load("Es", &x.Es)
+ m.Load("Fs", &x.Fs)
+ m.Load("Gs", &x.Gs)
+}
+
+func init() {
+ state.Register("pkg/abi/linux.PtraceRegs", (*PtraceRegs)(nil), state.Fns{Save: (*PtraceRegs).save, Load: (*PtraceRegs).load})
+}
diff --git a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
index a87ef68be..a5a5f78aa 100755
--- a/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go
@@ -2,6 +2,7 @@
// +build arm64
// +build arm64
+// +build arm64
package linux
@@ -18,6 +19,7 @@ import (
// Marshallable types used by this file.
var _ marshal.Marshallable = (*EpollEvent)(nil)
+var _ marshal.Marshallable = (*PtraceRegs)(nil)
var _ marshal.Marshallable = (*Stat)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)
@@ -307,7 +309,7 @@ func (s *Stat) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) {
- if s.MTime.Packed() && s.CTime.Packed() && s.ATime.Packed() {
+ if s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
s.UnmarshalBytes(src)
@@ -373,7 +375,7 @@ func (s *Stat) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
// WriteTo implements io.WriterTo.WriteTo.
func (s *Stat) WriteTo(w io.Writer) (int64, error) {
- if !s.ATime.Packed() && s.MTime.Packed() && s.CTime.Packed() {
+ if !s.CTime.Packed() && s.ATime.Packed() && s.MTime.Packed() {
// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := make([]byte, s.SizeBytes())
s.MarshalBytes(buf)
@@ -395,3 +397,109 @@ func (s *Stat) WriteTo(w io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (p *PtraceRegs) SizeBytes() int {
+ return 24 +
+ 8*31
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (p *PtraceRegs) MarshalBytes(dst []byte) {
+ for idx := 0; idx < 31; idx++ {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Regs[idx]))
+ dst = dst[8:]
+ }
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Sp))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Pc))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(p.Pstate))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (p *PtraceRegs) UnmarshalBytes(src []byte) {
+ for idx := 0; idx < 31; idx++ {
+ p.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ }
+ p.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ p.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (p *PtraceRegs) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (p *PtraceRegs) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(p))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (p *PtraceRegs) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(p), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (p *PtraceRegs) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (p *PtraceRegs) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return p.CopyOutN(task, addr, p.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (p *PtraceRegs) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (p *PtraceRegs) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
+ hdr.Len = p.SizeBytes()
+ hdr.Cap = p.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that p
+ // must live until the use above.
+ runtime.KeepAlive(p)
+ return int64(length), err
+}
+
diff --git a/pkg/abi/linux/linux_arm64_state_autogen.go b/pkg/abi/linux/linux_arm64_state_autogen.go
index b144adbda..301a5cb76 100755
--- a/pkg/abi/linux/linux_arm64_state_autogen.go
+++ b/pkg/abi/linux/linux_arm64_state_autogen.go
@@ -2,5 +2,31 @@
// +build arm64
// +build arm64
+// +build arm64
package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (x *PtraceRegs) beforeSave() {}
+func (x *PtraceRegs) save(m state.Map) {
+ x.beforeSave()
+ m.Save("Regs", &x.Regs)
+ m.Save("Sp", &x.Sp)
+ m.Save("Pc", &x.Pc)
+ m.Save("Pstate", &x.Pstate)
+}
+
+func (x *PtraceRegs) afterLoad() {}
+func (x *PtraceRegs) load(m state.Map) {
+ m.Load("Regs", &x.Regs)
+ m.Load("Sp", &x.Sp)
+ m.Load("Pc", &x.Pc)
+ m.Load("Pstate", &x.Pstate)
+}
+
+func init() {
+ state.Register("pkg/abi/linux.PtraceRegs", (*PtraceRegs)(nil), state.Fns{Save: (*PtraceRegs).save, Load: (*PtraceRegs).load})
+}
diff --git a/pkg/abi/linux/ptrace_amd64.go b/pkg/abi/linux/ptrace_amd64.go
new file mode 100755
index 000000000..ed3881e27
--- /dev/null
+++ b/pkg/abi/linux/ptrace_amd64.go
@@ -0,0 +1,52 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+package linux
+
+// PtraceRegs is the set of CPU registers exposed by ptrace. Source:
+// syscall.PtraceRegs.
+//
+// +marshal
+// +stateify savable
+type PtraceRegs struct {
+ R15 uint64
+ R14 uint64
+ R13 uint64
+ R12 uint64
+ Rbp uint64
+ Rbx uint64
+ R11 uint64
+ R10 uint64
+ R9 uint64
+ R8 uint64
+ Rax uint64
+ Rcx uint64
+ Rdx uint64
+ Rsi uint64
+ Rdi uint64
+ Orig_rax uint64
+ Rip uint64
+ Cs uint64
+ Eflags uint64
+ Rsp uint64
+ Ss uint64
+ Fs_base uint64
+ Gs_base uint64
+ Ds uint64
+ Es uint64
+ Fs uint64
+ Gs uint64
+}
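The hard-coded SizeBytes values in the generated code above follow directly from this layout: 27 uint64 fields on amd64, and on arm64 (the rename that follows) a 31-entry Regs array plus Sp, Pc, and Pstate. A quick arithmetic check, using only the field counts from the struct definitions:

package main

// Sanity arithmetic for the generated SizeBytes values in this commit.
const (
	amd64PtraceRegsSize = 27 * 8    // 216: 27 uint64 fields
	arm64PtraceRegsSize = 24 + 8*31 // 272: Regs[31] + Sp + Pc + Pstate
)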
diff --git a/pkg/sentry/arch/arch_state_aarch64.go b/pkg/abi/linux/ptrace_arm64.go
index 0136a85ad..6147738b3 100755
--- a/pkg/sentry/arch/arch_state_aarch64.go
+++ b/pkg/abi/linux/ptrace_arm64.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The gVisor Authors.
+// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,25 +14,16 @@
// +build arm64
-package arch
+package linux
-import (
- "syscall"
-)
-
-type syscallPtraceRegs struct {
+// PtraceRegs is the set of CPU registers exposed by ptrace. Source:
+// syscall.PtraceRegs.
+//
+// +marshal
+// +stateify savable
+type PtraceRegs struct {
Regs [31]uint64
Sp uint64
Pc uint64
Pstate uint64
}
-
-// saveRegs is invoked by stateify.
-func (s *State) saveRegs() syscallPtraceRegs {
- return syscallPtraceRegs(s.Regs)
-}
-
-// loadRegs is invoked by stateify.
-func (s *State) loadRegs(r syscallPtraceRegs) {
- s.Regs = syscall.PtraceRegs(r)
-}
diff --git a/pkg/sentry/arch/arch_aarch64.go b/pkg/sentry/arch/arch_aarch64.go
index c29e1b841..529980267 100755
--- a/pkg/sentry/arch/arch_aarch64.go
+++ b/pkg/sentry/arch/arch_aarch64.go
@@ -17,18 +17,20 @@
package arch
import (
+ "encoding/binary"
"fmt"
"io"
- "syscall"
- "gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/log"
rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
+// Registers represents the CPU registers for this architecture.
+type Registers = linux.PtraceRegs
+
const (
	// SyscallWidth is the width of instructions.
SyscallWidth = 4
@@ -90,7 +92,7 @@ func NewFloatingPointData() *FloatingPointData {
// file ensures it's only built on aarch64).
type State struct {
// The system registers.
- Regs syscall.PtraceRegs `state:".(syscallPtraceRegs)"`
+ Regs Registers
// Our floating point state.
aarch64FPState `state:"wait"`
@@ -226,25 +228,27 @@ func (s *State) RegisterMap() (map[string]uintptr, error) {
// PtraceGetRegs implements Context.PtraceGetRegs.
func (s *State) PtraceGetRegs(dst io.Writer) (int, error) {
- return dst.Write(binary.Marshal(nil, usermem.ByteOrder, s.ptraceGetRegs()))
+ regs := s.ptraceGetRegs()
+ n, err := regs.WriteTo(dst)
+ return int(n), err
}
-func (s *State) ptraceGetRegs() syscall.PtraceRegs {
+func (s *State) ptraceGetRegs() Registers {
return s.Regs
}
-var ptraceRegsSize = int(binary.Size(syscall.PtraceRegs{}))
+var registersSize = (*Registers)(nil).SizeBytes()
// PtraceSetRegs implements Context.PtraceSetRegs.
func (s *State) PtraceSetRegs(src io.Reader) (int, error) {
- var regs syscall.PtraceRegs
- buf := make([]byte, ptraceRegsSize)
+ var regs Registers
+ buf := make([]byte, registersSize)
if _, err := io.ReadFull(src, buf); err != nil {
return 0, err
}
- binary.Unmarshal(buf, usermem.ByteOrder, &regs)
+ regs.UnmarshalUnsafe(buf)
s.Regs = regs
- return ptraceRegsSize, nil
+ return registersSize, nil
}
// PtraceGetFPRegs implements Context.PtraceGetFPRegs.
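Registers is a type alias (type Registers = linux.PtraceRegs), not a new defined type, so arch.Registers and linux.PtraceRegs denote the identical type to the compiler. That is why the platform diffs further down are pure signature renames with no conversions; a sketch:

package regsalias

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/arch"
)

// aliasDemo compiles only because Registers is an alias: returning
// &r as *linux.PtraceRegs involves no conversion at all.
func aliasDemo() *linux.PtraceRegs {
	var r arch.Registers
	return &r
}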
diff --git a/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go
new file mode 100755
index 000000000..f7a3597d4
--- /dev/null
+++ b/pkg/sentry/arch/arch_aarch64_abi_autogen_unsafe.go
@@ -0,0 +1,9 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build arm64
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_aarch64_state_autogen.go b/pkg/sentry/arch/arch_aarch64_state_autogen.go
index 9c6dfdf2e..49f2e3d67 100755
--- a/pkg/sentry/arch/arch_aarch64_state_autogen.go
+++ b/pkg/sentry/arch/arch_aarch64_state_autogen.go
@@ -1,6 +1,5 @@
// automatically generated by stateify.
// +build arm64
-// +build arm64
package arch
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
new file mode 100755
index 000000000..3e94c6144
--- /dev/null
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -0,0 +1,372 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build amd64 386
+// +build amd64 386
+// +build 386 amd64 arm64
+
+package arch
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/tools/go_marshal/marshal"
+ "io"
+ "reflect"
+ "runtime"
+ "unsafe"
+)
+
+// Marshallable types used by this file.
+var _ marshal.Marshallable = (*SignalAct)(nil)
+var _ marshal.Marshallable = (*SignalInfo)(nil)
+var _ marshal.Marshallable = (*SignalStack)(nil)
+var _ marshal.Marshallable = (*linux.SignalSet)(nil)
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalAct) SizeBytes() int {
+ return 24 +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalAct) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
+ dst = dst[8:]
+ s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()])
+ dst = dst[s.Mask.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalAct) UnmarshalBytes(src []byte) {
+ s.Handler = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Restorer = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()])
+ src = src[s.Mask.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalAct) Packed() bool {
+ return s.Mask.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalAct) MarshalUnsafe(dst []byte) {
+ if s.Mask.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+ } else {
+ s.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalAct) UnmarshalUnsafe(src []byte) {
+ if s.Mask.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+ } else {
+ s.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalAct) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ if !s.Mask.Packed() {
+ // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ s.MarshalBytes(buf) // escapes: fallback.
+ return task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalAct) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalAct) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ if !s.Mask.Packed() {
+ // Type SignalAct doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := task.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ s.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalAct) WriteTo(w io.Writer) (int64, error) {
+ if !s.Mask.Packed() {
+ // Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, s.SizeBytes())
+ s.MarshalBytes(buf)
+ length, err := w.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalStack) SizeBytes() int {
+ return 24
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalStack) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ dst = dst[8:]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalStack) UnmarshalBytes(src []byte) {
+ s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ s.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ s.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalStack) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalStack) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalStack) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalStack) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalStack) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalStack) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalStack) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalInfo) SizeBytes() int {
+ return 16 +
+ 1*(128-16)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalInfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ dst[0] = byte(s.Fields[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalInfo) UnmarshalBytes(src []byte) {
+ s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ s.Fields[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalInfo) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalInfo) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalInfo) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(task, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalInfo) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := task.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalInfo) WriteTo(w io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := w.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s)
+ return int64(length), err
+}
+
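These generated CopyIn/CopyOut methods are what the kernel call sites below switch to. A minimal sketch of the new calling convention, assuming t satisfies marshal.Task (as kernel.Task does after this change):

package signalcopy

import (
	"gvisor.dev/gvisor/pkg/sentry/arch"
	"gvisor.dev/gvisor/pkg/usermem"
	"gvisor.dev/gvisor/tools/go_marshal/marshal"
)

// copyOutStack writes s to user memory through the generated CopyOut.
// SignalStack is packed, so the generated code copies the struct's
// memory directly instead of falling back to MarshalBytes.
func copyOutStack(t marshal.Task, addr usermem.Addr, s *arch.SignalStack) error {
	_, err := s.CopyOut(t, addr)
	return err
}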
diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go
index 85d6acc0f..3b3a0a272 100644
--- a/pkg/sentry/arch/arch_amd64.go
+++ b/pkg/sentry/arch/arch_amd64.go
@@ -22,7 +22,6 @@ import (
"math/rand"
"syscall"
- "gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/usermem"
@@ -301,8 +300,10 @@ func (c *context64) PtracePeekUser(addr uintptr) (interface{}, error) {
// PTRACE_PEEKUSER and PTRACE_POKEUSER are only effective on regs and
// u_debugreg, returning 0 or silently no-oping for other fields
// respectively.
- if addr < uintptr(ptraceRegsSize) {
- buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs())
+ if addr < uintptr(registersSize) {
+ regs := c.ptraceGetRegs()
+ buf := make([]byte, regs.SizeBytes())
+ regs.MarshalUnsafe(buf)
return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil
}
// Note: x86 debug registers are missing.
@@ -314,8 +315,10 @@ func (c *context64) PtracePokeUser(addr, data uintptr) error {
if addr&7 != 0 || addr >= userStructSize {
return syscall.EIO
}
- if addr < uintptr(ptraceRegsSize) {
- buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs())
+ if addr < uintptr(registersSize) {
+ regs := c.ptraceGetRegs()
+ buf := make([]byte, regs.SizeBytes())
+ regs.MarshalUnsafe(buf)
usermem.ByteOrder.PutUint64(buf[addr:], uint64(data))
_, err := c.PtraceSetRegs(bytes.NewBuffer(buf))
return err
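PtracePeekUser now reads a single word out of the marshalled register image; a sketch of the offset arithmetic (offsets follow the struct order above, e.g. Rip sits at byte 16*8 = 128 on amd64):

package peek

import (
	"gvisor.dev/gvisor/pkg/sentry/arch"
	"gvisor.dev/gvisor/pkg/usermem"
)

// peekReg mirrors the PtracePeekUser fast path above: marshal the
// register file, then read the 8-byte word at byte offset addr.
func peekReg(regs *arch.Registers, addr uintptr) uint64 {
	buf := make([]byte, regs.SizeBytes())
	regs.MarshalUnsafe(buf)
	return usermem.ByteOrder.Uint64(buf[addr:])
}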
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
new file mode 100755
index 000000000..8eadb6e22
--- /dev/null
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -0,0 +1,11 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build amd64
+// +build amd64
+// +build amd64
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_arm64.go b/pkg/sentry/arch/arch_arm64.go
index db99c5acb..ada7ac7b8 100755
--- a/pkg/sentry/arch/arch_arm64.go
+++ b/pkg/sentry/arch/arch_arm64.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// +build arm64
+
package arch
import (
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
new file mode 100755
index 000000000..c8e4c7c24
--- /dev/null
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -0,0 +1,10 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build arm64
+// +build arm64
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_arm64_state_autogen.go b/pkg/sentry/arch/arch_arm64_state_autogen.go
index 49f2e3d67..9c6dfdf2e 100755
--- a/pkg/sentry/arch/arch_arm64_state_autogen.go
+++ b/pkg/sentry/arch/arch_arm64_state_autogen.go
@@ -1,5 +1,6 @@
// automatically generated by stateify.
// +build arm64
+// +build arm64
package arch
diff --git a/pkg/sentry/arch/arch_impl_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_impl_abi_autogen_unsafe.go
new file mode 100755
index 000000000..4357dc093
--- /dev/null
+++ b/pkg/sentry/arch/arch_impl_abi_autogen_unsafe.go
@@ -0,0 +1,9 @@
+// Automatically generated marshal implementation. See tools/go_marshal.
+
+// +build amd64 386
+
+package arch
+
+import (
+)
+
diff --git a/pkg/sentry/arch/arch_impl_state_autogen.go b/pkg/sentry/arch/arch_impl_state_autogen.go
index 55721c710..7303bb17f 100755
--- a/pkg/sentry/arch/arch_impl_state_autogen.go
+++ b/pkg/sentry/arch/arch_impl_state_autogen.go
@@ -11,16 +11,15 @@ import (
func (x *State) beforeSave() {}
func (x *State) save(m state.Map) {
x.beforeSave()
- var Regs syscallPtraceRegs = x.saveRegs()
- m.SaveValue("Regs", Regs)
+ m.Save("Regs", &x.Regs)
m.Save("x86FPState", &x.x86FPState)
m.Save("FeatureSet", &x.FeatureSet)
}
func (x *State) load(m state.Map) {
+ m.Load("Regs", &x.Regs)
m.LoadWait("x86FPState", &x.x86FPState)
m.Load("FeatureSet", &x.FeatureSet)
- m.LoadValue("Regs", new(syscallPtraceRegs), func(y interface{}) { x.loadRegs(y.(syscallPtraceRegs)) })
m.AfterLoad(x.afterLoad)
}
diff --git a/pkg/sentry/arch/arch_state_autogen.go b/pkg/sentry/arch/arch_state_autogen.go
index 1bcc94438..bb4bcda4b 100755
--- a/pkg/sentry/arch/arch_state_autogen.go
+++ b/pkg/sentry/arch/arch_state_autogen.go
@@ -31,69 +31,6 @@ func (x *MmapLayout) load(m state.Map) {
m.Load("MaxStackRand", &x.MaxStackRand)
}
-func (x *syscallPtraceRegs) beforeSave() {}
-func (x *syscallPtraceRegs) save(m state.Map) {
- x.beforeSave()
- m.Save("R15", &x.R15)
- m.Save("R14", &x.R14)
- m.Save("R13", &x.R13)
- m.Save("R12", &x.R12)
- m.Save("Rbp", &x.Rbp)
- m.Save("Rbx", &x.Rbx)
- m.Save("R11", &x.R11)
- m.Save("R10", &x.R10)
- m.Save("R9", &x.R9)
- m.Save("R8", &x.R8)
- m.Save("Rax", &x.Rax)
- m.Save("Rcx", &x.Rcx)
- m.Save("Rdx", &x.Rdx)
- m.Save("Rsi", &x.Rsi)
- m.Save("Rdi", &x.Rdi)
- m.Save("Orig_rax", &x.Orig_rax)
- m.Save("Rip", &x.Rip)
- m.Save("Cs", &x.Cs)
- m.Save("Eflags", &x.Eflags)
- m.Save("Rsp", &x.Rsp)
- m.Save("Ss", &x.Ss)
- m.Save("Fs_base", &x.Fs_base)
- m.Save("Gs_base", &x.Gs_base)
- m.Save("Ds", &x.Ds)
- m.Save("Es", &x.Es)
- m.Save("Fs", &x.Fs)
- m.Save("Gs", &x.Gs)
-}
-
-func (x *syscallPtraceRegs) afterLoad() {}
-func (x *syscallPtraceRegs) load(m state.Map) {
- m.Load("R15", &x.R15)
- m.Load("R14", &x.R14)
- m.Load("R13", &x.R13)
- m.Load("R12", &x.R12)
- m.Load("Rbp", &x.Rbp)
- m.Load("Rbx", &x.Rbx)
- m.Load("R11", &x.R11)
- m.Load("R10", &x.R10)
- m.Load("R9", &x.R9)
- m.Load("R8", &x.R8)
- m.Load("Rax", &x.Rax)
- m.Load("Rcx", &x.Rcx)
- m.Load("Rdx", &x.Rdx)
- m.Load("Rsi", &x.Rsi)
- m.Load("Rdi", &x.Rdi)
- m.Load("Orig_rax", &x.Orig_rax)
- m.Load("Rip", &x.Rip)
- m.Load("Cs", &x.Cs)
- m.Load("Eflags", &x.Eflags)
- m.Load("Rsp", &x.Rsp)
- m.Load("Ss", &x.Ss)
- m.Load("Fs_base", &x.Fs_base)
- m.Load("Gs_base", &x.Gs_base)
- m.Load("Ds", &x.Ds)
- m.Load("Es", &x.Es)
- m.Load("Fs", &x.Fs)
- m.Load("Gs", &x.Gs)
-}
-
func (x *AuxEntry) beforeSave() {}
func (x *AuxEntry) save(m state.Map) {
x.beforeSave()
@@ -158,7 +95,6 @@ func (x *SignalInfo) load(m state.Map) {
func init() {
state.Register("pkg/sentry/arch.MmapLayout", (*MmapLayout)(nil), state.Fns{Save: (*MmapLayout).save, Load: (*MmapLayout).load})
- state.Register("pkg/sentry/arch.syscallPtraceRegs", (*syscallPtraceRegs)(nil), state.Fns{Save: (*syscallPtraceRegs).save, Load: (*syscallPtraceRegs).load})
state.Register("pkg/sentry/arch.AuxEntry", (*AuxEntry)(nil), state.Fns{Save: (*AuxEntry).save, Load: (*AuxEntry).load})
state.Register("pkg/sentry/arch.SignalAct", (*SignalAct)(nil), state.Fns{Save: (*SignalAct).save, Load: (*SignalAct).load})
state.Register("pkg/sentry/arch.SignalStack", (*SignalStack)(nil), state.Fns{Save: (*SignalStack).save, Load: (*SignalStack).load})
diff --git a/pkg/sentry/arch/arch_state_x86.go b/pkg/sentry/arch/arch_state_x86.go
index aa31169e0..19ce99d25 100644
--- a/pkg/sentry/arch/arch_state_x86.go
+++ b/pkg/sentry/arch/arch_state_x86.go
@@ -18,7 +18,6 @@ package arch
import (
"fmt"
- "syscall"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/usermem"
@@ -90,44 +89,3 @@ func (s *State) afterLoadFPState() {
// Copy to the new, aligned location.
copy(s.x86FPState, old)
}
-
-// +stateify savable
-type syscallPtraceRegs struct {
- R15 uint64
- R14 uint64
- R13 uint64
- R12 uint64
- Rbp uint64
- Rbx uint64
- R11 uint64
- R10 uint64
- R9 uint64
- R8 uint64
- Rax uint64
- Rcx uint64
- Rdx uint64
- Rsi uint64
- Rdi uint64
- Orig_rax uint64
- Rip uint64
- Cs uint64
- Eflags uint64
- Rsp uint64
- Ss uint64
- Fs_base uint64
- Gs_base uint64
- Ds uint64
- Es uint64
- Fs uint64
- Gs uint64
-}
-
-// saveRegs is invoked by stateify.
-func (s *State) saveRegs() syscallPtraceRegs {
- return syscallPtraceRegs(s.Regs)
-}
-
-// loadRegs is invoked by stateify.
-func (s *State) loadRegs(r syscallPtraceRegs) {
- s.Regs = syscall.PtraceRegs(r)
-}
diff --git a/pkg/sentry/arch/arch_x86.go b/pkg/sentry/arch/arch_x86.go
index 7fc4c0473..dc458b37f 100644
--- a/pkg/sentry/arch/arch_x86.go
+++ b/pkg/sentry/arch/arch_x86.go
@@ -21,7 +21,7 @@ import (
"io"
"syscall"
- "gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/log"
rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
@@ -30,6 +30,9 @@ import (
"gvisor.dev/gvisor/pkg/usermem"
)
+// Registers represents the CPU registers for this architecture.
+type Registers = linux.PtraceRegs
+
// System-related constants for x86.
const (
	// SyscallWidth is the width of syscall, sysenter, and int 80 instructions.
@@ -267,10 +270,12 @@ func (s *State) RegisterMap() (map[string]uintptr, error) {
// PtraceGetRegs implements Context.PtraceGetRegs.
func (s *State) PtraceGetRegs(dst io.Writer) (int, error) {
- return dst.Write(binary.Marshal(nil, usermem.ByteOrder, s.ptraceGetRegs()))
+ regs := s.ptraceGetRegs()
+ n, err := regs.WriteTo(dst)
+ return int(n), err
}
-func (s *State) ptraceGetRegs() syscall.PtraceRegs {
+func (s *State) ptraceGetRegs() Registers {
regs := s.Regs
// These may not be initialized.
if regs.Cs == 0 || regs.Ss == 0 || regs.Eflags == 0 {
@@ -306,16 +311,16 @@ func (s *State) ptraceGetRegs() syscall.PtraceRegs {
return regs
}
-var ptraceRegsSize = int(binary.Size(syscall.PtraceRegs{}))
+var registersSize = (*Registers)(nil).SizeBytes()
// PtraceSetRegs implements Context.PtraceSetRegs.
func (s *State) PtraceSetRegs(src io.Reader) (int, error) {
- var regs syscall.PtraceRegs
- buf := make([]byte, ptraceRegsSize)
+ var regs Registers
+ buf := make([]byte, registersSize)
if _, err := io.ReadFull(src, buf); err != nil {
return 0, err
}
- binary.Unmarshal(buf, usermem.ByteOrder, &regs)
+ regs.UnmarshalUnsafe(buf)
// Truncate segment registers to 16 bits.
regs.Cs = uint64(uint16(regs.Cs))
regs.Ds = uint64(uint16(regs.Ds))
@@ -369,7 +374,7 @@ func (s *State) PtraceSetRegs(src io.Reader) (int, error) {
}
regs.Eflags = (s.Regs.Eflags &^ eflagsPtraceMutable) | (regs.Eflags & eflagsPtraceMutable)
s.Regs = regs
- return ptraceRegsSize, nil
+ return registersSize, nil
}
// isUserSegmentSelector returns true if the given segment selector specifies a
@@ -538,7 +543,7 @@ const (
func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) {
switch regset {
case _NT_PRSTATUS:
- if maxlen < ptraceRegsSize {
+ if maxlen < registersSize {
return 0, syserror.EFAULT
}
return s.PtraceGetRegs(dst)
@@ -558,7 +563,7 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,
func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) {
switch regset {
case _NT_PRSTATUS:
- if maxlen < ptraceRegsSize {
+ if maxlen < registersSize {
return 0, syserror.EFAULT
}
return s.PtraceSetRegs(src)
diff --git a/pkg/sentry/arch/arch_x86_impl.go b/pkg/sentry/arch/arch_x86_impl.go
index 3edf40764..0c73fcbfb 100755
--- a/pkg/sentry/arch/arch_x86_impl.go
+++ b/pkg/sentry/arch/arch_x86_impl.go
@@ -17,8 +17,6 @@
package arch
import (
- "syscall"
-
"gvisor.dev/gvisor/pkg/cpuid"
)
@@ -28,7 +26,7 @@ import (
// +stateify savable
type State struct {
// The system registers.
- Regs syscall.PtraceRegs `state:".(syscallPtraceRegs)"`
+ Regs Registers
// Our floating point state.
x86FPState `state:"wait"`
diff --git a/pkg/sentry/arch/signal.go b/pkg/sentry/arch/signal.go
index 8b03d0187..c9fb55d00 100755
--- a/pkg/sentry/arch/signal.go
+++ b/pkg/sentry/arch/signal.go
@@ -22,6 +22,7 @@ import (
// SignalAct represents the action that should be taken when a signal is
// delivered, and is equivalent to struct sigaction.
//
+// +marshal
// +stateify savable
type SignalAct struct {
Handler uint64
@@ -43,6 +44,7 @@ func (s *SignalAct) DeserializeTo(other *SignalAct) {
// SignalStack represents information about a user stack, and is equivalent to
// stack_t.
//
+// +marshal
// +stateify savable
type SignalStack struct {
Addr uint64
@@ -64,6 +66,7 @@ func (s *SignalStack) DeserializeTo(other *SignalStack) {
// SignalInfo represents information about a signal being delivered, and is
// equivalent to struct siginfo in the Linux kernel (linux/include/uapi/asm-generic/siginfo.h).
//
+// +marshal
// +stateify savable
type SignalInfo struct {
Signo int32 // Signal number
diff --git a/pkg/sentry/arch/signal_act.go b/pkg/sentry/arch/signal_act.go
index f9ca2e74e..32173aa20 100644
--- a/pkg/sentry/arch/signal_act.go
+++ b/pkg/sentry/arch/signal_act.go
@@ -14,6 +14,8 @@
package arch
+import "gvisor.dev/gvisor/tools/go_marshal/marshal"
+
// Special values for SignalAct.Handler.
const (
// SignalActDefault is SIG_DFL and specifies that the default behavior for
@@ -71,6 +73,8 @@ func (s SignalAct) HasRestorer() bool {
// NativeSignalAct is a type that is equivalent to struct sigaction in the
// guest architecture.
type NativeSignalAct interface {
+ marshal.Marshallable
+
// SerializeFrom copies the data in the host SignalAct s into this object.
SerializeFrom(s *SignalAct)
diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go
index e58f055c7..0fa738a1d 100644
--- a/pkg/sentry/arch/signal_stack.go
+++ b/pkg/sentry/arch/signal_stack.go
@@ -18,6 +18,7 @@ package arch
import (
"gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/tools/go_marshal/marshal"
)
const (
@@ -55,6 +56,8 @@ func (s *SignalStack) Contains(sp usermem.Addr) bool {
// NativeSignalStack is a type that is equivalent to stack_t in the guest
// architecture.
type NativeSignalStack interface {
+ marshal.Marshallable
+
// SerializeFrom copies the data in the host SignalStack s into this
// object.
SerializeFrom(s *SignalStack)
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index 7d25e98f7..79766cafe 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -716,7 +716,7 @@ func (tg *ThreadGroup) SetSignalAct(sig linux.Signal, actptr *arch.SignalAct) (a
func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error {
n := t.Arch().NewSignalAct()
n.SerializeFrom(s)
- _, err := t.CopyOut(addr, n)
+ _, err := n.CopyOut(t, addr)
return err
}
@@ -725,7 +725,7 @@ func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error {
func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) {
n := t.Arch().NewSignalAct()
var s arch.SignalAct
- if _, err := t.CopyIn(addr, n); err != nil {
+ if _, err := n.CopyIn(t, addr); err != nil {
return s, err
}
n.DeserializeTo(&s)
@@ -737,7 +737,7 @@ func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) {
func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error {
n := t.Arch().NewSignalStack()
n.SerializeFrom(s)
- _, err := t.CopyOut(addr, n)
+ _, err := n.CopyOut(t, addr)
return err
}
@@ -746,7 +746,7 @@ func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error
func (t *Task) CopyInSignalStack(addr usermem.Addr) (arch.SignalStack, error) {
n := t.Arch().NewSignalStack()
var s arch.SignalStack
- if _, err := t.CopyIn(addr, n); err != nil {
+ if _, err := n.CopyIn(t, addr); err != nil {
return s, err
}
n.DeserializeTo(&s)
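The direction of the call flips in these hunks: instead of handing the object to the Task's generic copy routine, the object's generated method is handed the Task. Condensed, the new CopyInSignalStack pattern reads like this (a sketch mirroring the hunk above):

package signalcopy

import (
	"gvisor.dev/gvisor/pkg/sentry/arch"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/usermem"
)

// copyInSignalStack condenses the new pattern: NewSignalStack now
// returns a marshal.Marshallable, so its generated CopyIn performs
// the user-memory read with t acting as the marshal.Task.
func copyInSignalStack(t *kernel.Task, addr usermem.Addr) (arch.SignalStack, error) {
	n := t.Arch().NewSignalStack()
	var s arch.SignalStack
	if _, err := n.CopyIn(t, addr); err != nil {
		return s, err
	}
	n.DeserializeTo(&s)
	return s, nil
}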
diff --git a/pkg/sentry/platform/kvm/kvm_arm64.go b/pkg/sentry/platform/kvm/kvm_arm64.go
index 716198712..29d457a7e 100755
--- a/pkg/sentry/platform/kvm/kvm_arm64.go
+++ b/pkg/sentry/platform/kvm/kvm_arm64.go
@@ -17,8 +17,7 @@
package kvm
import (
- "syscall"
-
+ "gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform/ring0"
)
@@ -37,7 +36,7 @@ type userFpsimdState struct {
}
type userRegs struct {
- Regs syscall.PtraceRegs
+ Regs arch.Registers
sp_el1 uint64
elr_el1 uint64
spsr [KVM_NR_SPSR]uint64
diff --git a/pkg/sentry/platform/ptrace/ptrace_amd64.go b/pkg/sentry/platform/ptrace/ptrace_amd64.go
index 24fc5dc62..3b9a870a5 100644
--- a/pkg/sentry/platform/ptrace/ptrace_amd64.go
+++ b/pkg/sentry/platform/ptrace/ptrace_amd64.go
@@ -15,9 +15,8 @@
package ptrace
import (
- "syscall"
-
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
)
// fpRegSet returns the GETREGSET/SETREGSET register set type to be used.
@@ -28,12 +27,12 @@ func fpRegSet(useXsave bool) uintptr {
return linux.NT_PRFPREG
}
-func stackPointer(r *syscall.PtraceRegs) uintptr {
+func stackPointer(r *arch.Registers) uintptr {
return uintptr(r.Rsp)
}
// x86 uses the fs_base register to store the TLS pointer which can be
-// get/set in "func (t *thread) get/setRegs(regs *syscall.PtraceRegs)".
+// get/set in "func (t *thread) get/setRegs(regs *arch.Registers)".
// So both of the get/setTLS() operations are noop here.
// getTLS gets the thread local storage register.
diff --git a/pkg/sentry/platform/ptrace/ptrace_arm64.go b/pkg/sentry/platform/ptrace/ptrace_arm64.go
index 4db28c534..5c869926a 100644
--- a/pkg/sentry/platform/ptrace/ptrace_arm64.go
+++ b/pkg/sentry/platform/ptrace/ptrace_arm64.go
@@ -15,9 +15,8 @@
package ptrace
import (
- "syscall"
-
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
)
// fpRegSet returns the GETREGSET/SETREGSET register set type to be used.
@@ -25,6 +24,6 @@ func fpRegSet(_ bool) uintptr {
return linux.NT_PRFPREG
}
-func stackPointer(r *syscall.PtraceRegs) uintptr {
+func stackPointer(r *arch.Registers) uintptr {
return uintptr(r.Sp)
}
diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
index 6c0ed7b3e..8b72d24e8 100644
--- a/pkg/sentry/platform/ptrace/ptrace_unsafe.go
+++ b/pkg/sentry/platform/ptrace/ptrace_unsafe.go
@@ -24,7 +24,7 @@ import (
)
// getRegs gets the general purpose register set.
-func (t *thread) getRegs(regs *syscall.PtraceRegs) error {
+func (t *thread) getRegs(regs *arch.Registers) error {
iovec := syscall.Iovec{
Base: (*byte)(unsafe.Pointer(regs)),
Len: uint64(unsafe.Sizeof(*regs)),
@@ -43,7 +43,7 @@ func (t *thread) getRegs(regs *syscall.PtraceRegs) error {
}
// setRegs sets the general purpose register set.
-func (t *thread) setRegs(regs *syscall.PtraceRegs) error {
+func (t *thread) setRegs(regs *arch.Registers) error {
iovec := syscall.Iovec{
Base: (*byte)(unsafe.Pointer(regs)),
Len: uint64(unsafe.Sizeof(*regs)),
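For context, getRegs/setRegs wrap PTRACE_GETREGSET/PTRACE_SETREGSET with an iovec describing the Registers struct, which is why only the parameter type needed to change. A hedged sketch of the read side (the raw-syscall plumbing is reconstructed from the visible iovec setup, not copied from this hunk):

package ptracesketch

import (
	"syscall"
	"unsafe"

	"gvisor.dev/gvisor/pkg/sentry/arch"
)

const _NT_PRSTATUS = 1 // Linux UAPI: general-purpose register set.

// getRegsSketch reads tid's general-purpose registers into regs via
// PTRACE_GETREGSET; the iovec tells the kernel where regs lives.
func getRegsSketch(tid int, regs *arch.Registers) error {
	iovec := syscall.Iovec{
		Base: (*byte)(unsafe.Pointer(regs)),
		Len:  uint64(unsafe.Sizeof(*regs)),
	}
	_, _, errno := syscall.RawSyscall6(
		syscall.SYS_PTRACE,
		syscall.PTRACE_GETREGSET,
		uintptr(tid),
		_NT_PRSTATUS,
		uintptr(unsafe.Pointer(&iovec)),
		0, 0)
	if errno != 0 {
		return errno
	}
	return nil
}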
diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go
index 773ddb1ed..2389423b0 100644
--- a/pkg/sentry/platform/ptrace/subprocess.go
+++ b/pkg/sentry/platform/ptrace/subprocess.go
@@ -63,7 +63,7 @@ type thread struct {
// initRegs are the initial registers for the first thread.
//
// These are used for the register set for system calls.
- initRegs syscall.PtraceRegs
+ initRegs arch.Registers
}
// threadPool is a collection of threads.
@@ -317,7 +317,7 @@ const (
)
func (t *thread) dumpAndPanic(message string) {
- var regs syscall.PtraceRegs
+ var regs arch.Registers
message += "\n"
if err := t.getRegs(&regs); err == nil {
message += dumpRegs(&regs)
@@ -423,7 +423,7 @@ func (t *thread) init() {
// This is _not_ for use by application system calls, rather it is for use when
// a system call must be injected into the remote context (e.g. mmap, munmap).
// Note that clones are handled separately.
-func (t *thread) syscall(regs *syscall.PtraceRegs) (uintptr, error) {
+func (t *thread) syscall(regs *arch.Registers) (uintptr, error) {
// Set registers.
if err := t.setRegs(regs); err != nil {
panic(fmt.Sprintf("ptrace set regs failed: %v", err))
@@ -461,7 +461,7 @@ func (t *thread) syscall(regs *syscall.PtraceRegs) (uintptr, error) {
// syscallIgnoreInterrupt ignores interrupts on the system call thread and
// restarts the syscall if the kernel indicates that should happen.
func (t *thread) syscallIgnoreInterrupt(
- initRegs *syscall.PtraceRegs,
+ initRegs *arch.Registers,
sysno uintptr,
args ...arch.SyscallArgument) (uintptr, error) {
for {
diff --git a/pkg/sentry/platform/ptrace/subprocess_amd64.go b/pkg/sentry/platform/ptrace/subprocess_amd64.go
index cd74945e7..84b699f0d 100644
--- a/pkg/sentry/platform/ptrace/subprocess_amd64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_amd64.go
@@ -41,7 +41,7 @@ const (
// resetSysemuRegs sets up emulation registers.
//
// This should be called prior to calling sysemu.
-func (t *thread) resetSysemuRegs(regs *syscall.PtraceRegs) {
+func (t *thread) resetSysemuRegs(regs *arch.Registers) {
regs.Cs = t.initRegs.Cs
regs.Ss = t.initRegs.Ss
regs.Ds = t.initRegs.Ds
@@ -53,7 +53,7 @@ func (t *thread) resetSysemuRegs(regs *syscall.PtraceRegs) {
// createSyscallRegs sets up syscall registers.
//
// This should be called to generate registers for a system call.
-func createSyscallRegs(initRegs *syscall.PtraceRegs, sysno uintptr, args ...arch.SyscallArgument) syscall.PtraceRegs {
+func createSyscallRegs(initRegs *arch.Registers, sysno uintptr, args ...arch.SyscallArgument) arch.Registers {
// Copy initial registers.
regs := *initRegs
@@ -82,18 +82,18 @@ func createSyscallRegs(initRegs *syscall.PtraceRegs, sysno uintptr, args ...arch
}
// isSingleStepping determines if the registers indicate single-stepping.
-func isSingleStepping(regs *syscall.PtraceRegs) bool {
+func isSingleStepping(regs *arch.Registers) bool {
return (regs.Eflags & arch.X86TrapFlag) != 0
}
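arch.X86TrapFlag is the EFLAGS trap flag, TF, bit 8 (0x100): while it is set the CPU raises a debug exception after every instruction, so its presence in the saved flags is precisely what "the tracee is single-stepping" means. The check reduces to a bit test:

// TF is bit 8 of EFLAGS; hardware single-step raises #DB after each
// instruction while it is set.
const x86TrapFlag uint64 = 1 << 8

func isSingleStepping(eflags uint64) bool {
	return eflags&x86TrapFlag != 0
}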
// updateSyscallRegs updates registers after finishing sysemu.
-func updateSyscallRegs(regs *syscall.PtraceRegs) {
+func updateSyscallRegs(regs *arch.Registers) {
// Ptrace puts -ENOSYS in rax on syscall-enter-stop.
regs.Rax = regs.Orig_rax
}
// syscallReturnValue extracts a sensible return from registers.
-func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) {
+func syscallReturnValue(regs *arch.Registers) (uintptr, error) {
rval := int64(regs.Rax)
if rval < 0 {
return 0, syscall.Errno(-rval)
@@ -101,7 +101,7 @@ func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) {
return uintptr(rval), nil
}
-func dumpRegs(regs *syscall.PtraceRegs) string {
+func dumpRegs(regs *arch.Registers) string {
var m strings.Builder
fmt.Fprintf(&m, "Registers:\n")
@@ -143,7 +143,7 @@ func (t *thread) adjustInitRegsRip() {
}
// Pass the expected PPID to the child via R15 when creating the stub process.
-func initChildProcessPPID(initregs *syscall.PtraceRegs, ppid int32) {
+func initChildProcessPPID(initregs *arch.Registers, ppid int32) {
initregs.R15 = uint64(ppid)
// Rbx has to be set to 1 when creating the stub process.
initregs.Rbx = 1
@@ -156,7 +156,7 @@ func initChildProcessPPID(initregs *syscall.PtraceRegs, ppid int32) {
//
// Note that this should only be called after verifying that the signalInfo has
// been generated by the kernel.
-func patchSignalInfo(regs *syscall.PtraceRegs, signalInfo *arch.SignalInfo) {
+func patchSignalInfo(regs *arch.Registers, signalInfo *arch.SignalInfo) {
if linux.Signal(signalInfo.Signo) == linux.SIGSYS {
signalInfo.Signo = int32(linux.SIGSEGV)
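For reference, createSyscallRegs (mostly elided above) places values per the Linux amd64 syscall convention: number in rax, arguments in rdi, rsi, rdx, r10, r8, r9. A sketch of that placement against the raw struct (linux/amd64 only; fillSyscallRegs is a hypothetical name):

// fillSyscallRegs sketches the argument marshalling done on a copy of
// initRegs; Linux syscalls take at most six arguments.
func fillSyscallRegs(regs *syscall.PtraceRegs, sysno uint64, args ...uint64) {
	regs.Rax = sysno
	dst := []*uint64{&regs.Rdi, &regs.Rsi, &regs.Rdx, &regs.R10, &regs.R8, &regs.R9}
	for i, a := range args {
		if i >= len(dst) {
			break
		}
		*dst[i] = a
	}
}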
diff --git a/pkg/sentry/platform/ptrace/subprocess_arm64.go b/pkg/sentry/platform/ptrace/subprocess_arm64.go
index 7f5c393f0..bd618fae8 100644
--- a/pkg/sentry/platform/ptrace/subprocess_arm64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_arm64.go
@@ -41,13 +41,13 @@ const (
// resetSysemuRegs sets up emulation registers.
//
// This should be called prior to calling sysemu.
-func (t *thread) resetSysemuRegs(regs *syscall.PtraceRegs) {
+func (t *thread) resetSysemuRegs(regs *arch.Registers) {
}
// createSyscallRegs sets up syscall registers.
//
// This should be called to generate registers for a system call.
-func createSyscallRegs(initRegs *syscall.PtraceRegs, sysno uintptr, args ...arch.SyscallArgument) syscall.PtraceRegs {
+func createSyscallRegs(initRegs *arch.Registers, sysno uintptr, args ...arch.SyscallArgument) arch.Registers {
// Copy initial registers (Pc, Sp, etc.).
regs := *initRegs
@@ -78,7 +78,7 @@ func createSyscallRegs(initRegs *syscall.PtraceRegs, sysno uintptr, args ...arch
}
// isSingleStepping determines if the registers indicate single-stepping.
-func isSingleStepping(regs *syscall.PtraceRegs) bool {
+func isSingleStepping(regs *arch.Registers) bool {
// Refer to the ARM SDM D2.12.3: software step state machine
// return (regs.Pstate.SS == 1) && (MDSCR_EL1.SS == 1).
//
@@ -89,13 +89,13 @@ func isSingleStepping(regs *syscall.PtraceRegs) bool {
}
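The elided body checks the saved program status: PSTATE.SS, bit 21, is the software-step bit from the comment's state machine, and the host kernel maintains MDSCR_EL1.SS on the tracer's behalf once a single-step ptrace command is issued, so only the PSTATE side needs testing. A sketch under that assumption:

// PSTATE.SS is bit [21] of the saved SPSR on AArch64.
const armTrapFlag uint64 = 1 << 21

func isSingleStepping(pstate uint64) bool {
	return pstate&armTrapFlag != 0
}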
// updateSyscallRegs updates registers after finishing sysemu.
-func updateSyscallRegs(regs *syscall.PtraceRegs) {
+func updateSyscallRegs(regs *arch.Registers) {
// No special work is necessary.
return
}
// syscallReturnValue extracts a sensible return from registers.
-func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) {
+func syscallReturnValue(regs *arch.Registers) (uintptr, error) {
rval := int64(regs.Regs[0])
if rval < 0 {
return 0, syscall.Errno(-rval)
@@ -103,7 +103,7 @@ func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) {
return uintptr(rval), nil
}
-func dumpRegs(regs *syscall.PtraceRegs) string {
+func dumpRegs(regs *arch.Registers) string {
var m strings.Builder
fmt.Fprintf(&m, "Registers:\n")
@@ -125,7 +125,7 @@ func (t *thread) adjustInitRegsRip() {
}
// Pass the expected PPID to the child via X7 when creating the stub process.
-func initChildProcessPPID(initregs *syscall.PtraceRegs, ppid int32) {
+func initChildProcessPPID(initregs *arch.Registers, ppid int32) {
initregs.Regs[7] = uint64(ppid)
// R9 has to be set to 1 when creating the stub process.
initregs.Regs[9] = 1
@@ -138,7 +138,7 @@ func initChildProcessPPID(initregs *syscall.PtraceRegs, ppid int32) {
//
// Note that this should only be called after verifying that the signalInfo has
// been generated by the kernel.
-func patchSignalInfo(regs *syscall.PtraceRegs, signalInfo *arch.SignalInfo) {
+func patchSignalInfo(regs *arch.Registers, signalInfo *arch.SignalInfo) {
if linux.Signal(signalInfo.Signo) == linux.SIGSYS {
signalInfo.Signo = int32(linux.SIGSEGV)
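The arm64 createSyscallRegs follows the AArch64 Linux convention, syscall number in x8 and arguments in x0..x5, which lands in the flat Regs array rather than named fields. A sketch (linux/arm64 only; fillSyscallRegs is a hypothetical name):

// fillSyscallRegs sketches the arm64 placement; syscall.PtraceRegs on
// linux/arm64 is Regs [31]uint64 plus Sp, Pc and Pstate.
func fillSyscallRegs(regs *syscall.PtraceRegs, sysno uint64, args ...uint64) {
	regs.Regs[8] = sysno // x8 carries the syscall number.
	for i, a := range args {
		if i >= 6 {
			break // At most six syscall arguments.
		}
		regs.Regs[i] = a // x0..x5 carry the arguments.
	}
}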
diff --git a/pkg/sentry/platform/ring0/defs_impl_amd64.go b/pkg/sentry/platform/ring0/defs_impl_amd64.go
index e96c62c76..029d699fe 100755
--- a/pkg/sentry/platform/ring0/defs_impl_amd64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_amd64.go
@@ -1,14 +1,14 @@
package ring0
import (
- "fmt"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
+ "gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
+
+ "fmt"
"gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
- "syscall"
-
- "gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
)
// Kernel is a global kernel object.
@@ -63,7 +63,7 @@ type CPU struct {
// registers is a set of registers; these may be used on kernel system
// calls and exceptions via the Registers function.
- registers syscall.PtraceRegs
+ registers arch.Registers
// hooks are kernel hooks.
hooks Hooks
@@ -74,14 +74,14 @@ type CPU struct {
// This is explicitly safe to call during KernelException and KernelSyscall.
//
//go:nosplit
-func (c *CPU) Registers() *syscall.PtraceRegs {
+func (c *CPU) Registers() *arch.Registers {
return &c.registers
}
// SwitchOpts are passed to the Switch function.
type SwitchOpts struct {
// Registers are the user register state.
- Registers *syscall.PtraceRegs
+ Registers *arch.Registers
// FloatingPointState is a byte pointer where floating point state is
// saved and restored.
@@ -267,7 +267,7 @@ func Emit(w io.Writer) {
fmt.Fprintf(w, "#define SyscallInt80 0x%02x\n", SyscallInt80)
fmt.Fprintf(w, "#define Syscall 0x%02x\n", Syscall)
- p := &syscall.PtraceRegs{}
+ p := &arch.Registers{}
fmt.Fprintf(w, "\n// Ptrace registers.\n")
fmt.Fprintf(w, "#define PTRACE_R15 0x%02x\n", reflect.ValueOf(&p.R15).Pointer()-reflect.ValueOf(p).Pointer())
fmt.Fprintf(w, "#define PTRACE_R14 0x%02x\n", reflect.ValueOf(&p.R14).Pointer()-reflect.ValueOf(p).Pointer())
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index e29d62f04..93f23047d 100755
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -1,13 +1,13 @@
package ring0
import (
- "syscall"
-
"fmt"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
+
+ "gvisor.dev/gvisor/pkg/usermem"
)
// Useful bits.
@@ -155,7 +155,7 @@ type CPU struct {
// registers is a set of registers; these may be used on kernel system
// calls and exceptions via the Registers function.
- registers syscall.PtraceRegs
+ registers arch.Registers
// hooks are kernel hooks.
hooks Hooks
@@ -166,14 +166,14 @@ type CPU struct {
// This is explicitly safe to call during KernelException and KernelSyscall.
//
//go:nosplit
-func (c *CPU) Registers() *syscall.PtraceRegs {
+func (c *CPU) Registers() *arch.Registers {
return &c.registers
}
// SwitchOpts are passed to the Switch function.
type SwitchOpts struct {
// Registers are the user register state.
- Registers *syscall.PtraceRegs
+ Registers *arch.Registers
// FloatingPointState is a byte pointer where floating point state is
// saved and restored.
@@ -377,7 +377,7 @@ func Emit(w io.Writer) {
fmt.Fprintf(w, "#define Syscall 0x%02x\n", Syscall)
fmt.Fprintf(w, "#define VirtualizationException 0x%02x\n", VirtualizationException)
- p := &syscall.PtraceRegs{}
+ p := &arch.Registers{}
fmt.Fprintf(w, "\n// Ptrace registers.\n")
fmt.Fprintf(w, "#define PTRACE_R0 0x%02x\n", reflect.ValueOf(&p.Regs[0]).Pointer()-reflect.ValueOf(p).Pointer())
fmt.Fprintf(w, "#define PTRACE_R1 0x%02x\n", reflect.ValueOf(&p.Regs[1]).Pointer()-reflect.ValueOf(p).Pointer())
diff --git a/pkg/sentry/platform/ring0/entry_amd64.go b/pkg/sentry/platform/ring0/entry_amd64.go
index a5ce67885..7fa43c2f5 100644
--- a/pkg/sentry/platform/ring0/entry_amd64.go
+++ b/pkg/sentry/platform/ring0/entry_amd64.go
@@ -17,7 +17,7 @@
package ring0
import (
- "syscall"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
)
// This is an assembly function.
@@ -41,7 +41,7 @@ func swapgs()
// The return code is the vector that interrupted execution.
//
// See stubs.go for a note regarding the frame size of this function.
-func sysret(*CPU, *syscall.PtraceRegs) Vector
+func sysret(*CPU, *arch.Registers) Vector
// "iret is the cadillac of CPL switching."
//
@@ -50,7 +50,7 @@ func sysret(*CPU, *syscall.PtraceRegs) Vector
// iret is nearly identical to sysret, except an iret is used to fully restore
// all user state. This must be called in cases where all registers need to be
// restored.
-func iret(*CPU, *syscall.PtraceRegs) Vector
+func iret(*CPU, *arch.Registers) Vector
// exception is the generic exception entry.
//
diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go
index 7e1747a0c..582d37e03 100644
--- a/pkg/sentry/syscalls/linux/sys_signal.go
+++ b/pkg/sentry/syscalls/linux/sys_signal.go
@@ -355,7 +355,7 @@ func Pause(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
func RtSigpending(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
addr := args[0].Pointer()
pending := t.PendingSignals()
- _, err := t.CopyOut(addr, pending)
+ _, err := pending.CopyOut(t, addr)
return 0, nil, err
}
@@ -392,7 +392,7 @@ func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
if siginfo != 0 {
si.FixSignalCodeForUser()
- if _, err := t.CopyOut(siginfo, si); err != nil {
+ if _, err := si.CopyOut(t, siginfo); err != nil {
return 0, nil, err
}
}
@@ -411,7 +411,7 @@ func RtSigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
// same way), and that the code is in the allowed set. This same logic
// appears below in RtSigtgqueueinfo and should be kept in sync.
var info arch.SignalInfo
- if _, err := t.CopyIn(infoAddr, &info); err != nil {
+ if _, err := info.CopyIn(t, infoAddr); err != nil {
return 0, nil, err
}
info.Signo = int32(sig)
@@ -455,7 +455,7 @@ func RtTgsigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
// Copy in the info. See RtSigqueueinfo above.
var info arch.SignalInfo
- if _, err := t.CopyIn(infoAddr, &info); err != nil {
+ if _, err := info.CopyIn(t, infoAddr); err != nil {
return 0, nil, err
}
info.Signo = int32(sig)
@@ -485,7 +485,7 @@ func RtSigsuspend(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// Copy in the signal mask.
var mask linux.SignalSet
- if _, err := t.CopyIn(sigset, &mask); err != nil {
+ if _, err := mask.CopyIn(t, sigset); err != nil {
return 0, nil, err
}
mask &^= kernel.UnblockableSignals