path: root/pkg/sentry/arch
author     gVisor bot <gvisor-bot@google.com>  2019-06-02 06:44:55 +0000
committer  gVisor bot <gvisor-bot@google.com>  2019-06-02 06:44:55 +0000
commit     ceb0d792f328d1fc0692197d8856a43c3936a571 (patch)
tree       83155f302eff44a78bcc30a3a08f4efe59a79379  /pkg/sentry/arch
parent     deb7ecf1e46862d54f4b102f2d163cfbcfc37f3b (diff)
parent     216da0b733dbed9aad9b2ab92ac75bcb906fd7ee (diff)
Merge 216da0b7 (automated)
Diffstat (limited to 'pkg/sentry/arch')
-rw-r--r--  pkg/sentry/arch/aligned.go  31
-rw-r--r--  pkg/sentry/arch/arch.go  359
-rw-r--r--  pkg/sentry/arch/arch_amd64.go  325
-rw-r--r--  pkg/sentry/arch/arch_amd64.s  135
-rwxr-xr-x  pkg/sentry/arch/arch_state_autogen.go  193
-rw-r--r--  pkg/sentry/arch/arch_state_x86.go  131
-rw-r--r--  pkg/sentry/arch/arch_x86.go  621
-rw-r--r--  pkg/sentry/arch/auxv.go  30
-rwxr-xr-x  pkg/sentry/arch/registers_go_proto/registers.pb.go  367
-rw-r--r--  pkg/sentry/arch/signal_act.go  79
-rw-r--r--  pkg/sentry/arch/signal_amd64.go  521
-rw-r--r--  pkg/sentry/arch/signal_info.go  66
-rw-r--r--  pkg/sentry/arch/signal_stack.go  65
-rw-r--r--  pkg/sentry/arch/stack.go  252
-rw-r--r--  pkg/sentry/arch/syscalls_amd64.go  52
15 files changed, 3227 insertions, 0 deletions
diff --git a/pkg/sentry/arch/aligned.go b/pkg/sentry/arch/aligned.go
new file mode 100644
index 000000000..df01a903d
--- /dev/null
+++ b/pkg/sentry/arch/aligned.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+import (
+ "reflect"
+)
+
+// alignedBytes returns a slice of size bytes, aligned in memory to the given
+// alignment. This is used because we require certain structures to be aligned
+// in a specific way (for example, the X86 floating point data).
+func alignedBytes(size, alignment uint) []byte {
+ data := make([]byte, size+alignment-1)
+ offset := uint(reflect.ValueOf(data).Index(0).Addr().Pointer() % uintptr(alignment))
+ if offset == 0 {
+ return data[:size:size]
+ }
+ return data[alignment-offset:][:size:size]
+}
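
For readers skimming the diff, the allocation trick above can be reproduced as a standalone sketch; this example is illustrative only (it uses unsafe instead of reflect, and the main function is not part of the change):

package main

import (
	"fmt"
	"unsafe"
)

// alignedBytes mirrors the technique above: over-allocate by alignment-1
// bytes, then reslice so the first byte lands on the requested boundary.
func alignedBytes(size, alignment uint) []byte {
	data := make([]byte, size+alignment-1)
	offset := uint(uintptr(unsafe.Pointer(&data[0])) % uintptr(alignment))
	if offset == 0 {
		return data[:size:size]
	}
	return data[alignment-offset:][:size:size]
}

func main() {
	buf := alignedBytes(512, 16) // e.g. an FXSAVE-sized, 16-byte aligned area
	fmt.Println(len(buf), uintptr(unsafe.Pointer(&buf[0]))%16 == 0)
}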
diff --git a/pkg/sentry/arch/arch.go b/pkg/sentry/arch/arch.go
new file mode 100644
index 000000000..53f0c9018
--- /dev/null
+++ b/pkg/sentry/arch/arch.go
@@ -0,0 +1,359 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package arch provides abstractions around architecture-dependent details,
+// such as syscall calling conventions, native types, etc.
+package arch
+
+import (
+ "fmt"
+ "io"
+
+ "gvisor.googlesource.com/gvisor/pkg/abi/linux"
+ "gvisor.googlesource.com/gvisor/pkg/cpuid"
+ "gvisor.googlesource.com/gvisor/pkg/log"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/limits"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// Arch describes an architecture.
+type Arch int
+
+const (
+ // AMD64 is the x86-64 architecture.
+ AMD64 Arch = iota
+)
+
+// String implements fmt.Stringer.
+func (a Arch) String() string {
+ switch a {
+ case AMD64:
+ return "amd64"
+ default:
+ return fmt.Sprintf("Arch(%d)", a)
+ }
+}
+
+// FloatingPointData is a generic type, and will always be passed as a pointer.
+// We rely on the individual arch implementations to meet all the necessary
+// requirements. For example, on x86 the region must be 16-byte aligned and 512
+// bytes in size.
+type FloatingPointData byte
+
+// Context provides architecture-dependent information for a specific thread.
+//
+// NOTE(b/34169503): Currently we use uintptr here to refer to a generic native
+// register value. While this will work for the foreseeable future, it isn't
+// strictly correct. We may want to create some abstraction that makes this
+// more clear or enables us to store values of arbitrary widths. This is
+// particularly true for RegisterMap().
+type Context interface {
+ // Arch returns the architecture for this Context.
+ Arch() Arch
+
+ // Native converts a generic type to a native value.
+ //
+ // Because the architecture is not specified here, we may be dealing
+ // with return values of varying sizes (for example ARCH_GETFS). This
+ // is a simple utility function to convert to the native size in these
+ // cases, and then we can CopyOut.
+ Native(val uintptr) interface{}
+
+ // Value converts a native type back to a generic value.
+ // Once a value has been converted to native via the above call -- it
+ // can be converted back here.
+ Value(val interface{}) uintptr
+
+ // Width returns the number of bytes for a native value.
+ Width() uint
+
+ // Fork creates a clone of the context.
+ Fork() Context
+
+ // SyscallNo returns the syscall number.
+ SyscallNo() uintptr
+
+ // SyscallArgs returns the syscall arguments in an array.
+ SyscallArgs() SyscallArguments
+
+ // Return returns the return value for a system call.
+ Return() uintptr
+
+ // SetReturn sets the return value for a system call.
+ SetReturn(value uintptr)
+
+ // RestartSyscall reverses over the current syscall instruction, such that
+ // when the application resumes execution the syscall will be re-attempted.
+ RestartSyscall()
+
+ // RestartSyscallWithRestartBlock reverses over the current syscall
+ // instruction and overwrites the current syscall number with that of
+ // restart_syscall(2). This causes the application to restart the current
+ // syscall with a custom function when execution resumes.
+ RestartSyscallWithRestartBlock()
+
+ // IP returns the current instruction pointer.
+ IP() uintptr
+
+ // SetIP sets the current instruction pointer.
+ SetIP(value uintptr)
+
+ // Stack returns the current stack pointer.
+ Stack() uintptr
+
+ // SetStack sets the current stack pointer.
+ SetStack(value uintptr)
+
+ // TLS returns the current TLS pointer.
+ TLS() uintptr
+
+ // SetTLS sets the current TLS pointer. Returns false if value is invalid.
+ SetTLS(value uintptr) bool
+
+ // SetRSEQInterruptedIP sets the register that contains the old IP when a
+ // restartable sequence is interrupted.
+ SetRSEQInterruptedIP(value uintptr)
+
+ // StateData returns a pointer to underlying architecture state.
+ StateData() *State
+
+ // RegisterMap returns a map of all registers.
+ RegisterMap() (map[string]uintptr, error)
+
+ // NewSignalAct returns a new object that is equivalent to struct sigaction
+ // in the guest architecture.
+ NewSignalAct() NativeSignalAct
+
+ // NewSignalStack returns a new object that is equivalent to stack_t in the
+ // guest architecture.
+ NewSignalStack() NativeSignalStack
+
+ // SignalSetup modifies the context in preparation for handling the
+ // given signal.
+ //
+ // st is the stack where the signal handler frame should be
+ // constructed.
+ //
+ // act is the SignalAct that specifies how this signal is being
+ // handled.
+ //
+ // info is the SignalInfo of the signal being delivered.
+ //
+ // alt is the alternate signal stack (even if the alternate signal
+ // stack is not going to be used).
+ //
+ // sigset is the signal mask before entering the signal handler.
+ SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt *SignalStack, sigset linux.SignalSet) error
+
+ // SignalRestore restores context after returning from a signal
+ // handler.
+ //
+ // st is the current thread stack.
+ //
+ // rt is true if SignalRestore is being entered from rt_sigreturn and
+ // false if SignalRestore is being entered from sigreturn.
+ // SignalRestore returns the thread's new signal mask.
+ SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error)
+
+ // CPUIDEmulate emulates a CPUID instruction according to current register state.
+ CPUIDEmulate(l log.Logger)
+
+ // SingleStep returns true if single stepping is enabled.
+ SingleStep() bool
+
+ // SetSingleStep enables single stepping.
+ SetSingleStep()
+
+ // ClearSingleStep disables single stepping.
+ ClearSingleStep()
+
+ // FloatingPointData will be passed to underlying save routines.
+ FloatingPointData() *FloatingPointData
+
+ // NewMmapLayout returns a layout for a new MM, where MinAddr for the
+ // returned layout must be no lower than min, and MaxAddr for the returned
+ // layout must be no higher than max. Repeated calls to NewMmapLayout may
+ // return different layouts.
+ NewMmapLayout(min, max usermem.Addr, limits *limits.LimitSet) (MmapLayout, error)
+
+ // PIELoadAddress returns a preferred load address for a
+ // position-independent executable within l.
+ PIELoadAddress(l MmapLayout) usermem.Addr
+
+ // FeatureSet returns the FeatureSet in use in this context.
+ FeatureSet() *cpuid.FeatureSet
+
+ // Hack around our package dependencies being too broken to support the
+ // equivalent of arch_ptrace():
+
+ // PtracePeekUser implements ptrace(PTRACE_PEEKUSR).
+ PtracePeekUser(addr uintptr) (interface{}, error)
+
+ // PtracePokeUser implements ptrace(PTRACE_POKEUSR).
+ PtracePokeUser(addr, data uintptr) error
+
+ // PtraceGetRegs implements ptrace(PTRACE_GETREGS) by writing the
+ // general-purpose registers represented by this Context to dst and
+ // returning the number of bytes written.
+ PtraceGetRegs(dst io.Writer) (int, error)
+
+ // PtraceSetRegs implements ptrace(PTRACE_SETREGS) by reading
+ // general-purpose registers from src into this Context and returning the
+ // number of bytes read.
+ PtraceSetRegs(src io.Reader) (int, error)
+
+ // PtraceGetFPRegs implements ptrace(PTRACE_GETFPREGS) by writing the
+ // floating-point registers represented by this Context to addr in dst and
+ // returning the number of bytes written.
+ PtraceGetFPRegs(dst io.Writer) (int, error)
+
+ // PtraceSetFPRegs implements ptrace(PTRACE_SETFPREGS) by reading
+ // floating-point registers from src into this Context and returning the
+ // number of bytes read.
+ PtraceSetFPRegs(src io.Reader) (int, error)
+
+ // PtraceGetRegSet implements ptrace(PTRACE_GETREGSET) by writing the
+ // register set given by architecture-defined value regset from this
+ // Context to dst and returning the number of bytes written, which must be
+ // less than or equal to maxlen.
+ PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error)
+
+ // PtraceSetRegSet implements ptrace(PTRACE_SETREGSET) by reading the
+ // register set given by architecture-defined value regset from src and
+ // returning the number of bytes read, which must be less than or equal to
+ // maxlen.
+ PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error)
+
+ // FullRestore returns 'true' if all CPU registers must be restored
+ // when switching to the untrusted application. Typically a task enters
+ // and leaves the kernel via a system call. Platform.Switch() may
+ // optimize for this by not saving/restoring all registers if allowed
+ // by the ABI. For example, the amd64 ABI specifies that syscall clobbers
+ // %rcx and %r11. If FullRestore returns true then these optimizations
+ // must be disabled and all registers restored.
+ FullRestore() bool
+}
+
+// MmapDirection is a search direction for mmaps.
+type MmapDirection int
+
+const (
+ // MmapBottomUp instructs mmap to prefer lower addresses.
+ MmapBottomUp MmapDirection = iota
+
+ // MmapTopDown instructs mmap to prefer higher addresses.
+ MmapTopDown
+)
+
+// MmapLayout defines the layout of the user address space for a particular
+// MemoryManager.
+//
+// Note that "highest address" below is always exclusive.
+//
+// +stateify savable
+type MmapLayout struct {
+ // MinAddr is the lowest mappable address.
+ MinAddr usermem.Addr
+
+ // MaxAddr is the highest mappable address.
+ MaxAddr usermem.Addr
+
+ // BottomUpBase is the lowest address that may be returned for a
+ // MmapBottomUp mmap.
+ BottomUpBase usermem.Addr
+
+ // TopDownBase is the highest address that may be returned for a
+ // MmapTopDown mmap.
+ TopDownBase usermem.Addr
+
+ // DefaultDirection is the direction for most non-fixed mmaps in this
+ // layout.
+ DefaultDirection MmapDirection
+
+ // MaxStackRand is the maximum randomization to apply to stack
+ // allocations to maintain a proper gap between the stack and
+ // TopDownBase.
+ MaxStackRand uint64
+}
+
+// Valid returns true if this layout is valid.
+func (m *MmapLayout) Valid() bool {
+ if m.MinAddr > m.MaxAddr {
+ return false
+ }
+ if m.BottomUpBase < m.MinAddr {
+ return false
+ }
+ if m.BottomUpBase > m.MaxAddr {
+ return false
+ }
+ if m.TopDownBase < m.MinAddr {
+ return false
+ }
+ if m.TopDownBase > m.MaxAddr {
+ return false
+ }
+ return true
+}
+
+// SyscallArgument is an argument supplied to a syscall implementation. The
+// methods used to access the arguments are named after the ***C type name*** and
+// they convert to the closest Go type available. For example, Int() refers to a
+// 32-bit signed integer argument represented in Go as an int32.
+//
+// Using the accessor methods guarantees that the conversion between types is
+// correct, taking into account size and signedness (i.e., zero-extension vs
+// sign-extension).
+type SyscallArgument struct {
+ // Prefer to use accessor methods instead of 'Value' directly.
+ Value uintptr
+}
+
+// SyscallArguments represents the set of arguments passed to a syscall.
+type SyscallArguments [6]SyscallArgument
+
+// Pointer returns the usermem.Addr representation of a pointer argument.
+func (a SyscallArgument) Pointer() usermem.Addr {
+ return usermem.Addr(a.Value)
+}
+
+// Int returns the int32 representation of a 32-bit signed integer argument.
+func (a SyscallArgument) Int() int32 {
+ return int32(a.Value)
+}
+
+// Uint returns the uint32 representation of a 32-bit unsigned integer argument.
+func (a SyscallArgument) Uint() uint32 {
+ return uint32(a.Value)
+}
+
+// Int64 returns the int64 representation of a 64-bit signed integer argument.
+func (a SyscallArgument) Int64() int64 {
+ return int64(a.Value)
+}
+
+// Uint64 returns the uint64 representation of a 64-bit unsigned integer argument.
+func (a SyscallArgument) Uint64() uint64 {
+ return uint64(a.Value)
+}
+
+// SizeT returns the uint representation of a size_t argument.
+func (a SyscallArgument) SizeT() uint {
+ return uint(a.Value)
+}
+
+// ModeT returns the int representation of a mode_t argument.
+func (a SyscallArgument) ModeT() uint {
+ return uint(uint16(a.Value))
+}
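
As a hedged sketch of how these accessors are meant to be used, a hypothetical syscall implementation might decode its arguments as follows (the handler name and argument layout are illustrative, not part of the diff):

// sysPread64Sketch shows the accessor methods in context; it assumes the
// pread64(fd, buf, count, offset) argument order and is not real sentry code.
func sysPread64Sketch(c Context, args SyscallArguments) {
	fd := args[0].Int()       // C int, sign-extended to int32
	buf := args[1].Pointer()  // C void*, a usermem.Addr
	count := args[2].SizeT()  // C size_t
	offset := args[3].Int64() // C off_t
	_, _, _, _ = fd, buf, count, offset
	c.SetReturn(0) // report success via the architecture's return register
}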
diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go
new file mode 100644
index 000000000..135c2ee1f
--- /dev/null
+++ b/pkg/sentry/arch/arch_amd64.go
@@ -0,0 +1,325 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+package arch
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "syscall"
+
+ "gvisor.googlesource.com/gvisor/pkg/binary"
+ "gvisor.googlesource.com/gvisor/pkg/cpuid"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/limits"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// Host specifies the host architecture.
+const Host = AMD64
+
+// These constants come directly from Linux.
+const (
+ // maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux
+ // for a 64-bit process.
+ maxAddr64 usermem.Addr = (1 << 47) - usermem.PageSize
+
+ // maxStackRand64 is the maximum randomization to apply to the stack.
+ // It is defined by arch/x86/mm/mmap.c:stack_maxrandom_size in Linux.
+ maxStackRand64 = 16 << 30 // 16 GB
+
+ // maxMmapRand64 is the maximum randomization to apply to the mmap
+ // layout. It is defined by arch/x86/mm/mmap.c:arch_mmap_rnd in Linux.
+ maxMmapRand64 = (1 << 28) * usermem.PageSize
+
+ // minGap64 is the minimum gap to leave at the top of the address space
+ // for the stack. It is defined by arch/x86/mm/mmap.c:MIN_GAP in Linux.
+ minGap64 = (128 << 20) + maxStackRand64
+
+ // preferredPIELoadAddr is the standard Linux position-independent
+ // executable base load address. It is ELF_ET_DYN_BASE in Linux.
+ //
+ // The Platform {Min,Max}UserAddress() may preclude loading at this
+ // address. See other preferredFoo comments below.
+ preferredPIELoadAddr usermem.Addr = maxAddr64 / 3 * 2
+)
+
+// These constants are selected as heuristics to help make the Platform's
+// potentially limited address space conform as closely to Linux as possible.
+const (
+ // Select a preferred minimum TopDownBase address.
+ //
+ // Some applications (TSAN and other *SANs) are very particular about
+ // the way the Linux mmap allocator lays out the address space.
+ //
+ // TSAN in particular expects top down allocations to be made in the
+ // range [0x7e8000000000, 0x800000000000).
+ //
+ // The minimum TopDownBase on Linux would be:
+ // 0x800000000000 - minGap64 - maxMmapRand64 = 0x7efbf8000000.
+ //
+ // (minGap64 because TSAN uses a small RLIMIT_STACK.)
+ //
+ // 0x7e8000000000 is selected arbitrarily by TSAN to leave room for
+ // allocations below TopDownBase.
+ //
+ // N.B. ASAN and MSAN are more forgiving; ASAN allows allocations all
+ // the way down to 0x10007fff8000, and MSAN down to 0x700000000000.
+ //
+ // Of course, there is no hard minimum to allocation; an allocator can
+ // search all the way from TopDownBase to Min. However, TSAN declared
+ // their range "good enough".
+ //
+ // We would like to pick a TopDownBase such that it is unlikely that an
+ // allocator will select an address below TSAN's minimum. We achieve
+ // this by trying to leave a sizable gap below TopDownBase.
+ //
+ // This is all "preferred" because the layout min/max address may not
+ // allow us to select such a TopDownBase, in which case we have to fall
+ // back to a layout that TSAN may not be happy with.
+ preferredTopDownAllocMin usermem.Addr = 0x7e8000000000
+ preferredAllocationGap = 128 << 30 // 128 GB
+ preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
+
+ // minMmapRand64 is the smallest we are willing to make the
+ // randomization to stay above preferredTopDownBaseMin.
+ minMmapRand64 = (1 << 26) * usermem.PageSize
+)
+
+// context64 represents an AMD64 context.
+//
+// +stateify savable
+type context64 struct {
+ State
+ sigFPState []x86FPState // fpstate to be restored on sigreturn.
+}
+
+// Arch implements Context.Arch.
+func (c *context64) Arch() Arch {
+ return AMD64
+}
+
+func (c *context64) copySigFPState() []x86FPState {
+ var sigfps []x86FPState
+ for _, s := range c.sigFPState {
+ sigfps = append(sigfps, s.fork())
+ }
+ return sigfps
+}
+
+// Fork returns an exact copy of this context.
+func (c *context64) Fork() Context {
+ return &context64{
+ State: c.State.Fork(),
+ sigFPState: c.copySigFPState(),
+ }
+}
+
+// Return returns the current syscall return value.
+func (c *context64) Return() uintptr {
+ return uintptr(c.Regs.Rax)
+}
+
+// SetReturn sets the syscall return value.
+func (c *context64) SetReturn(value uintptr) {
+ c.Regs.Rax = uint64(value)
+}
+
+// IP returns the current instruction pointer.
+func (c *context64) IP() uintptr {
+ return uintptr(c.Regs.Rip)
+}
+
+// SetIP sets the current instruction pointer.
+func (c *context64) SetIP(value uintptr) {
+ c.Regs.Rip = uint64(value)
+}
+
+// Stack returns the current stack pointer.
+func (c *context64) Stack() uintptr {
+ return uintptr(c.Regs.Rsp)
+}
+
+// SetStack sets the current stack pointer.
+func (c *context64) SetStack(value uintptr) {
+ c.Regs.Rsp = uint64(value)
+}
+
+// TLS returns the current TLS pointer.
+func (c *context64) TLS() uintptr {
+ return uintptr(c.Regs.Fs_base)
+}
+
+// SetTLS sets the current TLS pointer. Returns false if value is invalid.
+func (c *context64) SetTLS(value uintptr) bool {
+ if !isValidSegmentBase(uint64(value)) {
+ return false
+ }
+
+ c.Regs.Fs = 0
+ c.Regs.Fs_base = uint64(value)
+ return true
+}
+
+// SetRSEQInterruptedIP implements Context.SetRSEQInterruptedIP.
+func (c *context64) SetRSEQInterruptedIP(value uintptr) {
+ c.Regs.R10 = uint64(value)
+}
+
+// Native returns the native type for the given val.
+func (c *context64) Native(val uintptr) interface{} {
+ v := uint64(val)
+ return &v
+}
+
+// Value returns the generic val for the given native type.
+func (c *context64) Value(val interface{}) uintptr {
+ return uintptr(*val.(*uint64))
+}
+
+// Width returns the byte width of this architecture.
+func (c *context64) Width() uint {
+ return 8
+}
+
+// FeatureSet returns the FeatureSet in use.
+func (c *context64) FeatureSet() *cpuid.FeatureSet {
+ return c.State.FeatureSet
+}
+
+// mmapRand returns a random adjustment for randomizing an mmap layout.
+func mmapRand(max uint64) usermem.Addr {
+ return usermem.Addr(rand.Int63n(int64(max))).RoundDown()
+}
+
+// NewMmapLayout implements Context.NewMmapLayout consistently with Linux.
+func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) {
+ min, ok := min.RoundUp()
+ if !ok {
+ return MmapLayout{}, syscall.EINVAL
+ }
+ if max > maxAddr64 {
+ max = maxAddr64
+ }
+ max = max.RoundDown()
+
+ if min > max {
+ return MmapLayout{}, syscall.EINVAL
+ }
+
+ stackSize := r.Get(limits.Stack)
+
+ // MAX_GAP in Linux.
+ maxGap := (max / 6) * 5
+ gap := usermem.Addr(stackSize.Cur)
+ if gap < minGap64 {
+ gap = minGap64
+ }
+ if gap > maxGap {
+ gap = maxGap
+ }
+ defaultDir := MmapTopDown
+ if stackSize.Cur == limits.Infinity {
+ defaultDir = MmapBottomUp
+ }
+
+ topDownMin := max - gap - maxMmapRand64
+ maxRand := usermem.Addr(maxMmapRand64)
+ if topDownMin < preferredTopDownBaseMin {
+ // Try to keep TopDownBase above preferredTopDownBaseMin by
+ // shrinking maxRand.
+ maxAdjust := maxRand - minMmapRand64
+ needAdjust := preferredTopDownBaseMin - topDownMin
+ if needAdjust <= maxAdjust {
+ maxRand -= needAdjust
+ }
+ }
+
+ rnd := mmapRand(uint64(maxRand))
+ l := MmapLayout{
+ MinAddr: min,
+ MaxAddr: max,
+ // TASK_UNMAPPED_BASE in Linux.
+ BottomUpBase: (max/3 + rnd).RoundDown(),
+ TopDownBase: (max - gap - rnd).RoundDown(),
+ DefaultDirection: defaultDir,
+ // We may have reduced the maximum randomization to keep
+ // TopDownBase above preferredTopDownBaseMin while maintaining
+ // our stack gap. Stack allocations must use that max
+ // randomization to avoid eating into the gap.
+ MaxStackRand: uint64(maxRand),
+ }
+
+ // Final sanity check on the layout.
+ if !l.Valid() {
+ panic(fmt.Sprintf("Invalid MmapLayout: %+v", l))
+ }
+
+ return l, nil
+}
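
To make the clamping above concrete, here is a self-contained sketch of the same arithmetic with an assumed 8 MiB RLIMIT_STACK; the constants are copied from this file and the resulting numbers are illustrative:

package main

import "fmt"

const (
	pageSize       = 4096
	maxAddr64      = uint64(1)<<47 - pageSize // TASK_SIZE for a 64-bit task
	maxStackRand64 = uint64(16) << 30         // 16 GiB
	minGap64       = uint64(128)<<20 + maxStackRand64
	maxMmapRand64  = (uint64(1) << 28) * pageSize
)

func main() {
	gap := uint64(8) << 20 // assumed RLIMIT_STACK of 8 MiB
	if gap < minGap64 {
		gap = minGap64 // clamped up: the stack gap is at least 128 MiB + 16 GiB
	}
	if maxGap := maxAddr64 / 6 * 5; gap > maxGap {
		gap = maxGap
	}
	topDownMin := maxAddr64 - gap - maxMmapRand64
	fmt.Printf("gap=%#x lowest possible TopDownBase=%#x\n", gap, topDownMin)
}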
+
+// PIELoadAddress implements Context.PIELoadAddress.
+func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr {
+ base := preferredPIELoadAddr
+ max, ok := base.AddLength(maxMmapRand64)
+ if !ok {
+ panic(fmt.Sprintf("preferredPIELoadAddr %#x too large", base))
+ }
+
+ if max > l.MaxAddr {
+ // preferredPIELoadAddr won't fit; fall back to the standard
+ // Linux behavior of 2/3 of TopDownBase. TSAN won't like this.
+ //
+ // Don't bother trying to shrink the randomization for now.
+ base = l.TopDownBase / 3 * 2
+ }
+
+ return base + mmapRand(maxMmapRand64)
+}
+
+// userStructSize is the size in bytes of Linux's struct user on amd64.
+const userStructSize = 928
+
+// PtracePeekUser implements Context.PtracePeekUser.
+func (c *context64) PtracePeekUser(addr uintptr) (interface{}, error) {
+ if addr&7 != 0 || addr >= userStructSize {
+ return nil, syscall.EIO
+ }
+ // PTRACE_PEEKUSER and PTRACE_POKEUSER are only effective on regs and
+ // u_debugreg, returning 0 or silently no-oping for other fields
+ // respectively.
+ if addr < uintptr(ptraceRegsSize) {
+ buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs())
+ return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil
+ }
+ // TODO(b/34088053): debug registers
+ return c.Native(0), nil
+}
+
+// PtracePokeUser implements Context.PtracePokeUser.
+func (c *context64) PtracePokeUser(addr, data uintptr) error {
+ if addr&7 != 0 || addr >= userStructSize {
+ return syscall.EIO
+ }
+ if addr < uintptr(ptraceRegsSize) {
+ buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs())
+ usermem.ByteOrder.PutUint64(buf[addr:], uint64(data))
+ _, err := c.PtraceSetRegs(bytes.NewBuffer(buf))
+ return err
+ }
+ // TODO(b/34088053): debug registers
+ return nil
+}
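
PTRACE_PEEKUSER offsets index into the marshalled general-purpose registers in declaration order, eight bytes per field; a small self-contained sketch of the offset arithmetic (field order taken from syscallPtraceRegs later in this diff):

package main

import "fmt"

func main() {
	// Field order: R15, R14, R13, R12, Rbp, Rbx, R11, R10, R9, R8,
	// Rax, Rcx, Rdx, Rsi, Rdi, Orig_rax, Rip, ...
	const ripIndex = 16
	offset := uintptr(ripIndex * 8)                  // 128: the PTRACE_PEEKUSER offset for RIP
	fmt.Println(offset, offset&7 == 0, offset < 928) // 8-byte aligned and within struct user
}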
diff --git a/pkg/sentry/arch/arch_amd64.s b/pkg/sentry/arch/arch_amd64.s
new file mode 100644
index 000000000..bd61402cf
--- /dev/null
+++ b/pkg/sentry/arch/arch_amd64.s
@@ -0,0 +1,135 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "textflag.h"
+
+// MXCSR_DEFAULT is the reset value of MXCSR (Intel SDM Vol. 2, Ch. 3.2
+// "LDMXCSR")
+#define MXCSR_DEFAULT 0x1f80
+
+// MXCSR_OFFSET is the offset in bytes of the MXCSR field from the start of the
+// FXSAVE/XSAVE area. (Intel SDM Vol. 1, Table 10-2 "Format of an FXSAVE Area")
+#define MXCSR_OFFSET 24
+
+// initX86FPState initializes floating point state.
+//
+// func initX86FPState(data *FloatingPointData, useXsave bool)
+//
+// We need to clear out and initialize an empty fp state area since the sentry
+// may have left sensitive information in the floating point registers.
+//
+// Preconditions: data is zeroed
+TEXT ·initX86FPState(SB), $24-16
+ // Save MXCSR (callee-save)
+ STMXCSR mxcsr-8(SP)
+
+ // Save x87 CW (callee-save)
+ FSTCW cw-16(SP)
+
+ MOVQ fpState+0(FP), DI
+
+ // Do we use xsave?
+ MOVBQZX useXsave+8(FP), AX
+ TESTQ AX, AX
+ JZ no_xsave
+
+ // Use XRSTOR to clear all FP state to an initial state.
+ //
+ // The fpState XSAVE area is zeroed on function entry, meaning
+ // XSTATE_BV is zero.
+ //
+ // "If RFBM[i] = 1 and bit i is clear in the XSTATE_BV field in the
+ // XSAVE header, XRSTOR initializes state component i."
+ //
+ // Initialization is defined in SDM Vol 1, Chapter 13.3. It puts all
+ // the registers in a reasonable initial state, except MXCSR:
+ //
+ // "The MXCSR register is part of state component 1, SSE state (see
+ // Section 13.5.2). However, the standard form of XRSTOR loads the
+ // MXCSR register from memory whenever the RFBM[1] (SSE) or RFBM[2]
+ // (AVX) is set, regardless of the values of XSTATE_BV[1] and
+ // XSTATE_BV[2]."
+
+ // Set MXCSR to the default value.
+ MOVL $MXCSR_DEFAULT, MXCSR_OFFSET(DI)
+
+ // Initialize registers with XRSTOR.
+ MOVL $0xffffffff, AX
+ MOVL $0xffffffff, DX
+ BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x2f // XRSTOR64 0(DI)
+
+ // Now that all the state has been reset, write it back out to the
+ // XSAVE area.
+ BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x27 // XSAVE64 0(DI)
+
+ JMP out
+
+no_xsave:
+ // Clear out existing X values.
+ PXOR X0, X0
+ MOVO X0, X1
+ MOVO X0, X2
+ MOVO X0, X3
+ MOVO X0, X4
+ MOVO X0, X5
+ MOVO X0, X6
+ MOVO X0, X7
+ MOVO X0, X8
+ MOVO X0, X9
+ MOVO X0, X10
+ MOVO X0, X11
+ MOVO X0, X12
+ MOVO X0, X13
+ MOVO X0, X14
+ MOVO X0, X15
+
+ // Zero out %rax and store into MMX registers. MMX registers are
+ // an alias of 8x64 bits of the 8x80 bits used for the original
+ // x87 registers. Storing zero into them will reset the FPU registers
+ // to bits [63:0] = 0, [79:64] = 1. But the contents aren't too
+ // important, just the fact that we have reset them to a known value.
+ XORQ AX, AX
+ MOVQ AX, M0
+ MOVQ AX, M1
+ MOVQ AX, M2
+ MOVQ AX, M3
+ MOVQ AX, M4
+ MOVQ AX, M5
+ MOVQ AX, M6
+ MOVQ AX, M7
+
+ // The Go assembler doesn't support FNINIT, so we use BYTE.
+ // This will:
+ // - Reset FPU control word to 0x037f
+ // - Clear FPU status word
+ // - Reset FPU tag word to 0xffff
+ // - Clear FPU data pointer
+ // - Clear FPU instruction pointer
+ BYTE $0xDB; BYTE $0xE3; // FNINIT
+
+ // Reset MXCSR.
+ MOVL $MXCSR_DEFAULT, tmpmxcsr-24(SP)
+ LDMXCSR tmpmxcsr-24(SP)
+
+ // Save the floating point state with fxsave.
+ FXSAVE64 0(DI)
+
+out:
+ // Restore MXCSR.
+ LDMXCSR mxcsr-8(SP)
+
+ // Restore x87 CW.
+ FLDCW cw-16(SP)
+
+ RET
diff --git a/pkg/sentry/arch/arch_state_autogen.go b/pkg/sentry/arch/arch_state_autogen.go
new file mode 100755
index 000000000..0c3b11507
--- /dev/null
+++ b/pkg/sentry/arch/arch_state_autogen.go
@@ -0,0 +1,193 @@
+// automatically generated by stateify.
+
+package arch
+
+import (
+ "gvisor.googlesource.com/gvisor/pkg/state"
+)
+
+func (x *MmapLayout) beforeSave() {}
+func (x *MmapLayout) save(m state.Map) {
+ x.beforeSave()
+ m.Save("MinAddr", &x.MinAddr)
+ m.Save("MaxAddr", &x.MaxAddr)
+ m.Save("BottomUpBase", &x.BottomUpBase)
+ m.Save("TopDownBase", &x.TopDownBase)
+ m.Save("DefaultDirection", &x.DefaultDirection)
+ m.Save("MaxStackRand", &x.MaxStackRand)
+}
+
+func (x *MmapLayout) afterLoad() {}
+func (x *MmapLayout) load(m state.Map) {
+ m.Load("MinAddr", &x.MinAddr)
+ m.Load("MaxAddr", &x.MaxAddr)
+ m.Load("BottomUpBase", &x.BottomUpBase)
+ m.Load("TopDownBase", &x.TopDownBase)
+ m.Load("DefaultDirection", &x.DefaultDirection)
+ m.Load("MaxStackRand", &x.MaxStackRand)
+}
+
+func (x *context64) beforeSave() {}
+func (x *context64) save(m state.Map) {
+ x.beforeSave()
+ m.Save("State", &x.State)
+ m.Save("sigFPState", &x.sigFPState)
+}
+
+func (x *context64) afterLoad() {}
+func (x *context64) load(m state.Map) {
+ m.Load("State", &x.State)
+ m.Load("sigFPState", &x.sigFPState)
+}
+
+func (x *syscallPtraceRegs) beforeSave() {}
+func (x *syscallPtraceRegs) save(m state.Map) {
+ x.beforeSave()
+ m.Save("R15", &x.R15)
+ m.Save("R14", &x.R14)
+ m.Save("R13", &x.R13)
+ m.Save("R12", &x.R12)
+ m.Save("Rbp", &x.Rbp)
+ m.Save("Rbx", &x.Rbx)
+ m.Save("R11", &x.R11)
+ m.Save("R10", &x.R10)
+ m.Save("R9", &x.R9)
+ m.Save("R8", &x.R8)
+ m.Save("Rax", &x.Rax)
+ m.Save("Rcx", &x.Rcx)
+ m.Save("Rdx", &x.Rdx)
+ m.Save("Rsi", &x.Rsi)
+ m.Save("Rdi", &x.Rdi)
+ m.Save("Orig_rax", &x.Orig_rax)
+ m.Save("Rip", &x.Rip)
+ m.Save("Cs", &x.Cs)
+ m.Save("Eflags", &x.Eflags)
+ m.Save("Rsp", &x.Rsp)
+ m.Save("Ss", &x.Ss)
+ m.Save("Fs_base", &x.Fs_base)
+ m.Save("Gs_base", &x.Gs_base)
+ m.Save("Ds", &x.Ds)
+ m.Save("Es", &x.Es)
+ m.Save("Fs", &x.Fs)
+ m.Save("Gs", &x.Gs)
+}
+
+func (x *syscallPtraceRegs) afterLoad() {}
+func (x *syscallPtraceRegs) load(m state.Map) {
+ m.Load("R15", &x.R15)
+ m.Load("R14", &x.R14)
+ m.Load("R13", &x.R13)
+ m.Load("R12", &x.R12)
+ m.Load("Rbp", &x.Rbp)
+ m.Load("Rbx", &x.Rbx)
+ m.Load("R11", &x.R11)
+ m.Load("R10", &x.R10)
+ m.Load("R9", &x.R9)
+ m.Load("R8", &x.R8)
+ m.Load("Rax", &x.Rax)
+ m.Load("Rcx", &x.Rcx)
+ m.Load("Rdx", &x.Rdx)
+ m.Load("Rsi", &x.Rsi)
+ m.Load("Rdi", &x.Rdi)
+ m.Load("Orig_rax", &x.Orig_rax)
+ m.Load("Rip", &x.Rip)
+ m.Load("Cs", &x.Cs)
+ m.Load("Eflags", &x.Eflags)
+ m.Load("Rsp", &x.Rsp)
+ m.Load("Ss", &x.Ss)
+ m.Load("Fs_base", &x.Fs_base)
+ m.Load("Gs_base", &x.Gs_base)
+ m.Load("Ds", &x.Ds)
+ m.Load("Es", &x.Es)
+ m.Load("Fs", &x.Fs)
+ m.Load("Gs", &x.Gs)
+}
+
+func (x *State) beforeSave() {}
+func (x *State) save(m state.Map) {
+ x.beforeSave()
+ var Regs syscallPtraceRegs = x.saveRegs()
+ m.SaveValue("Regs", Regs)
+ m.Save("x86FPState", &x.x86FPState)
+ m.Save("FeatureSet", &x.FeatureSet)
+}
+
+func (x *State) load(m state.Map) {
+ m.LoadWait("x86FPState", &x.x86FPState)
+ m.Load("FeatureSet", &x.FeatureSet)
+ m.LoadValue("Regs", new(syscallPtraceRegs), func(y interface{}) { x.loadRegs(y.(syscallPtraceRegs)) })
+ m.AfterLoad(x.afterLoad)
+}
+
+func (x *AuxEntry) beforeSave() {}
+func (x *AuxEntry) save(m state.Map) {
+ x.beforeSave()
+ m.Save("Key", &x.Key)
+ m.Save("Value", &x.Value)
+}
+
+func (x *AuxEntry) afterLoad() {}
+func (x *AuxEntry) load(m state.Map) {
+ m.Load("Key", &x.Key)
+ m.Load("Value", &x.Value)
+}
+
+func (x *SignalAct) beforeSave() {}
+func (x *SignalAct) save(m state.Map) {
+ x.beforeSave()
+ m.Save("Handler", &x.Handler)
+ m.Save("Flags", &x.Flags)
+ m.Save("Restorer", &x.Restorer)
+ m.Save("Mask", &x.Mask)
+}
+
+func (x *SignalAct) afterLoad() {}
+func (x *SignalAct) load(m state.Map) {
+ m.Load("Handler", &x.Handler)
+ m.Load("Flags", &x.Flags)
+ m.Load("Restorer", &x.Restorer)
+ m.Load("Mask", &x.Mask)
+}
+
+func (x *SignalStack) beforeSave() {}
+func (x *SignalStack) save(m state.Map) {
+ x.beforeSave()
+ m.Save("Addr", &x.Addr)
+ m.Save("Flags", &x.Flags)
+ m.Save("Size", &x.Size)
+}
+
+func (x *SignalStack) afterLoad() {}
+func (x *SignalStack) load(m state.Map) {
+ m.Load("Addr", &x.Addr)
+ m.Load("Flags", &x.Flags)
+ m.Load("Size", &x.Size)
+}
+
+func (x *SignalInfo) beforeSave() {}
+func (x *SignalInfo) save(m state.Map) {
+ x.beforeSave()
+ m.Save("Signo", &x.Signo)
+ m.Save("Errno", &x.Errno)
+ m.Save("Code", &x.Code)
+ m.Save("Fields", &x.Fields)
+}
+
+func (x *SignalInfo) afterLoad() {}
+func (x *SignalInfo) load(m state.Map) {
+ m.Load("Signo", &x.Signo)
+ m.Load("Errno", &x.Errno)
+ m.Load("Code", &x.Code)
+ m.Load("Fields", &x.Fields)
+}
+
+func init() {
+ state.Register("arch.MmapLayout", (*MmapLayout)(nil), state.Fns{Save: (*MmapLayout).save, Load: (*MmapLayout).load})
+ state.Register("arch.context64", (*context64)(nil), state.Fns{Save: (*context64).save, Load: (*context64).load})
+ state.Register("arch.syscallPtraceRegs", (*syscallPtraceRegs)(nil), state.Fns{Save: (*syscallPtraceRegs).save, Load: (*syscallPtraceRegs).load})
+ state.Register("arch.State", (*State)(nil), state.Fns{Save: (*State).save, Load: (*State).load})
+ state.Register("arch.AuxEntry", (*AuxEntry)(nil), state.Fns{Save: (*AuxEntry).save, Load: (*AuxEntry).load})
+ state.Register("arch.SignalAct", (*SignalAct)(nil), state.Fns{Save: (*SignalAct).save, Load: (*SignalAct).load})
+ state.Register("arch.SignalStack", (*SignalStack)(nil), state.Fns{Save: (*SignalStack).save, Load: (*SignalStack).load})
+ state.Register("arch.SignalInfo", (*SignalInfo)(nil), state.Fns{Save: (*SignalInfo).save, Load: (*SignalInfo).load})
+}
diff --git a/pkg/sentry/arch/arch_state_x86.go b/pkg/sentry/arch/arch_state_x86.go
new file mode 100644
index 000000000..bb52d8db0
--- /dev/null
+++ b/pkg/sentry/arch/arch_state_x86.go
@@ -0,0 +1,131 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+import (
+ "fmt"
+ "syscall"
+
+ "gvisor.googlesource.com/gvisor/pkg/cpuid"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// ErrFloatingPoint indicates a failed restore due to unusable floating point
+// state.
+type ErrFloatingPoint struct {
+ // supported is the supported floating point state.
+ supported uint64
+
+ // saved is the saved floating point state.
+ saved uint64
+}
+
+// Error returns a sensible description of the restore error.
+func (e ErrFloatingPoint) Error() string {
+ return fmt.Sprintf("floating point state contains unsupported features; supported: %#x saved: %#x", e.supported, e.saved)
+}
+
+// XSTATE_BV does not exist if FXSAVE is used, but FXSAVE implicitly saves x87
+// and SSE state, so this is the equivalent XSTATE_BV value.
+const fxsaveBV uint64 = cpuid.XSAVEFeatureX87 | cpuid.XSAVEFeatureSSE
+
+// afterLoad is invoked by stateify.
+func (s *State) afterLoad() {
+ old := s.x86FPState
+
+ // Recreate the slice. This is done to ensure that it is aligned
+ // appropriately in memory, and large enough to accommodate any new
+ // state that may be saved by the new CPU. Even if extraneous new state
+ // is saved, the state we care about is guaranteed to be a subset of
+ // new state. Later optimizations can use less space when using a
+ // smaller state component bitmap. Intel SDM Volume 1 Chapter 13 has
+ // more info.
+ s.x86FPState = newX86FPState()
+
+ // x86FPState always contains all the FP state supported by the host.
+ // We may have come from a newer machine that supports additional state
+ // which we cannot restore.
+ //
+ // The x86 FP state areas are backwards compatible, so we can simply
+ // truncate the additional floating point state.
+ //
+ // Applications should not depend on the truncated state because it
+ // should relate only to features that were not exposed in the app
+ // FeatureSet. However, because we do not *prevent* them from using
+ // this state, we must verify here that there is no in-use state
+ // (according to XSTATE_BV) which we do not support.
+ if len(s.x86FPState) < len(old) {
+ // What do we support?
+ supportedBV := fxsaveBV
+ if fs := cpuid.HostFeatureSet(); fs.UseXsave() {
+ supportedBV = fs.ValidXCR0Mask()
+ }
+
+ // What was in use?
+ savedBV := fxsaveBV
+ if len(old) >= xstateBVOffset+8 {
+ savedBV = usermem.ByteOrder.Uint64(old[xstateBVOffset:])
+ }
+
+ // Supported features must be a superset of saved features.
+ if savedBV&^supportedBV != 0 {
+ panic(ErrFloatingPoint{supported: supportedBV, saved: savedBV})
+ }
+ }
+
+ // Copy to the new, aligned location.
+ copy(s.x86FPState, old)
+}
+
+// +stateify savable
+type syscallPtraceRegs struct {
+ R15 uint64
+ R14 uint64
+ R13 uint64
+ R12 uint64
+ Rbp uint64
+ Rbx uint64
+ R11 uint64
+ R10 uint64
+ R9 uint64
+ R8 uint64
+ Rax uint64
+ Rcx uint64
+ Rdx uint64
+ Rsi uint64
+ Rdi uint64
+ Orig_rax uint64
+ Rip uint64
+ Cs uint64
+ Eflags uint64
+ Rsp uint64
+ Ss uint64
+ Fs_base uint64
+ Gs_base uint64
+ Ds uint64
+ Es uint64
+ Fs uint64
+ Gs uint64
+}
+
+// saveRegs is invoked by stateify.
+func (s *State) saveRegs() syscallPtraceRegs {
+ return syscallPtraceRegs(s.Regs)
+}
+
+// loadRegs is invoked by stateify.
+func (s *State) loadRegs(r syscallPtraceRegs) {
+ s.Regs = syscall.PtraceRegs(r)
+}
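
The superset check performed in afterLoad (above in this file) is plain bitmask arithmetic on XSTATE_BV; a self-contained sketch with made-up feature bits:

package main

import "fmt"

func main() {
	const (
		x87 = uint64(1) << 0
		sse = uint64(1) << 1
		avx = uint64(1) << 2
	)
	supportedBV := x87 | sse   // what this host can restore
	savedBV := x87 | sse | avx // what the checkpointed image had in use
	if unsupported := savedBV &^ supportedBV; unsupported != 0 {
		fmt.Printf("restore would fail: in-use state %#x is unsupported\n", unsupported)
	}
}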
diff --git a/pkg/sentry/arch/arch_x86.go b/pkg/sentry/arch/arch_x86.go
new file mode 100644
index 000000000..4d167ce98
--- /dev/null
+++ b/pkg/sentry/arch/arch_x86.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64 i386
+
+package arch
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "syscall"
+
+ "gvisor.googlesource.com/gvisor/pkg/binary"
+ "gvisor.googlesource.com/gvisor/pkg/cpuid"
+ "gvisor.googlesource.com/gvisor/pkg/log"
+ rpb "gvisor.googlesource.com/gvisor/pkg/sentry/arch/registers_go_proto"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+ "gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// System-related constants for x86.
+const (
+ // SyscallWidth is the width of the syscall, sysenter, and int 80 instructions.
+ SyscallWidth = 2
+)
+
+// EFLAGS register bits.
+const (
+ // eflagsCF is the mask for the carry flag.
+ eflagsCF = uint64(1) << 0
+ // eflagsPF is the mask for the parity flag.
+ eflagsPF = uint64(1) << 2
+ // eflagsAF is the mask for the auxiliary carry flag.
+ eflagsAF = uint64(1) << 4
+ // eflagsZF is the mask for the zero flag.
+ eflagsZF = uint64(1) << 6
+ // eflagsSF is the mask for the sign flag.
+ eflagsSF = uint64(1) << 7
+ // eflagsTF is the mask for the trap flag.
+ eflagsTF = uint64(1) << 8
+ // eflagsIF is the mask for the interrupt flag.
+ eflagsIF = uint64(1) << 9
+ // eflagsDF is the mask for the direction flag.
+ eflagsDF = uint64(1) << 10
+ // eflagsOF is the mask for the overflow flag.
+ eflagsOF = uint64(1) << 11
+ // eflagsIOPL is the mask for the I/O privilege level.
+ eflagsIOPL = uint64(3) << 12
+ // eflagsNT is the mask for the nested task bit.
+ eflagsNT = uint64(1) << 14
+ // eflagsRF is the mask for the resume flag.
+ eflagsRF = uint64(1) << 16
+ // eflagsVM is the mask for the virtual mode bit.
+ eflagsVM = uint64(1) << 17
+ // eflagsAC is the mask for the alignment check / access control bit.
+ eflagsAC = uint64(1) << 18
+ // eflagsVIF is the mask for the virtual interrupt flag.
+ eflagsVIF = uint64(1) << 19
+ // eflagsVIP is the mask for the virtual interrupt pending bit.
+ eflagsVIP = uint64(1) << 20
+ // eflagsID is the mask for the CPUID detection bit.
+ eflagsID = uint64(1) << 21
+
+ // eflagsPtraceMutable is the mask for the set of EFLAGS that may be
+ // changed by ptrace(PTRACE_SETREGS). eflagsPtraceMutable is analogous to
+ // Linux's FLAG_MASK.
+ eflagsPtraceMutable = eflagsCF | eflagsPF | eflagsAF | eflagsZF | eflagsSF | eflagsTF | eflagsDF | eflagsOF | eflagsRF | eflagsAC | eflagsNT
+
+ // eflagsRestorable is the mask for the set of EFLAGS that may be changed by
+ // SignalReturn. eflagsRestorable is analogous to Linux's FIX_EFLAGS.
+ eflagsRestorable = eflagsAC | eflagsOF | eflagsDF | eflagsTF | eflagsSF | eflagsZF | eflagsAF | eflagsPF | eflagsCF | eflagsRF
+)
+
+// Segment selectors. See arch/x86/include/asm/segment.h.
+const (
+ userCS = 0x33 // guest ring 3 code selector
+ user32CS = 0x23 // guest ring 3 32 bit code selector
+ userDS = 0x2b // guest ring 3 data selector
+
+ _FS_TLS_SEL = 0x63 // Linux FS thread-local storage selector
+ _GS_TLS_SEL = 0x6b // Linux GS thread-local storage selector
+)
+
+var (
+ // TrapInstruction is the x86 trap instruction.
+ TrapInstruction = [1]byte{0xcc}
+
+ // CPUIDInstruction is the x86 CPUID instruction.
+ CPUIDInstruction = [2]byte{0xf, 0xa2}
+
+ // X86TrapFlag is an exported const for use by other packages.
+ X86TrapFlag uint64 = (1 << 8)
+)
+
+// x86FPState is x86 floating point state.
+type x86FPState []byte
+
+// initX86FPState (defined in asm files) sets up initial state.
+func initX86FPState(data *FloatingPointData, useXsave bool)
+
+func newX86FPStateSlice() []byte {
+ size, align := cpuid.HostFeatureSet().ExtendedStateSize()
+ capacity := size
+ // Always use at least 4096 bytes.
+ if capacity < 4096 {
+ capacity = 4096
+ }
+ return alignedBytes(capacity, align)[:size]
+}
+
+// newX86FPState returns an initialized floating point state.
+//
+// The returned state is large enough to store all floating point state
+// supported by the host, even if the app won't use much of it due to a
+// restricted FeatureSet. Since the app may still be able to see state not
+// advertised by CPUID, we must ensure it does not contain any sentry state.
+func newX86FPState() x86FPState {
+ f := x86FPState(newX86FPStateSlice())
+ initX86FPState(f.FloatingPointData(), cpuid.HostFeatureSet().UseXsave())
+ return f
+}
+
+// fork creates and returns an identical copy of the x86 floating point state.
+func (f x86FPState) fork() x86FPState {
+ n := x86FPState(newX86FPStateSlice())
+ copy(n, f)
+ return n
+}
+
+// FloatingPointData returns the raw data pointer.
+func (f x86FPState) FloatingPointData() *FloatingPointData {
+ return (*FloatingPointData)(&f[0])
+}
+
+// NewFloatingPointData returns a new floating point data blob.
+//
+// This is primarily for use in tests.
+func NewFloatingPointData() *FloatingPointData {
+ return (*FloatingPointData)(&(newX86FPState()[0]))
+}
+
+// State contains the common architecture bits for X86 (the build tag of this
+// file ensures it's only built on x86).
+//
+// +stateify savable
+type State struct {
+ // The system registers.
+ Regs syscall.PtraceRegs `state:".(syscallPtraceRegs)"`
+
+ // Our floating point state.
+ x86FPState `state:"wait"`
+
+ // FeatureSet is a pointer to the currently active feature set.
+ FeatureSet *cpuid.FeatureSet
+}
+
+// Proto returns a protobuf representation of the system registers in State.
+func (s State) Proto() *rpb.Registers {
+ regs := &rpb.AMD64Registers{
+ Rax: s.Regs.Rax,
+ Rbx: s.Regs.Rbx,
+ Rcx: s.Regs.Rcx,
+ Rdx: s.Regs.Rdx,
+ Rsi: s.Regs.Rsi,
+ Rdi: s.Regs.Rdi,
+ Rsp: s.Regs.Rsp,
+ Rbp: s.Regs.Rbp,
+ R8: s.Regs.R8,
+ R9: s.Regs.R9,
+ R10: s.Regs.R10,
+ R11: s.Regs.R11,
+ R12: s.Regs.R12,
+ R13: s.Regs.R13,
+ R14: s.Regs.R14,
+ R15: s.Regs.R15,
+ Rip: s.Regs.Rip,
+ Rflags: s.Regs.Eflags,
+ OrigRax: s.Regs.Orig_rax,
+ Cs: s.Regs.Cs,
+ Ds: s.Regs.Ds,
+ Es: s.Regs.Es,
+ Fs: s.Regs.Fs,
+ Gs: s.Regs.Gs,
+ Ss: s.Regs.Ss,
+ FsBase: s.Regs.Fs_base,
+ GsBase: s.Regs.Gs_base,
+ }
+ return &rpb.Registers{Arch: &rpb.Registers_Amd64{Amd64: regs}}
+}
+
+// Fork creates and returns an identical copy of the state.
+func (s *State) Fork() State {
+ return State{
+ Regs: s.Regs,
+ x86FPState: s.x86FPState.fork(),
+ FeatureSet: s.FeatureSet,
+ }
+}
+
+// StateData implements Context.StateData.
+func (s *State) StateData() *State {
+ return s
+}
+
+// CPUIDEmulate emulates a cpuid instruction.
+func (s *State) CPUIDEmulate(l log.Logger) {
+ argax := uint32(s.Regs.Rax)
+ argcx := uint32(s.Regs.Rcx)
+ ax, bx, cx, dx := s.FeatureSet.EmulateID(argax, argcx)
+ s.Regs.Rax = uint64(ax)
+ s.Regs.Rbx = uint64(bx)
+ s.Regs.Rcx = uint64(cx)
+ s.Regs.Rdx = uint64(dx)
+ l.Debugf("CPUID(%x,%x): %x %x %x %x", argax, argcx, ax, bx, cx, dx)
+}
+
+// SingleStep implements Context.SingleStep.
+func (s *State) SingleStep() bool {
+ return s.Regs.Eflags&X86TrapFlag != 0
+}
+
+// SetSingleStep enables single stepping.
+func (s *State) SetSingleStep() {
+ // Set the trap flag.
+ s.Regs.Eflags |= X86TrapFlag
+}
+
+// ClearSingleStep disables single stepping.
+func (s *State) ClearSingleStep() {
+ // Clear the trap flag.
+ s.Regs.Eflags &= ^X86TrapFlag
+}
+
+// RegisterMap returns a map of all registers.
+func (s *State) RegisterMap() (map[string]uintptr, error) {
+ return map[string]uintptr{
+ "R15": uintptr(s.Regs.R15),
+ "R14": uintptr(s.Regs.R14),
+ "R13": uintptr(s.Regs.R13),
+ "R12": uintptr(s.Regs.R12),
+ "Rbp": uintptr(s.Regs.Rbp),
+ "Rbx": uintptr(s.Regs.Rbx),
+ "R11": uintptr(s.Regs.R11),
+ "R10": uintptr(s.Regs.R10),
+ "R9": uintptr(s.Regs.R9),
+ "R8": uintptr(s.Regs.R8),
+ "Rax": uintptr(s.Regs.Rax),
+ "Rcx": uintptr(s.Regs.Rcx),
+ "Rdx": uintptr(s.Regs.Rdx),
+ "Rsi": uintptr(s.Regs.Rsi),
+ "Rdi": uintptr(s.Regs.Rdi),
+ "Orig_rax": uintptr(s.Regs.Orig_rax),
+ "Rip": uintptr(s.Regs.Rip),
+ "Cs": uintptr(s.Regs.Cs),
+ "Eflags": uintptr(s.Regs.Eflags),
+ "Rsp": uintptr(s.Regs.Rsp),
+ "Ss": uintptr(s.Regs.Ss),
+ "Fs_base": uintptr(s.Regs.Fs_base),
+ "Gs_base": uintptr(s.Regs.Gs_base),
+ "Ds": uintptr(s.Regs.Ds),
+ "Es": uintptr(s.Regs.Es),
+ "Fs": uintptr(s.Regs.Fs),
+ "Gs": uintptr(s.Regs.Gs),
+ }, nil
+}
+
+// PtraceGetRegs implements Context.PtraceGetRegs.
+func (s *State) PtraceGetRegs(dst io.Writer) (int, error) {
+ return dst.Write(binary.Marshal(nil, usermem.ByteOrder, s.ptraceGetRegs()))
+}
+
+func (s *State) ptraceGetRegs() syscall.PtraceRegs {
+ regs := s.Regs
+ // These may not be initialized.
+ if regs.Cs == 0 || regs.Ss == 0 || regs.Eflags == 0 {
+ regs.Eflags = eflagsIF
+ regs.Cs = userCS
+ regs.Ss = userDS
+ }
+ // As an optimization, Linux <4.7 implements 32-bit fs_base/gs_base
+ // addresses using reserved descriptors in the GDT instead of the MSRs,
+ // with selector values FS_TLS_SEL and GS_TLS_SEL respectively. These
+ // values are actually visible in struct user_regs_struct::fs/gs;
+ // arch/x86/kernel/ptrace.c:getreg() doesn't attempt to sanitize struct
+ // thread_struct::fsindex/gsindex.
+ //
+ // We always use fs == gs == 0 when fs_base/gs_base is in use, for
+ // simplicity.
+ //
+ // Luckily, Linux <4.7 silently ignores setting fs/gs to 0 via
+ // arch/x86/kernel/ptrace.c:set_segment_reg() when fs_base/gs_base is a
+ // 32-bit value and fsindex/gsindex indicates that this optimization is
+ // in use, as well as the reverse case of setting fs/gs to
+ // FS/GS_TLS_SEL when fs_base/gs_base is a 64-bit value. (We do the
+ // same in PtraceSetRegs.)
+ //
+ // TODO(gvisor.dev/issue/168): Remove this fixup since newer Linux
+ // doesn't have this behavior anymore.
+ if regs.Fs == 0 && regs.Fs_base <= 0xffffffff {
+ regs.Fs = _FS_TLS_SEL
+ }
+ if regs.Gs == 0 && regs.Gs_base <= 0xffffffff {
+ regs.Gs = _GS_TLS_SEL
+ }
+ return regs
+}
+
+var ptraceRegsSize = int(binary.Size(syscall.PtraceRegs{}))
+
+// PtraceSetRegs implements Context.PtraceSetRegs.
+func (s *State) PtraceSetRegs(src io.Reader) (int, error) {
+ var regs syscall.PtraceRegs
+ buf := make([]byte, ptraceRegsSize)
+ if _, err := io.ReadFull(src, buf); err != nil {
+ return 0, err
+ }
+ binary.Unmarshal(buf, usermem.ByteOrder, &regs)
+ // Truncate segment registers to 16 bits.
+ regs.Cs = uint64(uint16(regs.Cs))
+ regs.Ds = uint64(uint16(regs.Ds))
+ regs.Es = uint64(uint16(regs.Es))
+ regs.Fs = uint64(uint16(regs.Fs))
+ regs.Gs = uint64(uint16(regs.Gs))
+ regs.Ss = uint64(uint16(regs.Ss))
+ // In Linux this validation is via arch/x86/kernel/ptrace.c:putreg().
+ if !isUserSegmentSelector(regs.Cs) {
+ return 0, syscall.EIO
+ }
+ if regs.Ds != 0 && !isUserSegmentSelector(regs.Ds) {
+ return 0, syscall.EIO
+ }
+ if regs.Es != 0 && !isUserSegmentSelector(regs.Es) {
+ return 0, syscall.EIO
+ }
+ if regs.Fs != 0 && !isUserSegmentSelector(regs.Fs) {
+ return 0, syscall.EIO
+ }
+ if regs.Gs != 0 && !isUserSegmentSelector(regs.Gs) {
+ return 0, syscall.EIO
+ }
+ if !isUserSegmentSelector(regs.Ss) {
+ return 0, syscall.EIO
+ }
+ if !isValidSegmentBase(regs.Fs_base) {
+ return 0, syscall.EIO
+ }
+ if !isValidSegmentBase(regs.Gs_base) {
+ return 0, syscall.EIO
+ }
+ // CS and SS are validated, but changes to them are otherwise silently
+ // ignored on amd64.
+ regs.Cs = s.Regs.Cs
+ regs.Ss = s.Regs.Ss
+ // fs_base/gs_base changes reset fs/gs via do_arch_prctl() on Linux.
+ if regs.Fs_base != s.Regs.Fs_base {
+ regs.Fs = 0
+ }
+ if regs.Gs_base != s.Regs.Gs_base {
+ regs.Gs = 0
+ }
+ // Ignore "stale" TLS segment selectors for FS and GS. See comment in
+ // ptraceGetRegs.
+ if regs.Fs == _FS_TLS_SEL && regs.Fs_base != 0 {
+ regs.Fs = 0
+ }
+ if regs.Gs == _GS_TLS_SEL && regs.Gs_base != 0 {
+ regs.Gs = 0
+ }
+ regs.Eflags = (s.Regs.Eflags &^ eflagsPtraceMutable) | (regs.Eflags & eflagsPtraceMutable)
+ s.Regs = regs
+ return ptraceRegsSize, nil
+}
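
The final EFLAGS merge keeps kernel-controlled bits and accepts only ptrace-mutable bits from the tracer. A sketch of the bit arithmetic, using the value eflagsPtraceMutable works out to from the constants above (the register values themselves are illustrative):

package main

import "fmt"

func main() {
	const mutable = uint64(0x54dd5)        // eflagsPtraceMutable, expanded
	kernel := uint64(1) << 9               // IF, set by the kernel and not tracer-mutable
	tracer := uint64(1)<<0 | uint64(1)<<17 // CF (mutable) and VM (not mutable)
	merged := (kernel &^ mutable) | (tracer & mutable)
	fmt.Printf("%#x\n", merged) // 0x201: IF preserved, CF accepted, VM dropped
}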
+
+// isUserSegmentSelector returns true if the given segment selector specifies a
+// privilege level of 3 (USER_RPL).
+func isUserSegmentSelector(reg uint64) bool {
+ return reg&3 == 3
+}
+
+// isValidSegmentBase returns true if the given segment base specifies a
+// canonical user address.
+func isValidSegmentBase(reg uint64) bool {
+ return reg < uint64(maxAddr64)
+}
+
+// ptraceFPRegsSize is the size in bytes of Linux's user_i387_struct, the type
+// manipulated by PTRACE_GETFPREGS and PTRACE_SETFPREGS on x86. Equivalently,
+// ptraceFPRegsSize is the size in bytes of the x86 FXSAVE area.
+const ptraceFPRegsSize = 512
+
+// PtraceGetFPRegs implements Context.PtraceGetFPRegs.
+func (s *State) PtraceGetFPRegs(dst io.Writer) (int, error) {
+ return dst.Write(s.x86FPState[:ptraceFPRegsSize])
+}
+
+// PtraceSetFPRegs implements Context.PtraceSetFPRegs.
+func (s *State) PtraceSetFPRegs(src io.Reader) (int, error) {
+ var f [ptraceFPRegsSize]byte
+ n, err := io.ReadFull(src, f[:])
+ if err != nil {
+ return 0, err
+ }
+ // Force reserved bits in MXCSR to 0. This is consistent with Linux.
+ sanitizeMXCSR(x86FPState(f[:]))
+ // N.B. this only copies the beginning of the FP state, which
+ // corresponds to the FXSAVE area.
+ copy(s.x86FPState, f[:])
+ return n, nil
+}
+
+const (
+ // mxcsrOffset is the offset in bytes of the MXCSR field from the start of
+ // the FXSAVE area. (Intel SDM Vol. 1, Table 10-2 "Format of an FXSAVE
+ // Area")
+ mxcsrOffset = 24
+
+ // mxcsrMaskOffset is the offset in bytes of the MXCSR_MASK field from the
+ // start of the FXSAVE area.
+ mxcsrMaskOffset = 28
+)
+
+var (
+ mxcsrMask uint32
+ initMXCSRMask sync.Once
+)
+
+// sanitizeMXCSR coerces reserved bits in the MXCSR field of f to 0. ("FXRSTOR
+// generates a general-protection fault (#GP) in response to an attempt to set
+// any of the reserved bits of the MXCSR register." - Intel SDM Vol. 1, Section
+// 10.5.1.2 "SSE State")
+func sanitizeMXCSR(f x86FPState) {
+ mxcsr := usermem.ByteOrder.Uint32(f[mxcsrOffset:])
+ initMXCSRMask.Do(func() {
+ temp := x86FPState(alignedBytes(uint(ptraceFPRegsSize), 16))
+ initX86FPState(temp.FloatingPointData(), false /* useXsave */)
+ mxcsrMask = usermem.ByteOrder.Uint32(temp[mxcsrMaskOffset:])
+ if mxcsrMask == 0 {
+ // "If the value of the MXCSR_MASK field is 00000000H, then the
+ // MXCSR_MASK value is the default value of 0000FFBFH." - Intel SDM
+ // Vol. 1, Section 11.6.6 "Guidelines for Writing to the MXCSR
+ // Register"
+ mxcsrMask = 0xffbf
+ }
+ })
+ mxcsr &= mxcsrMask
+ usermem.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)
+}
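
The effect of sanitizeMXCSR reduces to a single AND against the host's MXCSR_MASK; a self-contained sketch using the documented default mask (the input value is illustrative):

package main

import "fmt"

func main() {
	const defaultMXCSRMask = uint32(0xffbf) // SDM default when MXCSR_MASK reads as 0
	mxcsr := uint32(0xffff1f80)             // tracer-supplied value with reserved bits set
	mxcsr &= defaultMXCSRMask               // FXRSTOR would #GP on reserved bits, so clear them
	fmt.Printf("%#x\n", mxcsr)              // 0x1f80, the architectural reset value
}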
+
+const (
+ // minXstateBytes is the minimum size in bytes of an x86 XSAVE area, equal
+ // to the size of the XSAVE legacy area (512 bytes) plus the size of the
+ // XSAVE header (64 bytes). Equivalently, minXstateBytes is GDB's
+ // X86_XSTATE_SSE_SIZE.
+ minXstateBytes = 512 + 64
+
+ // userXstateXCR0Offset is the offset in bytes of the USER_XSTATE_XCR0_WORD
+ // field in Linux's struct user_xstateregs, which is the type manipulated
+ // by ptrace(PTRACE_GET/SETREGSET, NT_X86_XSTATE). Equivalently,
+ // userXstateXCR0Offset is GDB's I386_LINUX_XSAVE_XCR0_OFFSET.
+ userXstateXCR0Offset = 464
+
+ // xstateBVOffset is the offset in bytes of the XSTATE_BV field in an x86
+ // XSAVE area.
+ xstateBVOffset = 512
+
+ // xsaveHeaderZeroedOffset and xsaveHeaderZeroedBytes indicate parts of the
+ // XSAVE header that we coerce to zero: "Bytes 15:8 of the XSAVE header is
+ // a state-component bitmap called XCOMP_BV. ... Bytes 63:16 of the XSAVE
+ // header are reserved." - Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header".
+ // Linux ignores XCOMP_BV, but it's able to recover from XRSTOR #GP
+ // exceptions resulting from invalid values; we aren't. Linux also never
+ // uses the compacted format when doing XSAVE and doesn't even define the
+ // compaction extensions to XSAVE as a CPU feature, so for simplicity we
+ // assume no one is using them.
+ xsaveHeaderZeroedOffset = 512 + 8
+ xsaveHeaderZeroedBytes = 64 - 8
+)
+
+func (s *State) ptraceGetXstateRegs(dst io.Writer, maxlen int) (int, error) {
+ // N.B. s.x86FPState may contain more state than the application
+ // expects. We only copy the subset that would be in their XSAVE area.
+ ess, _ := s.FeatureSet.ExtendedStateSize()
+ f := make([]byte, ess)
+ copy(f, s.x86FPState)
+ // "The XSAVE feature set does not use bytes 511:416; bytes 463:416 are
+ // reserved." - Intel SDM Vol 1., Section 13.4.1 "Legacy Region of an XSAVE
+ // Area". Linux uses the first 8 bytes of this area to store the OS XSTATE
+ // mask. GDB relies on this: see
+ // gdb/x86-linux-nat.c:x86_linux_read_description().
+ usermem.ByteOrder.PutUint64(f[userXstateXCR0Offset:], s.FeatureSet.ValidXCR0Mask())
+ if len(f) > maxlen {
+ f = f[:maxlen]
+ }
+ return dst.Write(f)
+}
+
+func (s *State) ptraceSetXstateRegs(src io.Reader, maxlen int) (int, error) {
+ // Allow users to pass an xstate register set smaller than ours (they can
+ // mask bits out of XSTATE_BV), as long as it's at least minXstateBytes.
+ // Also allow users to pass a register set larger than ours; anything after
+ // their ExtendedStateSize will be ignored. (I think Linux technically
+ // permits setting a register set smaller than minXstateBytes, but it has
+ // the same silent truncation behavior in kernel/ptrace.c:ptrace_regset().)
+ if maxlen < minXstateBytes {
+ return 0, syscall.EFAULT
+ }
+ ess, _ := s.FeatureSet.ExtendedStateSize()
+ if maxlen > int(ess) {
+ maxlen = int(ess)
+ }
+ f := make([]byte, maxlen)
+ if _, err := io.ReadFull(src, f); err != nil {
+ return 0, err
+ }
+ // Force reserved bits in MXCSR to 0. This is consistent with Linux.
+ sanitizeMXCSR(x86FPState(f))
+ // Users can't enable *more* XCR0 bits than what we, and the CPU, support.
+ xstateBV := usermem.ByteOrder.Uint64(f[xstateBVOffset:])
+ xstateBV &= s.FeatureSet.ValidXCR0Mask()
+ usermem.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV)
+ // Force XCOMP_BV and reserved bytes in the XSAVE header to 0.
+ reserved := f[xsaveHeaderZeroedOffset : xsaveHeaderZeroedOffset+xsaveHeaderZeroedBytes]
+ for i := range reserved {
+ reserved[i] = 0
+ }
+ return copy(s.x86FPState, f), nil
+}
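For illustration only, outside the patch: a stdlib-only sketch of the XSAVE header fix-ups performed above, assuming the same little-endian layout. Here validXCR0 stands in for FeatureSet.ValidXCR0Mask(), and the buffer size is just the legacy area plus header.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	xstateBVOffset          = 512
	xsaveHeaderZeroedOffset = 512 + 8
	xsaveHeaderZeroedBytes  = 64 - 8
)

// sanitizeXstateHeader mirrors the header fix-ups above on a raw XSAVE buffer:
// XSTATE_BV is intersected with the supported XCR0 mask, and XCOMP_BV plus the
// reserved header bytes are cleared.
func sanitizeXstateHeader(f []byte, validXCR0 uint64) {
	bv := binary.LittleEndian.Uint64(f[xstateBVOffset:])
	binary.LittleEndian.PutUint64(f[xstateBVOffset:], bv&validXCR0)
	for i := 0; i < xsaveHeaderZeroedBytes; i++ {
		f[xsaveHeaderZeroedOffset+i] = 0
	}
}

func main() {
	f := make([]byte, 576) // legacy area (512) + XSAVE header (64)
	binary.LittleEndian.PutUint64(f[xstateBVOffset:], 0xff)
	sanitizeXstateHeader(f, 0x7) // pretend only x87/SSE/AVX are supported
	fmt.Printf("XSTATE_BV = %#x\n", binary.LittleEndian.Uint64(f[xstateBVOffset:]))
}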
+
+// Register sets defined in include/uapi/linux/elf.h.
+const (
+ _NT_PRSTATUS = 1
+ _NT_PRFPREG = 2
+ _NT_X86_XSTATE = 0x202
+)
+
+// PtraceGetRegSet implements Context.PtraceGetRegSet.
+func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) {
+ switch regset {
+ case _NT_PRSTATUS:
+ if maxlen < ptraceRegsSize {
+ return 0, syserror.EFAULT
+ }
+ return s.PtraceGetRegs(dst)
+ case _NT_PRFPREG:
+ if maxlen < ptraceFPRegsSize {
+ return 0, syserror.EFAULT
+ }
+ return s.PtraceGetFPRegs(dst)
+ case _NT_X86_XSTATE:
+ return s.ptraceGetXstateRegs(dst, maxlen)
+ default:
+ return 0, syserror.EINVAL
+ }
+}
+
+// PtraceSetRegSet implements Context.PtraceSetRegSet.
+func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) {
+ switch regset {
+ case _NT_PRSTATUS:
+ if maxlen < ptraceRegsSize {
+ return 0, syserror.EFAULT
+ }
+ return s.PtraceSetRegs(src)
+ case _NT_PRFPREG:
+ if maxlen < ptraceFPRegsSize {
+ return 0, syserror.EFAULT
+ }
+ return s.PtraceSetFPRegs(src)
+ case _NT_X86_XSTATE:
+ return s.ptraceSetXstateRegs(src, maxlen)
+ default:
+ return 0, syserror.EINVAL
+ }
+}
+
+// FullRestore indicates whether a full restore is required.
+func (s *State) FullRestore() bool {
+ // A fast system call return is possible only if
+ //
+ // * RCX matches the instruction pointer.
+ // * R11 matches our flags value.
+ // * Usermode does not expect to set either the resume flag or the
+ // virtual mode flags (unlikely).
+ // * CS and SS are set to the standard selectors.
+ //
+ // That is, SYSRET results in the correct final state.
+ fastRestore := s.Regs.Rcx == s.Regs.Rip &&
+ s.Regs.Eflags == s.Regs.R11 &&
+ (s.Regs.Eflags&eflagsRF == 0) &&
+ (s.Regs.Eflags&eflagsVM == 0) &&
+ s.Regs.Cs == userCS &&
+ s.Regs.Ss == userDS
+ return !fastRestore
+}
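The conditions above can be read as a pure predicate. The sketch below, not part of the patch, restates them over a stripped-down register struct: the RF/VM bit positions are architectural (bits 16 and 17 of RFLAGS), while the selector values used in main are only illustrative stand-ins for the sentry's userCS/userDS.

package main

import "fmt"

// Architectural RFLAGS bits (Intel SDM): RF is bit 16, VM is bit 17.
const (
	eflagsRF = uint64(1) << 16
	eflagsVM = uint64(1) << 17
)

// regs is a stripped-down register subset for illustration.
type regs struct {
	Rcx, Rip, R11, Eflags, Cs, Ss uint64
}

// needsFullRestore restates the FullRestore logic above: SYSRET is usable
// only when RCX/R11 already hold RIP/RFLAGS, RF and VM are clear, and the
// standard user selectors are loaded.
func needsFullRestore(r regs, userCS, userDS uint64) bool {
	fast := r.Rcx == r.Rip &&
		r.Eflags == r.R11 &&
		r.Eflags&eflagsRF == 0 &&
		r.Eflags&eflagsVM == 0 &&
		r.Cs == userCS &&
		r.Ss == userDS
	return !fast
}

func main() {
	// Selector values 0x33/0x2b are illustrative stand-ins for userCS/userDS.
	r := regs{Rcx: 0x4000, Rip: 0x4000, R11: 0x202, Eflags: 0x202, Cs: 0x33, Ss: 0x2b}
	fmt.Println(needsFullRestore(r, 0x33, 0x2b)) // false: the fast path suffices
}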
+
+// New returns a new architecture context.
+func New(arch Arch, fs *cpuid.FeatureSet) Context {
+ switch arch {
+ case AMD64:
+ return &context64{
+ State{
+ x86FPState: newX86FPState(),
+ FeatureSet: fs,
+ },
+ []x86FPState(nil),
+ }
+ }
+ panic(fmt.Sprintf("unknown architecture %v", arch))
+}
+}
diff --git a/pkg/sentry/arch/auxv.go b/pkg/sentry/arch/auxv.go
new file mode 100644
index 000000000..80c923103
--- /dev/null
+++ b/pkg/sentry/arch/auxv.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+import (
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// An AuxEntry represents an entry in an ELF auxiliary vector.
+//
+// +stateify savable
+type AuxEntry struct {
+ Key uint64
+ Value usermem.Addr
+}
+
+// An Auxv represents an ELF auxiliary vector.
+type Auxv []AuxEntry
diff --git a/pkg/sentry/arch/registers_go_proto/registers.pb.go b/pkg/sentry/arch/registers_go_proto/registers.pb.go
new file mode 100755
index 000000000..088209be7
--- /dev/null
+++ b/pkg/sentry/arch/registers_go_proto/registers.pb.go
@@ -0,0 +1,367 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: pkg/sentry/arch/registers.proto
+
+package gvisor
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AMD64Registers struct {
+ Rax uint64 `protobuf:"varint,1,opt,name=rax,proto3" json:"rax,omitempty"`
+ Rbx uint64 `protobuf:"varint,2,opt,name=rbx,proto3" json:"rbx,omitempty"`
+ Rcx uint64 `protobuf:"varint,3,opt,name=rcx,proto3" json:"rcx,omitempty"`
+ Rdx uint64 `protobuf:"varint,4,opt,name=rdx,proto3" json:"rdx,omitempty"`
+ Rsi uint64 `protobuf:"varint,5,opt,name=rsi,proto3" json:"rsi,omitempty"`
+ Rdi uint64 `protobuf:"varint,6,opt,name=rdi,proto3" json:"rdi,omitempty"`
+ Rsp uint64 `protobuf:"varint,7,opt,name=rsp,proto3" json:"rsp,omitempty"`
+ Rbp uint64 `protobuf:"varint,8,opt,name=rbp,proto3" json:"rbp,omitempty"`
+ R8 uint64 `protobuf:"varint,9,opt,name=r8,proto3" json:"r8,omitempty"`
+ R9 uint64 `protobuf:"varint,10,opt,name=r9,proto3" json:"r9,omitempty"`
+ R10 uint64 `protobuf:"varint,11,opt,name=r10,proto3" json:"r10,omitempty"`
+ R11 uint64 `protobuf:"varint,12,opt,name=r11,proto3" json:"r11,omitempty"`
+ R12 uint64 `protobuf:"varint,13,opt,name=r12,proto3" json:"r12,omitempty"`
+ R13 uint64 `protobuf:"varint,14,opt,name=r13,proto3" json:"r13,omitempty"`
+ R14 uint64 `protobuf:"varint,15,opt,name=r14,proto3" json:"r14,omitempty"`
+ R15 uint64 `protobuf:"varint,16,opt,name=r15,proto3" json:"r15,omitempty"`
+ Rip uint64 `protobuf:"varint,17,opt,name=rip,proto3" json:"rip,omitempty"`
+ Rflags uint64 `protobuf:"varint,18,opt,name=rflags,proto3" json:"rflags,omitempty"`
+ OrigRax uint64 `protobuf:"varint,19,opt,name=orig_rax,json=origRax,proto3" json:"orig_rax,omitempty"`
+ Cs uint64 `protobuf:"varint,20,opt,name=cs,proto3" json:"cs,omitempty"`
+ Ds uint64 `protobuf:"varint,21,opt,name=ds,proto3" json:"ds,omitempty"`
+ Es uint64 `protobuf:"varint,22,opt,name=es,proto3" json:"es,omitempty"`
+ Fs uint64 `protobuf:"varint,23,opt,name=fs,proto3" json:"fs,omitempty"`
+ Gs uint64 `protobuf:"varint,24,opt,name=gs,proto3" json:"gs,omitempty"`
+ Ss uint64 `protobuf:"varint,25,opt,name=ss,proto3" json:"ss,omitempty"`
+ FsBase uint64 `protobuf:"varint,26,opt,name=fs_base,json=fsBase,proto3" json:"fs_base,omitempty"`
+ GsBase uint64 `protobuf:"varint,27,opt,name=gs_base,json=gsBase,proto3" json:"gs_base,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AMD64Registers) Reset() { *m = AMD64Registers{} }
+func (m *AMD64Registers) String() string { return proto.CompactTextString(m) }
+func (*AMD64Registers) ProtoMessage() {}
+func (*AMD64Registers) Descriptor() ([]byte, []int) {
+ return fileDescriptor_082b7510610e0457, []int{0}
+}
+
+func (m *AMD64Registers) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AMD64Registers.Unmarshal(m, b)
+}
+func (m *AMD64Registers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AMD64Registers.Marshal(b, m, deterministic)
+}
+func (m *AMD64Registers) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AMD64Registers.Merge(m, src)
+}
+func (m *AMD64Registers) XXX_Size() int {
+ return xxx_messageInfo_AMD64Registers.Size(m)
+}
+func (m *AMD64Registers) XXX_DiscardUnknown() {
+ xxx_messageInfo_AMD64Registers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AMD64Registers proto.InternalMessageInfo
+
+func (m *AMD64Registers) GetRax() uint64 {
+ if m != nil {
+ return m.Rax
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRbx() uint64 {
+ if m != nil {
+ return m.Rbx
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRcx() uint64 {
+ if m != nil {
+ return m.Rcx
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRdx() uint64 {
+ if m != nil {
+ return m.Rdx
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRsi() uint64 {
+ if m != nil {
+ return m.Rsi
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRdi() uint64 {
+ if m != nil {
+ return m.Rdi
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRsp() uint64 {
+ if m != nil {
+ return m.Rsp
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRbp() uint64 {
+ if m != nil {
+ return m.Rbp
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR8() uint64 {
+ if m != nil {
+ return m.R8
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR9() uint64 {
+ if m != nil {
+ return m.R9
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR10() uint64 {
+ if m != nil {
+ return m.R10
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR11() uint64 {
+ if m != nil {
+ return m.R11
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR12() uint64 {
+ if m != nil {
+ return m.R12
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR13() uint64 {
+ if m != nil {
+ return m.R13
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR14() uint64 {
+ if m != nil {
+ return m.R14
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetR15() uint64 {
+ if m != nil {
+ return m.R15
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRip() uint64 {
+ if m != nil {
+ return m.Rip
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetRflags() uint64 {
+ if m != nil {
+ return m.Rflags
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetOrigRax() uint64 {
+ if m != nil {
+ return m.OrigRax
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetCs() uint64 {
+ if m != nil {
+ return m.Cs
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetDs() uint64 {
+ if m != nil {
+ return m.Ds
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetEs() uint64 {
+ if m != nil {
+ return m.Es
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetFs() uint64 {
+ if m != nil {
+ return m.Fs
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetGs() uint64 {
+ if m != nil {
+ return m.Gs
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetSs() uint64 {
+ if m != nil {
+ return m.Ss
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetFsBase() uint64 {
+ if m != nil {
+ return m.FsBase
+ }
+ return 0
+}
+
+func (m *AMD64Registers) GetGsBase() uint64 {
+ if m != nil {
+ return m.GsBase
+ }
+ return 0
+}
+
+type Registers struct {
+ // Types that are valid to be assigned to Arch:
+ // *Registers_Amd64
+ Arch isRegisters_Arch `protobuf_oneof:"arch"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Registers) Reset() { *m = Registers{} }
+func (m *Registers) String() string { return proto.CompactTextString(m) }
+func (*Registers) ProtoMessage() {}
+func (*Registers) Descriptor() ([]byte, []int) {
+ return fileDescriptor_082b7510610e0457, []int{1}
+}
+
+func (m *Registers) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Registers.Unmarshal(m, b)
+}
+func (m *Registers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Registers.Marshal(b, m, deterministic)
+}
+func (m *Registers) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Registers.Merge(m, src)
+}
+func (m *Registers) XXX_Size() int {
+ return xxx_messageInfo_Registers.Size(m)
+}
+func (m *Registers) XXX_DiscardUnknown() {
+ xxx_messageInfo_Registers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Registers proto.InternalMessageInfo
+
+type isRegisters_Arch interface {
+ isRegisters_Arch()
+}
+
+type Registers_Amd64 struct {
+ Amd64 *AMD64Registers `protobuf:"bytes,1,opt,name=amd64,proto3,oneof"`
+}
+
+func (*Registers_Amd64) isRegisters_Arch() {}
+
+func (m *Registers) GetArch() isRegisters_Arch {
+ if m != nil {
+ return m.Arch
+ }
+ return nil
+}
+
+func (m *Registers) GetAmd64() *AMD64Registers {
+ if x, ok := m.GetArch().(*Registers_Amd64); ok {
+ return x.Amd64
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Registers) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Registers_Amd64)(nil),
+ }
+}
+
+func init() {
+ proto.RegisterType((*AMD64Registers)(nil), "gvisor.AMD64Registers")
+ proto.RegisterType((*Registers)(nil), "gvisor.Registers")
+}
+
+func init() { proto.RegisterFile("pkg/sentry/arch/registers.proto", fileDescriptor_082b7510610e0457) }
+
+var fileDescriptor_082b7510610e0457 = []byte{
+ // 354 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x4d, 0x6f, 0xe2, 0x30,
+ 0x10, 0x86, 0x17, 0x08, 0x01, 0xcc, 0x2e, 0xcb, 0x66, 0x5b, 0x18, 0xda, 0x43, 0x2b, 0x4e, 0x3d,
+ 0x05, 0x02, 0x01, 0xc1, 0xb1, 0xb4, 0x87, 0x5e, 0x7a, 0xc9, 0x1f, 0x40, 0xf9, 0x74, 0xad, 0x7e,
+ 0xc4, 0xf2, 0xa0, 0x2a, 0x3d, 0xf7, 0x8f, 0x57, 0xf6, 0xd8, 0xaa, 0x7a, 0xcb, 0xf3, 0xcc, 0x6b,
+ 0x39, 0x93, 0xbc, 0xec, 0x4a, 0x3e, 0xf3, 0x05, 0x96, 0x6f, 0x27, 0xf5, 0xb1, 0x48, 0x55, 0xfe,
+ 0xb4, 0x50, 0x25, 0x17, 0x78, 0x2a, 0x15, 0x86, 0x52, 0xd5, 0xa7, 0x3a, 0xf0, 0xf9, 0xbb, 0xc0,
+ 0x5a, 0xcd, 0x3f, 0x3d, 0x36, 0xba, 0x7d, 0xbc, 0xdf, 0xc6, 0x89, 0x0b, 0x04, 0x63, 0xd6, 0x51,
+ 0x69, 0x03, 0xad, 0xeb, 0xd6, 0x8d, 0x97, 0xe8, 0x47, 0x63, 0xb2, 0x06, 0xda, 0xd6, 0x64, 0x64,
+ 0xf2, 0x06, 0x3a, 0xd6, 0xe4, 0x64, 0x8a, 0x06, 0x3c, 0x6b, 0x0a, 0x32, 0x28, 0xa0, 0x6b, 0x0d,
+ 0x0a, 0xca, 0x08, 0xf0, 0x5d, 0x86, 0x0c, 0x4a, 0xe8, 0xb9, 0x8c, 0xa4, 0xbb, 0x24, 0xf4, 0xdd,
+ 0x5d, 0x32, 0x18, 0xb1, 0xb6, 0xda, 0xc1, 0xc0, 0x88, 0xb6, 0xda, 0x19, 0xde, 0x03, 0xb3, 0xbc,
+ 0x37, 0x27, 0xa2, 0x25, 0x0c, 0xed, 0x89, 0x68, 0x49, 0x26, 0x82, 0xdf, 0xce, 0x44, 0x64, 0x56,
+ 0xf0, 0xc7, 0x99, 0x15, 0x99, 0x35, 0x8c, 0x9c, 0x59, 0x93, 0x89, 0xe1, 0xaf, 0x33, 0x31, 0x99,
+ 0x0d, 0x8c, 0x9d, 0xd9, 0x18, 0x23, 0x24, 0xfc, 0xb3, 0x46, 0xc8, 0x60, 0xc2, 0x7c, 0x55, 0xbd,
+ 0xa4, 0x1c, 0x21, 0x30, 0xd2, 0x52, 0x30, 0x63, 0xfd, 0x5a, 0x09, 0x7e, 0xd4, 0x9f, 0xf2, 0xbf,
+ 0x99, 0xf4, 0x34, 0x27, 0x69, 0xa3, 0x17, 0xc8, 0x11, 0xce, 0x68, 0x81, 0x1c, 0x35, 0x17, 0x08,
+ 0xe7, 0xc4, 0x85, 0xe1, 0x12, 0x61, 0x42, 0x5c, 0x1a, 0xae, 0x10, 0xa6, 0xc4, 0x95, 0x61, 0x8e,
+ 0x00, 0xc4, 0xdc, 0x30, 0x22, 0xcc, 0x88, 0x11, 0x83, 0x29, 0xeb, 0x55, 0x78, 0xcc, 0x52, 0x2c,
+ 0xe1, 0x82, 0xde, 0xa9, 0xc2, 0x43, 0x8a, 0xa5, 0x1e, 0x70, 0x3b, 0xb8, 0xa4, 0x01, 0x37, 0x83,
+ 0xf9, 0x1d, 0x1b, 0x7c, 0xff, 0xff, 0x90, 0x75, 0xd3, 0xd7, 0x62, 0x1b, 0x9b, 0x06, 0x0c, 0x57,
+ 0x93, 0x90, 0xaa, 0x12, 0xfe, 0xac, 0xc9, 0xc3, 0xaf, 0x84, 0x62, 0x07, 0x9f, 0x79, 0xba, 0x62,
+ 0x99, 0x6f, 0x9a, 0xb5, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0xb4, 0xcc, 0x03, 0x27, 0x7c, 0x02,
+ 0x00, 0x00,
+}
diff --git a/pkg/sentry/arch/signal_act.go b/pkg/sentry/arch/signal_act.go
new file mode 100644
index 000000000..f9ca2e74e
--- /dev/null
+++ b/pkg/sentry/arch/signal_act.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+// Special values for SignalAct.Handler.
+const (
+ // SignalActDefault is SIG_DFL and specifies that the default behavior for
+ // a signal should be taken.
+ SignalActDefault = 0
+
+ // SignalActIgnore is SIG_IGN and specifies that a signal should be
+ // ignored.
+ SignalActIgnore = 1
+)
+
+// Available signal flags.
+const (
+ SignalFlagNoCldStop = 0x00000001
+ SignalFlagNoCldWait = 0x00000002
+ SignalFlagSigInfo = 0x00000004
+ SignalFlagRestorer = 0x04000000
+ SignalFlagOnStack = 0x08000000
+ SignalFlagRestart = 0x10000000
+ SignalFlagInterrupt = 0x20000000
+ SignalFlagNoDefer = 0x40000000
+ SignalFlagResetHandler = 0x80000000
+)
+
+// IsSigInfo returns true iff this handler expects siginfo.
+func (s SignalAct) IsSigInfo() bool {
+ return s.Flags&SignalFlagSigInfo != 0
+}
+
+// IsNoDefer returns true iff this SignalAct has the NoDefer flag set.
+func (s SignalAct) IsNoDefer() bool {
+ return s.Flags&SignalFlagNoDefer != 0
+}
+
+// IsRestart returns true iff this SignalAct has the Restart flag set.
+func (s SignalAct) IsRestart() bool {
+ return s.Flags&SignalFlagRestart != 0
+}
+
+// IsResetHandler returns true iff this SignalAct has the ResetHandler flag set.
+func (s SignalAct) IsResetHandler() bool {
+ return s.Flags&SignalFlagResetHandler != 0
+}
+
+// IsOnStack returns true iff this SignalAct has the OnStack flag set.
+func (s SignalAct) IsOnStack() bool {
+ return s.Flags&SignalFlagOnStack != 0
+}
+
+// HasRestorer returns true iff this SignalAct has the Restorer flag set.
+func (s SignalAct) HasRestorer() bool {
+ return s.Flags&SignalFlagRestorer != 0
+}
+
+// NativeSignalAct is a type that is equivalent to struct sigaction in the
+// guest architecture.
+type NativeSignalAct interface {
+ // SerializeFrom copies the data in the host SignalAct s into this object.
+ SerializeFrom(s *SignalAct)
+
+ // DeserializeTo copies the data in this object into the host SignalAct s.
+ DeserializeTo(s *SignalAct)
+}
diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go
new file mode 100644
index 000000000..aa030fd70
--- /dev/null
+++ b/pkg/sentry/arch/signal_amd64.go
@@ -0,0 +1,521 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+package arch
+
+import (
+ "encoding/binary"
+ "math"
+ "syscall"
+
+ "gvisor.googlesource.com/gvisor/pkg/abi/linux"
+ "gvisor.googlesource.com/gvisor/pkg/log"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// SignalAct represents the action that should be taken when a signal is
+// delivered, and is equivalent to struct sigaction on 64-bit x86.
+//
+// +stateify savable
+type SignalAct struct {
+ Handler uint64
+ Flags uint64
+ Restorer uint64
+ Mask linux.SignalSet
+}
+
+// SerializeFrom implements NativeSignalAct.SerializeFrom.
+func (s *SignalAct) SerializeFrom(other *SignalAct) {
+ *s = *other
+}
+
+// DeserializeTo implements NativeSignalAct.DeserializeTo.
+func (s *SignalAct) DeserializeTo(other *SignalAct) {
+ *other = *s
+}
+
+// SignalStack represents information about a user stack, and is equivalent to
+// stack_t on 64-bit x86.
+//
+// +stateify savable
+type SignalStack struct {
+ Addr uint64
+ Flags uint32
+ _ uint32
+ Size uint64
+}
+
+// SerializeFrom implements NativeSignalStack.SerializeFrom.
+func (s *SignalStack) SerializeFrom(other *SignalStack) {
+ *s = *other
+}
+
+// DeserializeTo implements NativeSignalStack.DeserializeTo.
+func (s *SignalStack) DeserializeTo(other *SignalStack) {
+ *other = *s
+}
+
+// SignalInfo represents information about a signal being delivered, and is
+// equivalent to struct siginfo on 64-bit x86.
+//
+// +stateify savable
+type SignalInfo struct {
+ Signo int32 // Signal number
+ Errno int32 // Errno value
+ Code int32 // Signal code
+ _ uint32
+
+ // struct siginfo::_sifields is a union. In SignalInfo, fields in the union
+ // are accessed through methods.
+ //
+ // For reference, here is the definition of _sifields: (_sigfault._trapno,
+ // which does not exist on x86, omitted for clarity)
+ //
+ // union {
+ // int _pad[SI_PAD_SIZE];
+ //
+ // /* kill() */
+ // struct {
+ // __kernel_pid_t _pid; /* sender's pid */
+ // __ARCH_SI_UID_T _uid; /* sender's uid */
+ // } _kill;
+ //
+ // /* POSIX.1b timers */
+ // struct {
+ // __kernel_timer_t _tid; /* timer id */
+ // int _overrun; /* overrun count */
+ // char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ // sigval_t _sigval; /* same as below */
+ // int _sys_private; /* not to be passed to user */
+ // } _timer;
+ //
+ // /* POSIX.1b signals */
+ // struct {
+ // __kernel_pid_t _pid; /* sender's pid */
+ // __ARCH_SI_UID_T _uid; /* sender's uid */
+ // sigval_t _sigval;
+ // } _rt;
+ //
+ // /* SIGCHLD */
+ // struct {
+ // __kernel_pid_t _pid; /* which child */
+ // __ARCH_SI_UID_T _uid; /* sender's uid */
+ // int _status; /* exit code */
+ // __ARCH_SI_CLOCK_T _utime;
+ // __ARCH_SI_CLOCK_T _stime;
+ // } _sigchld;
+ //
+ // /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ // struct {
+ // void *_addr; /* faulting insn/memory ref. */
+ // short _addr_lsb; /* LSB of the reported address */
+ // } _sigfault;
+ //
+ // /* SIGPOLL */
+ // struct {
+ // __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ // int _fd;
+ // } _sigpoll;
+ //
+ // /* SIGSYS */
+ // struct {
+ // void *_call_addr; /* calling user insn */
+ // int _syscall; /* triggering system call number */
+ // unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ // } _sigsys;
+ // } _sifields;
+ //
+ // _sifields is padded so that the size of siginfo is SI_MAX_SIZE = 128
+ // bytes.
+ Fields [128 - 16]byte
+}
+
+// FixSignalCodeForUser fixes up si_code.
+//
+// The si_code we get from Linux may contain the kernel-specific code in the
+// top 16 bits if it's positive (e.g., from ptrace). Linux's
+// copy_siginfo_to_user does
+// err |= __put_user((short)from->si_code, &to->si_code);
+// to mask out those bits and we need to do the same.
+func (s *SignalInfo) FixSignalCodeForUser() {
+ if s.Code > 0 {
+ s.Code &= 0x0000ffff
+ }
+}
+
+// Pid returns the si_pid field.
+func (s *SignalInfo) Pid() int32 {
+ return int32(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+}
+
+// SetPid mutates the si_pid field.
+func (s *SignalInfo) SetPid(val int32) {
+ usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+}
+
+// Uid returns the si_uid field.
+func (s *SignalInfo) Uid() int32 {
+ return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+}
+
+// SetUid mutates the si_uid field.
+func (s *SignalInfo) SetUid(val int32) {
+ usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+}
+
+// Sigval returns the sigval field, which is aliased to both si_int and si_ptr.
+func (s *SignalInfo) Sigval() uint64 {
+ return usermem.ByteOrder.Uint64(s.Fields[8:16])
+}
+
+// SetSigval mutates the sigval field.
+func (s *SignalInfo) SetSigval(val uint64) {
+ usermem.ByteOrder.PutUint64(s.Fields[8:16], val)
+}
+
+// TimerID returns the si_timerid field.
+func (s *SignalInfo) TimerID() linux.TimerID {
+ return linux.TimerID(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+}
+
+// SetTimerID sets the si_timerid field.
+func (s *SignalInfo) SetTimerID(val linux.TimerID) {
+ usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+}
+
+// Overrun returns the si_overrun field.
+func (s *SignalInfo) Overrun() int32 {
+ return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+}
+
+// SetOverrun sets the si_overrun field.
+func (s *SignalInfo) SetOverrun(val int32) {
+ usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+}
+
+// Addr returns the si_addr field.
+func (s *SignalInfo) Addr() uint64 {
+ return usermem.ByteOrder.Uint64(s.Fields[0:8])
+}
+
+// SetAddr sets the si_addr field.
+func (s *SignalInfo) SetAddr(val uint64) {
+ usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+}
+
+// Status returns the si_status field.
+func (s *SignalInfo) Status() int32 {
+ return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+}
+
+// SetStatus mutates the si_status field.
+func (s *SignalInfo) SetStatus(val int32) {
+ usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+}
+
+// CallAddr returns the si_call_addr field.
+func (s *SignalInfo) CallAddr() uint64 {
+ return usermem.ByteOrder.Uint64(s.Fields[0:8])
+}
+
+// SetCallAddr mutates the si_call_addr field.
+func (s *SignalInfo) SetCallAddr(val uint64) {
+ usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+}
+
+// Syscall returns the si_syscall field.
+func (s *SignalInfo) Syscall() int32 {
+ return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+}
+
+// SetSyscall mutates the si_syscall field.
+func (s *SignalInfo) SetSyscall(val int32) {
+ usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+}
+
+// Arch returns the si_arch field.
+func (s *SignalInfo) Arch() uint32 {
+ return usermem.ByteOrder.Uint32(s.Fields[12:16])
+}
+
+// SetArch mutates the si_arch field.
+func (s *SignalInfo) SetArch(val uint32) {
+ usermem.ByteOrder.PutUint32(s.Fields[12:16], val)
+}
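Since _sifields is kept as raw bytes, the accessors above are just fixed-offset reads and writes into Fields. A small stdlib-only sketch, outside the patch, of the same encoding for the SIGCHLD layout; the offsets come from the union comment above, the values are made up, and little-endian order is assumed as on x86.

package main

import (
	"encoding/binary"
	"fmt"
)

// fields mimics SignalInfo.Fields: the raw bytes of the _sifields union.
type fields [128 - 16]byte

func main() {
	var f fields
	// SIGCHLD layout per the union comment above: _pid at [0:4], _uid at
	// [4:8], _status at [8:12].
	binary.LittleEndian.PutUint32(f[0:4], 1234) // si_pid
	binary.LittleEndian.PutUint32(f[4:8], 1000) // si_uid
	binary.LittleEndian.PutUint32(f[8:12], 0)   // si_status

	fmt.Println(binary.LittleEndian.Uint32(f[0:4]),
		binary.LittleEndian.Uint32(f[4:8]),
		int32(binary.LittleEndian.Uint32(f[8:12])))
}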
+
+// SignalContext64 is equivalent to struct sigcontext, the type passed as the
+// second argument to signal handlers set by signal(2).
+type SignalContext64 struct {
+ R8 uint64
+ R9 uint64
+ R10 uint64
+ R11 uint64
+ R12 uint64
+ R13 uint64
+ R14 uint64
+ R15 uint64
+ Rdi uint64
+ Rsi uint64
+ Rbp uint64
+ Rbx uint64
+ Rdx uint64
+ Rax uint64
+ Rcx uint64
+ Rsp uint64
+ Rip uint64
+ Eflags uint64
+ Cs uint16
+ Gs uint16 // always 0 on amd64.
+ Fs uint16 // always 0 on amd64.
+ Ss uint16 // only restored if _UC_STRICT_RESTORE_SS (unsupported).
+ Err uint64
+ Trapno uint64
+ Oldmask linux.SignalSet
+ Cr2 uint64
+ // Pointer to a struct _fpstate.
+ Fpstate uint64
+ Reserved [8]uint64
+}
+
+// Flags for UContext64.Flags.
+const (
+ _UC_FP_XSTATE = 1
+ _UC_SIGCONTEXT_SS = 2
+ _UC_STRICT_RESTORE_SS = 4
+)
+
+// UContext64 is equivalent to ucontext_t on 64-bit x86.
+type UContext64 struct {
+ Flags uint64
+ Link uint64
+ Stack SignalStack
+ MContext SignalContext64
+ Sigset linux.SignalSet
+}
+
+// NewSignalAct implements Context.NewSignalAct.
+func (c *context64) NewSignalAct() NativeSignalAct {
+ return &SignalAct{}
+}
+
+// NewSignalStack implements Context.NewSignalStack.
+func (c *context64) NewSignalStack() NativeSignalStack {
+ return &SignalStack{}
+}
+
+// From Linux 'arch/x86/include/uapi/asm/sigcontext.h' the following is the
+// size of the magic cookie at the end of the xsave frame.
+//
+// NOTE(b/33003106#comment11): Currently we don't actually populate the fpstate
+// on the signal stack.
+const _FP_XSTATE_MAGIC2_SIZE = 4
+
+func (c *context64) fpuFrameSize() (size int, useXsave bool) {
+ size = len(c.x86FPState)
+ if size > 512 {
+ // Make room for the magic cookie at the end of the xsave frame.
+ size += _FP_XSTATE_MAGIC2_SIZE
+ useXsave = true
+ }
+ return size, useXsave
+}
+
+// SignalSetup implements Context.SignalSetup. (Compare to Linux's
+// arch/x86/kernel/signal.c:__setup_rt_frame().)
+func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt *SignalStack, sigset linux.SignalSet) error {
+ sp := st.Bottom
+
+ // "The 128-byte area beyond the location pointed to by %rsp is considered
+ // to be reserved and shall not be modified by signal or interrupt
+ // handlers. ... leaf functions may use this area for their entire stack
+ // frame, rather than adjusting the stack pointer in the prologue and
+ // epilogue." - AMD64 ABI
+ //
+ // (But this doesn't apply if we're starting at the top of the signal
+ // stack, in which case there is no following stack frame.)
+ if !(alt.IsEnabled() && sp == alt.Top()) {
+ sp -= 128
+ }
+
+ // Allocate space for floating point state on the stack.
+ //
+ // This isn't strictly necessary because we don't actually populate
+ // the fpstate. However we do store the floating point state of the
+ // interrupted thread inside the sentry. Simply accounting for this
+ // space on the user stack naturally caps the amount of memory the
+ // sentry will allocate for this purpose.
+ fpSize, _ := c.fpuFrameSize()
+ sp = (sp - usermem.Addr(fpSize)) & ^usermem.Addr(63)
+
+ // Construct the UContext64 now since we need its size.
+ uc := &UContext64{
+ // No _UC_FP_XSTATE: see Fpstate above.
+ // No _UC_STRICT_RESTORE_SS: we don't allow SS changes.
+ Flags: _UC_SIGCONTEXT_SS,
+ Stack: *alt,
+ MContext: SignalContext64{
+ R8: c.Regs.R8,
+ R9: c.Regs.R9,
+ R10: c.Regs.R10,
+ R11: c.Regs.R11,
+ R12: c.Regs.R12,
+ R13: c.Regs.R13,
+ R14: c.Regs.R14,
+ R15: c.Regs.R15,
+ Rdi: c.Regs.Rdi,
+ Rsi: c.Regs.Rsi,
+ Rbp: c.Regs.Rbp,
+ Rbx: c.Regs.Rbx,
+ Rdx: c.Regs.Rdx,
+ Rax: c.Regs.Rax,
+ Rcx: c.Regs.Rcx,
+ Rsp: c.Regs.Rsp,
+ Rip: c.Regs.Rip,
+ Eflags: c.Regs.Eflags,
+ Cs: uint16(c.Regs.Cs),
+ Ss: uint16(c.Regs.Ss),
+ Oldmask: sigset,
+ },
+ Sigset: sigset,
+ }
+
+ // TODO(gvisor.dev/issue/159): Set SignalContext64.Err, Trapno, and Cr2
+ // based on the fault that caused the signal. For now, leave Err and
+ // Trapno unset and assume CR2 == info.Addr() for SIGSEGVs and
+ // SIGBUSes.
+ if linux.Signal(info.Signo) == linux.SIGSEGV || linux.Signal(info.Signo) == linux.SIGBUS {
+ uc.MContext.Cr2 = info.Addr()
+ }
+
+ // "... the value (%rsp+8) is always a multiple of 16 (...) when
+ // control is transferred to the function entry point." - AMD64 ABI
+ ucSize := binary.Size(uc)
+ if ucSize < 0 {
+ // This can only happen if we've screwed up the definition of
+ // UContext64.
+ panic("can't get size of UContext64")
+ }
+ // st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128.
+ frameSize := int(st.Arch.Width()) + ucSize + 128
+ frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8
+ sp = frameBottom + usermem.Addr(frameSize)
+ st.Bottom = sp
+
+ // Prior to proceeding, figure out if the frame will exhaust the range
+ // for the signal stack. This is not allowed, and should immediately
+ // force signal delivery (reverting to the default handler).
+ if act.IsOnStack() && alt.IsEnabled() && !alt.Contains(frameBottom) {
+ return syscall.EFAULT
+ }
+
+ // Adjust the code.
+ info.FixSignalCodeForUser()
+
+ // Set up the stack frame.
+ infoAddr, err := st.Push(info)
+ if err != nil {
+ return err
+ }
+ ucAddr, err := st.Push(uc)
+ if err != nil {
+ return err
+ }
+ if act.HasRestorer() {
+ // Push the restorer return address.
+ // Note that this doesn't need to be popped.
+ if _, err := st.Push(usermem.Addr(act.Restorer)); err != nil {
+ return err
+ }
+ } else {
+ // amd64 requires a restorer.
+ return syscall.EFAULT
+ }
+
+ // Set up registers.
+ c.Regs.Rip = act.Handler
+ c.Regs.Rsp = uint64(st.Bottom)
+ c.Regs.Rdi = uint64(info.Signo)
+ c.Regs.Rsi = uint64(infoAddr)
+ c.Regs.Rdx = uint64(ucAddr)
+ c.Regs.Rax = 0
+ c.Regs.Ds = userDS
+ c.Regs.Es = userDS
+ c.Regs.Cs = userCS
+ c.Regs.Ss = userDS
+
+ // Save the thread's floating point state.
+ c.sigFPState = append(c.sigFPState, c.x86FPState)
+
+ // Signal handler gets a clean floating point state.
+ c.x86FPState = newX86FPState()
+
+ return nil
+}
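The stack arithmetic above packs three things into the frame (restorer return address, UContext64, siginfo) and then rounds the frame bottom so that %rsp+8 is 16-byte aligned at handler entry. Below is a small sketch of just that rounding step, outside the patch; the ucSize passed in main is a placeholder rather than the real binary.Size(UContext64).

package main

import "fmt"

// sigFrameBottom mirrors the alignment math in SignalSetup: subtract the frame
// size, round down to 16 bytes, then subtract 8 so that rsp+8 is 16-byte
// aligned once the restorer return address is in place (AMD64 ABI).
func sigFrameBottom(sp uint64, ptrWidth, ucSize, siginfoSize int) uint64 {
	frameSize := uint64(ptrWidth + ucSize + siginfoSize)
	return (sp-frameSize)&^uint64(15) - 8
}

func main() {
	bottom := sigFrameBottom(0x7fffffffe000, 8, 1024 /* placeholder */, 128)
	fmt.Printf("frame bottom %#x, (bottom+8)%%16 = %d\n", bottom, (bottom+8)%16)
}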
+
+// SignalRestore implements Context.SignalRestore. (Compare to Linux's
+// arch/x86/kernel/signal.c:sys_rt_sigreturn().)
+func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, SignalStack, error) {
+ // Copy out the stack frame.
+ var uc UContext64
+ if _, err := st.Pop(&uc); err != nil {
+ return 0, SignalStack{}, err
+ }
+ var info SignalInfo
+ if _, err := st.Pop(&info); err != nil {
+ return 0, SignalStack{}, err
+ }
+
+ // Restore registers.
+ c.Regs.R8 = uc.MContext.R8
+ c.Regs.R9 = uc.MContext.R9
+ c.Regs.R10 = uc.MContext.R10
+ c.Regs.R11 = uc.MContext.R11
+ c.Regs.R12 = uc.MContext.R12
+ c.Regs.R13 = uc.MContext.R13
+ c.Regs.R14 = uc.MContext.R14
+ c.Regs.R15 = uc.MContext.R15
+ c.Regs.Rdi = uc.MContext.Rdi
+ c.Regs.Rsi = uc.MContext.Rsi
+ c.Regs.Rbp = uc.MContext.Rbp
+ c.Regs.Rbx = uc.MContext.Rbx
+ c.Regs.Rdx = uc.MContext.Rdx
+ c.Regs.Rax = uc.MContext.Rax
+ c.Regs.Rcx = uc.MContext.Rcx
+ c.Regs.Rsp = uc.MContext.Rsp
+ c.Regs.Rip = uc.MContext.Rip
+ c.Regs.Eflags = (c.Regs.Eflags & ^eflagsRestorable) | (uc.MContext.Eflags & eflagsRestorable)
+ c.Regs.Cs = uint64(uc.MContext.Cs) | 3
+ // N.B. _UC_STRICT_RESTORE_SS not supported.
+ c.Regs.Orig_rax = math.MaxUint64
+
+ // Restore floating point state.
+ l := len(c.sigFPState)
+ if l > 0 {
+ c.x86FPState = c.sigFPState[l-1]
+ // NOTE(cl/133042258): State save requires that any slice
+ // elements from '[len:cap]' to be zero value.
+ c.sigFPState[l-1] = nil
+ c.sigFPState = c.sigFPState[0 : l-1]
+ } else {
+ // This might happen if sigreturn(2) calls are unbalanced with
+ // respect to signal handler entries. This is not expected so
+ // don't bother to do anything fancy with the floating point
+ // state.
+ log.Infof("sigreturn unable to restore application fpstate")
+ }
+
+ return uc.Sigset, uc.Stack, nil
+}
diff --git a/pkg/sentry/arch/signal_info.go b/pkg/sentry/arch/signal_info.go
new file mode 100644
index 000000000..f93ee8b46
--- /dev/null
+++ b/pkg/sentry/arch/signal_info.go
@@ -0,0 +1,66 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+// Possible values for SignalInfo.Code. These values originate from the Linux
+// kernel's include/uapi/asm-generic/siginfo.h.
+const (
+ // SignalInfoUser (properly SI_USER) indicates that a signal was sent from
+ // a kill() or raise() syscall.
+ SignalInfoUser = 0
+
+ // SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent
+ // by the kernel.
+ SignalInfoKernel = 0x80
+
+ // SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent
+ // by an expired timer.
+ SignalInfoTimer = -2
+
+ // SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent
+ // from a tkill() or tgkill() syscall.
+ SignalInfoTkill = -6
+
+ // CLD_* codes are only meaningful for SIGCHLD.
+
+ // CLD_EXITED indicates that a task exited.
+ CLD_EXITED = 1
+
+ // CLD_KILLED indicates that a task was killed by a signal.
+ CLD_KILLED = 2
+
+ // CLD_DUMPED indicates that a task was killed by a signal and then dumped
+ // core.
+ CLD_DUMPED = 3
+
+ // CLD_TRAPPED indicates that a task was stopped by ptrace.
+ CLD_TRAPPED = 4
+
+ // CLD_STOPPED indicates that a thread group completed a group stop.
+ CLD_STOPPED = 5
+
+ // CLD_CONTINUED indicates that a group-stopped thread group was continued.
+ CLD_CONTINUED = 6
+
+ // SYS_* codes are only meaningful for SIGSYS.
+
+ // SYS_SECCOMP indicates that a signal originates from seccomp.
+ SYS_SECCOMP = 1
+
+ // TRAP_* codes are only meaningful for SIGTRAP.
+
+ // TRAP_BRKPT indicates a breakpoint trap.
+ TRAP_BRKPT = 1
+)
diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go
new file mode 100644
index 000000000..a442f9fdc
--- /dev/null
+++ b/pkg/sentry/arch/signal_stack.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build i386 amd64
+
+package arch
+
+import (
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+const (
+ // SignalStackFlagOnStack may be set on return from a sigaltstack(2) query,
+ // indicating that the thread is currently running on the alt stack.
+ SignalStackFlagOnStack = 1
+
+ // SignalStackFlagDisable is a flag to indicate the stack is disabled.
+ SignalStackFlagDisable = 2
+)
+
+// IsEnabled returns true iff this signal stack is marked as enabled.
+func (s SignalStack) IsEnabled() bool {
+ return s.Flags&SignalStackFlagDisable == 0
+}
+
+// Top returns the stack's top address.
+func (s SignalStack) Top() usermem.Addr {
+ return usermem.Addr(s.Addr + s.Size)
+}
+
+// SetOnStack marks this signal stack as in use.
+//
+// Note that there is no corresponding ClearOnStack, and that this should only
+// be called on copies that are serialized to userspace.
+func (s *SignalStack) SetOnStack() {
+ s.Flags |= SignalStackFlagOnStack
+}
+
+// Contains checks if the stack pointer is within this stack.
+func (s *SignalStack) Contains(sp usermem.Addr) bool {
+ return usermem.Addr(s.Addr) < sp && sp <= usermem.Addr(s.Addr+s.Size)
+}
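Worth noting from Contains above: the interval is half-open at the bottom, so sp == Addr counts as off the stack while sp == Addr+Size (the very top) counts as on it. A tiny sketch outside the patch, with made-up addresses:

package main

import "fmt"

// contains mirrors SignalStack.Contains above: the stack pointer is "on" the
// alternate stack when it lies in (Addr, Addr+Size].
func contains(addr, size, sp uint64) bool {
	return addr < sp && sp <= addr+size
}

func main() {
	const addr, size = uint64(0x7f0000000000), uint64(0x2000)
	fmt.Println(contains(addr, size, addr))        // false
	fmt.Println(contains(addr, size, addr+size))   // true
	fmt.Println(contains(addr, size, addr+size+8)) // false
}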
+
+// NativeSignalStack is a type that is equivalent to stack_t in the guest
+// architecture.
+type NativeSignalStack interface {
+ // SerializeFrom copies the data in the host SignalStack s into this
+ // object.
+ SerializeFrom(s *SignalStack)
+
+ // DeserializeTo copies the data in this object into the host SignalStack
+ // s.
+ DeserializeTo(s *SignalStack)
+}
diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go
new file mode 100644
index 000000000..7e6324e82
--- /dev/null
+++ b/pkg/sentry/arch/stack.go
@@ -0,0 +1,252 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arch
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// Stack is a simple wrapper around a usermem.IO and an address.
+type Stack struct {
+ // Our arch info.
+ // We use this for automatic Native conversion of usermem.Addrs during
+ // Push() and Pop().
+ Arch Context
+
+ // The interface used to actually copy user memory.
+ IO usermem.IO
+
+ // Our current stack bottom.
+ Bottom usermem.Addr
+}
+
+// Push pushes the given values on to the stack.
+//
+// (This method supports Addrs and treats them as native types.)
+func (s *Stack) Push(vals ...interface{}) (usermem.Addr, error) {
+ for _, v := range vals {
+
+ // We convert some types to well-known serializable quantities.
+ var norm interface{}
+
+ // For array types, we will automatically add an appropriate
+ // terminal value. This is done simply to make the interface
+ // easier to use.
+ var term interface{}
+
+ switch v.(type) {
+ case string:
+ norm = []byte(v.(string))
+ term = byte(0)
+ case []int8, []uint8:
+ norm = v
+ term = byte(0)
+ case []int16, []uint16:
+ norm = v
+ term = uint16(0)
+ case []int32, []uint32:
+ norm = v
+ term = uint32(0)
+ case []int64, []uint64:
+ norm = v
+ term = uint64(0)
+ case []usermem.Addr:
+ // Special case: simply push recursively.
+ _, err := s.Push(s.Arch.Native(uintptr(0)))
+ if err != nil {
+ return 0, err
+ }
+ varr := v.([]usermem.Addr)
+ for i := len(varr) - 1; i >= 0; i-- {
+ _, err := s.Push(varr[i])
+ if err != nil {
+ return 0, err
+ }
+ }
+ continue
+ case usermem.Addr:
+ norm = s.Arch.Native(uintptr(v.(usermem.Addr)))
+ default:
+ norm = v
+ }
+
+ if term != nil {
+ _, err := s.Push(term)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ c := binary.Size(norm)
+ if c < 0 {
+ return 0, fmt.Errorf("bad binary.Size for %T", v)
+ }
+ // TODO(b/38173783): Use a real context.Context.
+ n, err := usermem.CopyObjectOut(context.Background(), s.IO, s.Bottom-usermem.Addr(c), norm, usermem.IOOpts{})
+ if err != nil || c != n {
+ return 0, err
+ }
+
+ s.Bottom -= usermem.Addr(n)
+ }
+
+ return s.Bottom, nil
+}
+
+// Pop pops the given values off the stack.
+//
+// (This method supports Addrs and treats them as native types.)
+func (s *Stack) Pop(vals ...interface{}) (usermem.Addr, error) {
+ for _, v := range vals {
+
+ vaddr, isVaddr := v.(*usermem.Addr)
+
+ var n int
+ var err error
+ if isVaddr {
+ value := s.Arch.Native(uintptr(0))
+ // TODO(b/38173783): Use a real context.Context.
+ n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, value, usermem.IOOpts{})
+ *vaddr = usermem.Addr(s.Arch.Value(value))
+ } else {
+ // TODO(b/38173783): Use a real context.Context.
+ n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, v, usermem.IOOpts{})
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ s.Bottom += usermem.Addr(n)
+ }
+
+ return s.Bottom, nil
+}
+
+// Align aligns the stack to the given offset.
+func (s *Stack) Align(offset int) {
+ if s.Bottom%usermem.Addr(offset) != 0 {
+ s.Bottom -= (s.Bottom % usermem.Addr(offset))
+ }
+}
+
+// StackLayout describes the location of the arguments and environment on the
+// stack.
+type StackLayout struct {
+ // ArgvStart is the beginning of the argument vector.
+ ArgvStart usermem.Addr
+
+ // ArgvEnd is the end of the argument vector.
+ ArgvEnd usermem.Addr
+
+ // EnvvStart is the beginning of the environment vector.
+ EnvvStart usermem.Addr
+
+ // EnvvEnd is the end of the environment vector.
+ EnvvEnd usermem.Addr
+}
+
+// Load pushes the given args, env and aux vector to the stack using the
+// well-known format for a new executable. It returns the start and end
+// of the argument and environment vectors.
+func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) {
+ l := StackLayout{}
+
+ // Make sure we start with a 16-byte alignment.
+ s.Align(16)
+
+ // Push the environment vector first so that the end of the argument vector
+ // is adjacent to the beginning of the environment vector.
+ // While the System V ABI for x86_64 does not specify an ordering for the
+ // Information Block (the block holding the arg, env, and aux vectors),
+ // features like setproctitle(3) naturally expect these segments to be in
+ // this order. See: https://www.uclibc.org/docs/psABI-x86_64.pdf page 29.
+ l.EnvvEnd = s.Bottom
+ envAddrs := make([]usermem.Addr, len(env))
+ for i := len(env) - 1; i >= 0; i-- {
+ addr, err := s.Push(env[i])
+ if err != nil {
+ return StackLayout{}, err
+ }
+ envAddrs[i] = addr
+ }
+ l.EnvvStart = s.Bottom
+
+ // Push our strings.
+ l.ArgvEnd = s.Bottom
+ argAddrs := make([]usermem.Addr, len(args))
+ for i := len(args) - 1; i >= 0; i-- {
+ addr, err := s.Push(args[i])
+ if err != nil {
+ return StackLayout{}, err
+ }
+ argAddrs[i] = addr
+ }
+ l.ArgvStart = s.Bottom
+
+ // We need to align the arguments appropriately.
+ //
+ // We must finish on a 16-byte alignment, but we'll play it
+ // conservatively and finish at 32-bytes. It would be nice to be able
+ // to call Align here, but unfortunately we need to align the stack
+ // with all the variable sized arrays pushed. So we just need to do
+ // some calculations.
+ argvSize := s.Arch.Width() * uint(len(args)+1)
+ envvSize := s.Arch.Width() * uint(len(env)+1)
+ auxvSize := s.Arch.Width() * 2 * uint(len(aux)+1)
+ total := usermem.Addr(argvSize) + usermem.Addr(envvSize) + usermem.Addr(auxvSize) + usermem.Addr(s.Arch.Width())
+ expectedBottom := s.Bottom - total
+ if expectedBottom%32 != 0 {
+ s.Bottom -= expectedBottom % 32
+ }
+
+ // Push our auxvec.
+ // NOTE: We need an extra zero here per spec.
+ // The Push function will automatically terminate
+ // strings and arrays with a single null value.
+ auxv := make([]usermem.Addr, 0, len(aux))
+ for _, a := range aux {
+ auxv = append(auxv, usermem.Addr(a.Key), a.Value)
+ }
+ auxv = append(auxv, usermem.Addr(0))
+ _, err := s.Push(auxv)
+ if err != nil {
+ return StackLayout{}, err
+ }
+
+ // Push environment.
+ _, err = s.Push(envAddrs)
+ if err != nil {
+ return StackLayout{}, err
+ }
+
+ // Push args.
+ _, err = s.Push(argAddrs)
+ if err != nil {
+ return StackLayout{}, err
+ }
+
+ // Push arg count.
+ _, err = s.Push(usermem.Addr(len(args)))
+ if err != nil {
+ return StackLayout{}, err
+ }
+
+ return l, nil
+}
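The alignment block in Load above sizes the argc slot plus the argv, envp, and auxv pointer arrays so that, once they are pushed, the final stack bottom lands on a 16-byte (conservatively 32-byte) boundary. A standalone sketch of that size calculation, outside the patch and with hypothetical counts; the return value corresponds to the expectedBottom adjustment above.

package main

import "fmt"

// loadPadding mirrors the alignment step in Stack.Load above: it sizes the
// argc slot plus the argv, envp, and auxv pointer arrays (each array gets a
// trailing NULL, auxv entries are key/value pairs) and returns how far the
// bottom must be lowered so the final bottom is 32-byte aligned.
func loadPadding(bottom uint64, ptrWidth, nArgs, nEnv, nAux int) uint64 {
	argvSize := uint64(ptrWidth * (nArgs + 1))
	envvSize := uint64(ptrWidth * (nEnv + 1))
	auxvSize := uint64(ptrWidth * 2 * (nAux + 1))
	total := argvSize + envvSize + auxvSize + uint64(ptrWidth) // + argc
	return (bottom - total) % 32
}

func main() {
	// Hypothetical values: bottom after the strings were pushed, 8-byte
	// pointers, 2 args, 10 env entries, 6 aux entries.
	fmt.Println(loadPadding(0x7fffffffe8c9, 8, 2, 10, 6))
}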
diff --git a/pkg/sentry/arch/syscalls_amd64.go b/pkg/sentry/arch/syscalls_amd64.go
new file mode 100644
index 000000000..8b4f23007
--- /dev/null
+++ b/pkg/sentry/arch/syscalls_amd64.go
@@ -0,0 +1,52 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+package arch
+
+const restartSyscallNr = uintptr(219)
+
+// SyscallNo returns the syscall number according to the 64-bit convention.
+func (c *context64) SyscallNo() uintptr {
+ return uintptr(c.Regs.Orig_rax)
+}
+
+// SyscallArgs provides syscall arguments according to the 64-bit convention.
+//
+// Due to the way addresses are mapped for the sentry this binary *must* be
+// built in 64-bit mode. So we can just assume the syscall numbers that come
+// back match the expected host system call numbers.
+func (c *context64) SyscallArgs() SyscallArguments {
+ return SyscallArguments{
+ SyscallArgument{Value: uintptr(c.Regs.Rdi)},
+ SyscallArgument{Value: uintptr(c.Regs.Rsi)},
+ SyscallArgument{Value: uintptr(c.Regs.Rdx)},
+ SyscallArgument{Value: uintptr(c.Regs.R10)},
+ SyscallArgument{Value: uintptr(c.Regs.R8)},
+ SyscallArgument{Value: uintptr(c.Regs.R9)},
+ }
+}
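The mapping above is the Linux AMD64 syscall convention: arguments arrive in rdi, rsi, rdx, r10, r8, r9 (r10 rather than rcx, since SYSCALL clobbers rcx with the return rip). A trivial standalone restatement, outside the patch, with a hypothetical read(2) call:

package main

import "fmt"

// amd64SyscallArgs restates SyscallArgs above: the Linux AMD64 convention
// passes syscall arguments in rdi, rsi, rdx, r10, r8, r9.
func amd64SyscallArgs(rdi, rsi, rdx, r10, r8, r9 uint64) [6]uintptr {
	return [6]uintptr{
		uintptr(rdi), uintptr(rsi), uintptr(rdx),
		uintptr(r10), uintptr(r8), uintptr(r9),
	}
}

func main() {
	// Hypothetical read(2): fd=3, buf=0x7f0000001000, count=4096.
	args := amd64SyscallArgs(3, 0x7f0000001000, 4096, 0, 0, 0)
	fmt.Printf("fd=%d buf=%#x count=%d\n", args[0], args[1], args[2])
}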
+
+// RestartSyscall implements Context.RestartSyscall.
+func (c *context64) RestartSyscall() {
+ c.Regs.Rip -= SyscallWidth
+ c.Regs.Rax = c.Regs.Orig_rax
+}
+
+// RestartSyscallWithRestartBlock implements Context.RestartSyscallWithRestartBlock.
+func (c *context64) RestartSyscallWithRestartBlock() {
+ c.Regs.Rip -= SyscallWidth
+ c.Regs.Rax = uint64(restartSyscallNr)
+}