path: root/pkg/sentry/arch
Diffstat (limited to 'pkg/sentry/arch')
-rw-r--r--  pkg/sentry/arch/arch.go                               20
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go            56
-rw-r--r--  pkg/sentry/arch/arch_amd64.go                         32
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go     130
-rw-r--r--  pkg/sentry/arch/arch_arm64.go                         28
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go      74
-rw-r--r--  pkg/sentry/arch/auxv.go                                4
-rw-r--r--  pkg/sentry/arch/fpu/fpu_amd64.go                      16
-rw-r--r--  pkg/sentry/arch/signal.go                             50
-rw-r--r--  pkg/sentry/arch/signal_amd64.go                        8
-rw-r--r--  pkg/sentry/arch/signal_arm64.go                        6
-rw-r--r--  pkg/sentry/arch/signal_stack.go                       10
-rw-r--r--  pkg/sentry/arch/stack.go                              44
-rw-r--r--  pkg/sentry/arch/stack_unsafe.go                        6
14 files changed, 243 insertions, 241 deletions
diff --git a/pkg/sentry/arch/arch.go b/pkg/sentry/arch/arch.go
index 921151137..290863ee6 100644
--- a/pkg/sentry/arch/arch.go
+++ b/pkg/sentry/arch/arch.go
@@ -22,11 +22,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Arch describes an architecture.
@@ -188,11 +188,11 @@ type Context interface {
// returned layout must be no lower than min, and MaxAddr for the returned
// layout must be no higher than max. Repeated calls to NewMmapLayout may
// return different layouts.
- NewMmapLayout(min, max usermem.Addr, limits *limits.LimitSet) (MmapLayout, error)
+ NewMmapLayout(min, max hostarch.Addr, limits *limits.LimitSet) (MmapLayout, error)
// PIELoadAddress returns a preferred load address for a
// position-independent executable within l.
- PIELoadAddress(l MmapLayout) usermem.Addr
+ PIELoadAddress(l MmapLayout) hostarch.Addr
// FeatureSet returns the FeatureSet in use in this context.
FeatureSet() *cpuid.FeatureSet
@@ -257,18 +257,18 @@ const (
// +stateify savable
type MmapLayout struct {
// MinAddr is the lowest mappable address.
- MinAddr usermem.Addr
+ MinAddr hostarch.Addr
// MaxAddr is the highest mappable address.
- MaxAddr usermem.Addr
+ MaxAddr hostarch.Addr
// BottomUpBase is the lowest address that may be returned for a
// MmapBottomUp mmap.
- BottomUpBase usermem.Addr
+ BottomUpBase hostarch.Addr
// TopDownBase is the highest address that may be returned for a
// MmapTopDown mmap.
- TopDownBase usermem.Addr
+ TopDownBase hostarch.Addr
// DefaultDirection is the direction for most non-fixed mmaps in this
// layout.
@@ -316,9 +316,9 @@ type SyscallArgument struct {
// SyscallArguments represents the set of arguments passed to a syscall.
type SyscallArguments [6]SyscallArgument
-// Pointer returns the usermem.Addr representation of a pointer argument.
-func (a SyscallArgument) Pointer() usermem.Addr {
- return usermem.Addr(a.Value)
+// Pointer returns the hostarch.Addr representation of a pointer argument.
+func (a SyscallArgument) Pointer() hostarch.Addr {
+ return hostarch.Addr(a.Value)
}
// Int returns the int32 representation of a 32-bit signed integer argument.
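The hunk above swaps usermem.Addr for hostarch.Addr throughout the Context interface and in SyscallArgument.Pointer. A minimal standalone sketch of that accessor pattern (not part of the change itself), assuming hostarch.Addr keeps the page-arithmetic helpers it had under usermem:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// syscallArgument mirrors arch.SyscallArgument: one raw register value.
type syscallArgument struct {
	Value uintptr
}

// pointer matches the post-migration accessor: the raw value viewed as a
// hostarch.Addr.
func (a syscallArgument) pointer() hostarch.Addr {
	return hostarch.Addr(a.Value)
}

func main() {
	arg := syscallArgument{Value: 0x7f0000001234}
	addr := arg.pointer()
	// RoundDown is one of the page-arithmetic helpers that moved from
	// usermem to hostarch along with Addr.
	fmt.Printf("addr=%#x page=%#x\n", addr, addr.RoundDown())
}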
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index 308a9dfb4..6ab83879c 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -13,9 +13,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -36,11 +36,11 @@ func (s *SignalAct) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalAct) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Handler))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Restorer))
dst = dst[8:]
s.Mask.MarshalBytes(dst[:s.Mask.SizeBytes()])
dst = dst[s.Mask.SizeBytes():]
@@ -48,11 +48,11 @@ func (s *SignalAct) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalAct) UnmarshalBytes(src []byte) {
- s.Handler = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Handler = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Restorer = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Restorer = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.Mask.UnmarshalBytes(src[:s.Mask.SizeBytes()])
src = src[s.Mask.SizeBytes():]
@@ -86,7 +86,7 @@ func (s *SignalAct) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Mask.Packed() {
// Type SignalAct doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -110,13 +110,13 @@ func (s *SignalAct) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit in
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalAct) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalAct) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Mask.Packed() {
// Type SignalAct doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -173,11 +173,11 @@ func (s *SignalInfo) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
@@ -189,11 +189,11 @@ func (s *SignalInfo) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalInfo) UnmarshalBytes(src []byte) {
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Signo = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Errno = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Code = int32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
@@ -221,7 +221,7 @@ func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -238,13 +238,13 @@ func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -282,25 +282,25 @@ func (s *SignalStack) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalStack) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Addr))
dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(uint32)] ~= uint32(0)
dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
dst = dst[8:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalStack) UnmarshalBytes(src []byte) {
- s.Addr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Addr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Flags = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ s.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ uint32 ~= src[:sizeof(uint32)]
src = src[4:]
- s.Size = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Size = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
@@ -322,7 +322,7 @@ func (s *SignalStack) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -339,13 +339,13 @@ func (s *SignalStack) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalStack) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalStack) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
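The generated marshalling code above only changes which package supplies ByteOrder; the field-by-field encoding stays the same. A self-contained sketch of that pattern for the SignalStack layout (not part of the diff), using encoding/binary directly and assuming hostarch.ByteOrder is little-endian as it is on the supported amd64/arm64 hosts:

package main

import (
	"encoding/binary"
	"fmt"
)

// sigStack mirrors the layout SignalStack.MarshalBytes walks: Addr (8 bytes),
// Flags (4 bytes), 4 bytes of padding, Size (8 bytes) -- 24 bytes total.
type sigStack struct {
	Addr  uint64
	Flags uint32
	Size  uint64
}

// marshal writes the struct one fixed-width field at a time, the way the
// autogenerated code does.
func (s *sigStack) marshal(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], s.Addr)
	binary.LittleEndian.PutUint32(dst[8:12], s.Flags)
	// dst[12:16] is padding and stays zero.
	binary.LittleEndian.PutUint64(dst[16:24], s.Size)
}

func main() {
	buf := make([]byte, 24)
	(&sigStack{Addr: 0x7fff0000, Flags: 1, Size: 8192}).marshal(buf)
	fmt.Printf("% x\n", buf)
}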
diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go
index 2571be60f..d6b4d2357 100644
--- a/pkg/sentry/arch/arch_amd64.go
+++ b/pkg/sentry/arch/arch_amd64.go
@@ -23,11 +23,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Host specifies the host architecture.
@@ -37,7 +37,7 @@ const Host = AMD64
const (
// maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux
// for a 64-bit process.
- maxAddr64 usermem.Addr = (1 << 47) - usermem.PageSize
+ maxAddr64 hostarch.Addr = (1 << 47) - hostarch.PageSize
// maxStackRand64 is the maximum randomization to apply to the stack.
// It is defined by arch/x86/mm/mmap.c:stack_maxrandom_size in Linux.
@@ -45,7 +45,7 @@ const (
// maxMmapRand64 is the maximum randomization to apply to the mmap
// layout. It is defined by arch/x86/mm/mmap.c:arch_mmap_rnd in Linux.
- maxMmapRand64 = (1 << 28) * usermem.PageSize
+ maxMmapRand64 = (1 << 28) * hostarch.PageSize
// minGap64 is the minimum gap to leave at the top of the address space
// for the stack. It is defined by arch/x86/mm/mmap.c:MIN_GAP in Linux.
@@ -56,7 +56,7 @@ const (
//
// The Platform {Min,Max}UserAddress() may preclude loading at this
// address. See other preferredFoo comments below.
- preferredPIELoadAddr usermem.Addr = maxAddr64 / 3 * 2
+ preferredPIELoadAddr hostarch.Addr = maxAddr64 / 3 * 2
)
// These constants are selected as heuristics to help make the Platform's
@@ -92,13 +92,13 @@ const (
// This is all "preferred" because the layout min/max address may not
// allow us to select such a TopDownBase, in which case we have to fall
// back to a layout that TSAN may not be happy with.
- preferredTopDownAllocMin usermem.Addr = 0x7e8000000000
- preferredAllocationGap = 128 << 30 // 128 GB
- preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
+ preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000
+ preferredAllocationGap = 128 << 30 // 128 GB
+ preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
// minMmapRand64 is the smallest we are willing to make the
// randomization to stay above preferredTopDownBaseMin.
- minMmapRand64 = (1 << 26) * usermem.PageSize
+ minMmapRand64 = (1 << 26) * hostarch.PageSize
)
// context64 represents an AMD64 context.
@@ -207,12 +207,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet {
}
// mmapRand returns a random adjustment for randomizing an mmap layout.
-func mmapRand(max uint64) usermem.Addr {
- return usermem.Addr(rand.Int63n(int64(max))).RoundDown()
+func mmapRand(max uint64) hostarch.Addr {
+ return hostarch.Addr(rand.Int63n(int64(max))).RoundDown()
}
// NewMmapLayout implements Context.NewMmapLayout consistently with Linux.
-func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) {
+func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) {
min, ok := min.RoundUp()
if !ok {
return MmapLayout{}, unix.EINVAL
@@ -230,7 +230,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
// MAX_GAP in Linux.
maxGap := (max / 6) * 5
- gap := usermem.Addr(stackSize.Cur)
+ gap := hostarch.Addr(stackSize.Cur)
if gap < minGap64 {
gap = minGap64
}
@@ -243,7 +243,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
topDownMin := max - gap - maxMmapRand64
- maxRand := usermem.Addr(maxMmapRand64)
+ maxRand := hostarch.Addr(maxMmapRand64)
if topDownMin < preferredTopDownBaseMin {
// Try to keep TopDownBase above preferredTopDownBaseMin by
// shrinking maxRand.
@@ -278,7 +278,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
// PIELoadAddress implements Context.PIELoadAddress.
-func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr {
+func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr {
base := preferredPIELoadAddr
max, ok := base.AddLength(maxMmapRand64)
if !ok {
@@ -311,7 +311,7 @@ func (c *context64) PtracePeekUser(addr uintptr) (marshal.Marshallable, error) {
regs := c.ptraceGetRegs()
buf := make([]byte, regs.SizeBytes())
regs.MarshalUnsafe(buf)
- return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil
+ return c.Native(uintptr(hostarch.ByteOrder.Uint64(buf[addr:]))), nil
}
// Note: x86 debug registers are missing.
return c.Native(0), nil
@@ -326,7 +326,7 @@ func (c *context64) PtracePokeUser(addr, data uintptr) error {
regs := c.ptraceGetRegs()
buf := make([]byte, regs.SizeBytes())
regs.MarshalUnsafe(buf)
- usermem.ByteOrder.PutUint64(buf[addr:], uint64(data))
+ hostarch.ByteOrder.PutUint64(buf[addr:], uint64(data))
_, err := c.PtraceSetRegs(bytes.NewBuffer(buf))
return err
}
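mmapRand above picks a page-aligned random offset below max and now returns a hostarch.Addr. A standalone sketch of the same computation (not part of the diff), with plain integers standing in for the gvisor types and a 4 KiB page size assumed:

package main

import (
	"fmt"
	"math/rand"
)

const pageSize = 4096

// mmapRandSketch mirrors arch_amd64.go's mmapRand: a random offset in
// [0, max), rounded down to a page boundary. The real code returns a
// hostarch.Addr; a uint64 stands in for it here.
func mmapRandSketch(max uint64) uint64 {
	return uint64(rand.Int63n(int64(max))) &^ (pageSize - 1)
}

func main() {
	// maxMmapRand64 on amd64 is (1 << 28) pages of randomization.
	const maxMmapRand64 = (1 << 28) * pageSize
	fmt.Printf("%#x\n", mmapRandSketch(maxMmapRand64))
}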
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index 767dbfe0b..d8f71795b 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -15,9 +15,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -39,124 +39,124 @@ func (s *SignalContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R8))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R9))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R10))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R11))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R12))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R13))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R14))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.R15))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsi))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rbx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rax))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rcx))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rsp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rip))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Eflags))
dst = dst[8:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Cs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Gs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Fs))
dst = dst[2:]
- usermem.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
+ hostarch.ByteOrder.PutUint16(dst[:2], uint16(s.Ss))
dst = dst[2:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Err))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Trapno))
dst = dst[8:]
s.Oldmask.MarshalBytes(dst[:s.Oldmask.SizeBytes()])
dst = dst[s.Oldmask.SizeBytes():]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Cr2))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Fpstate))
dst = dst[8:]
for idx := 0; idx < 8; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Reserved[idx]))
dst = dst[8:]
}
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.R8 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R8 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R9 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R9 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R10 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R10 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R11 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R11 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R12 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R12 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R13 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R13 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R14 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R14 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.R15 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.R15 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rdi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rsi = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rsi = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rbp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rbp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rbx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rbx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rdx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rdx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rax = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rax = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rcx = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rcx = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rsp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rsp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Rip = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Rip = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Eflags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Eflags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Cs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Cs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Gs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Gs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Fs = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Fs = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Ss = uint16(usermem.ByteOrder.Uint16(src[:2]))
+ s.Ss = uint16(hostarch.ByteOrder.Uint16(src[:2]))
src = src[2:]
- s.Err = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Err = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Trapno = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Trapno = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
s.Oldmask.UnmarshalBytes(src[:s.Oldmask.SizeBytes()])
src = src[s.Oldmask.SizeBytes():]
- s.Cr2 = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Cr2 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Fpstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Fpstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 8; idx++ {
- s.Reserved[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Reserved[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -189,7 +189,7 @@ func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Oldmask.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -213,13 +213,13 @@ func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Oldmask.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -278,9 +278,9 @@ func (u *UContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
dst = dst[8:]
u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
dst = dst[u.Stack.SizeBytes():]
@@ -292,9 +292,9 @@ func (u *UContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
src = src[u.Stack.SizeBytes():]
@@ -332,7 +332,7 @@ func (u *UContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -356,13 +356,13 @@ func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
diff --git a/pkg/sentry/arch/arch_arm64.go b/pkg/sentry/arch/arch_arm64.go
index 14ad9483b..348f238fd 100644
--- a/pkg/sentry/arch/arch_arm64.go
+++ b/pkg/sentry/arch/arch_arm64.go
@@ -22,11 +22,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/usermem"
)
// Host specifies the host architecture.
@@ -36,7 +36,7 @@ const Host = ARM64
const (
// maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux
// for a 64-bit process.
- maxAddr64 usermem.Addr = (1 << 48)
+ maxAddr64 hostarch.Addr = (1 << 48)
// maxStackRand64 is the maximum randomization to apply to the stack.
// It is defined by arch/arm64/mm/mmap.c:(STACK_RND_MASK << PAGE_SHIFT) in Linux.
@@ -44,7 +44,7 @@ const (
// maxMmapRand64 is the maximum randomization to apply to the mmap
// layout. It is defined by arch/arm64/mm/mmap.c:arch_mmap_rnd in Linux.
- maxMmapRand64 = (1 << 33) * usermem.PageSize
+ maxMmapRand64 = (1 << 33) * hostarch.PageSize
// minGap64 is the minimum gap to leave at the top of the address space
// for the stack. It is defined by arch/arm64/mm/mmap.c:MIN_GAP in Linux.
@@ -55,7 +55,7 @@ const (
//
// The Platform {Min,Max}UserAddress() may preclude loading at this
// address. See other preferredFoo comments below.
- preferredPIELoadAddr usermem.Addr = maxAddr64 / 6 * 5
+ preferredPIELoadAddr hostarch.Addr = maxAddr64 / 6 * 5
)
var (
@@ -66,13 +66,13 @@ var (
// These constants are selected as heuristics to help make the Platform's
// potentially limited address space conform as closely to Linux as possible.
const (
- preferredTopDownAllocMin usermem.Addr = 0x7e8000000000
- preferredAllocationGap = 128 << 30 // 128 GB
- preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
+ preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000
+ preferredAllocationGap = 128 << 30 // 128 GB
+ preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap
// minMmapRand64 is the smallest we are willing to make the
// randomization to stay above preferredTopDownBaseMin.
- minMmapRand64 = (1 << 18) * usermem.PageSize
+ minMmapRand64 = (1 << 18) * hostarch.PageSize
)
// context64 represents an ARM64 context.
@@ -187,12 +187,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet {
}
// mmapRand returns a random adjustment for randomizing an mmap layout.
-func mmapRand(max uint64) usermem.Addr {
- return usermem.Addr(rand.Int63n(int64(max))).RoundDown()
+func mmapRand(max uint64) hostarch.Addr {
+ return hostarch.Addr(rand.Int63n(int64(max))).RoundDown()
}
// NewMmapLayout implements Context.NewMmapLayout consistently with Linux.
-func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) {
+func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) {
min, ok := min.RoundUp()
if !ok {
return MmapLayout{}, unix.EINVAL
@@ -210,7 +210,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
// MAX_GAP in Linux.
maxGap := (max / 6) * 5
- gap := usermem.Addr(stackSize.Cur)
+ gap := hostarch.Addr(stackSize.Cur)
if gap < minGap64 {
gap = minGap64
}
@@ -223,7 +223,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
topDownMin := max - gap - maxMmapRand64
- maxRand := usermem.Addr(maxMmapRand64)
+ maxRand := hostarch.Addr(maxMmapRand64)
if topDownMin < preferredTopDownBaseMin {
// Try to keep TopDownBase above preferredTopDownBaseMin by
// shrinking maxRand.
@@ -258,7 +258,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm
}
// PIELoadAddress implements Context.PIELoadAddress.
-func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr {
+func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr {
base := preferredPIELoadAddr
max, ok := base.AddLength(maxMmapRand64)
if !ok {
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index 2faa3a852..40d0c1c75 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -15,9 +15,9 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/safecopy"
- "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
"runtime"
@@ -43,12 +43,12 @@ func (f *FpsimdContext) SizeBytes() int {
func (f *FpsimdContext) MarshalBytes(dst []byte) {
f.Head.MarshalBytes(dst[:f.Head.SizeBytes()])
dst = dst[f.Head.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpsr))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(f.Fpcr))
dst = dst[4:]
for idx := 0; idx < 64; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(f.Vregs[idx]))
dst = dst[8:]
}
}
@@ -57,12 +57,12 @@ func (f *FpsimdContext) MarshalBytes(dst []byte) {
func (f *FpsimdContext) UnmarshalBytes(src []byte) {
f.Head.UnmarshalBytes(src[:f.Head.SizeBytes()])
src = src[f.Head.SizeBytes():]
- f.Fpsr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Fpsr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- f.Fpcr = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ f.Fpcr = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
for idx := 0; idx < 64; idx++ {
- f.Vregs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ f.Vregs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
}
@@ -95,7 +95,7 @@ func (f *FpsimdContext) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !f.Head.Packed() {
// Type FpsimdContext doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -119,13 +119,13 @@ func (f *FpsimdContext) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limi
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FpsimdContext) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return f.CopyOutN(cc, addr, f.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (f *FpsimdContext) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !f.Head.Packed() {
// Type FpsimdContext doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
@@ -184,17 +184,17 @@ func (s *SignalContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SignalContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr))
dst = dst[8:]
for idx := 0; idx < 31; idx++ {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx]))
dst = dst[8:]
}
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Sp))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pc))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate))
dst = dst[8:]
for idx := 0; idx < 8; idx++ {
dst[0] = byte(s._pad[idx])
@@ -206,17 +206,17 @@ func (s *SignalContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SignalContext64) UnmarshalBytes(src []byte) {
- s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.FaultAddr = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 31; idx++ {
- s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
}
- s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ s.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
for idx := 0; idx < 8; idx++ {
s._pad[idx] = src[0]
@@ -254,7 +254,7 @@ func (s *SignalContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !s.Fpsimd64.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -278,13 +278,13 @@ func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return s.CopyOutN(cc, addr, s.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !s.Fpsimd64.Packed() {
// Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
@@ -345,9 +345,9 @@ func (u *UContext64) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ hostarch.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
dst = dst[8:]
u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
dst = dst[u.Stack.SizeBytes():]
@@ -367,9 +367,9 @@ func (u *UContext64) MarshalBytes(dst []byte) {
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Flags = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ u.Link = uint64(hostarch.ByteOrder.Uint64(src[:8]))
src = src[8:]
u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
src = src[u.Stack.SizeBytes():]
@@ -415,7 +415,7 @@ func (u *UContext64) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -439,13 +439,13 @@ func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return u.CopyOutN(cc, addr, u.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
// Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
@@ -501,17 +501,17 @@ func (a *aarch64Ctx) SizeBytes() int {
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (a *aarch64Ctx) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Magic))
dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
+ hostarch.ByteOrder.PutUint32(dst[:4], uint32(a.Size))
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (a *aarch64Ctx) UnmarshalBytes(src []byte) {
- a.Magic = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ a.Magic = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
- a.Size = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ a.Size = uint32(hostarch.ByteOrder.Uint32(src[:4]))
src = src[4:]
}
@@ -533,7 +533,7 @@ func (a *aarch64Ctx) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -550,13 +550,13 @@ func (a *aarch64Ctx) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit i
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (a *aarch64Ctx) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
return a.CopyOutN(cc, addr, a.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (a *aarch64Ctx) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
diff --git a/pkg/sentry/arch/auxv.go b/pkg/sentry/arch/auxv.go
index 2b4c8f3fc..19ca18121 100644
--- a/pkg/sentry/arch/auxv.go
+++ b/pkg/sentry/arch/auxv.go
@@ -15,7 +15,7 @@
package arch
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// An AuxEntry represents an entry in an ELF auxiliary vector.
@@ -23,7 +23,7 @@ import (
// +stateify savable
type AuxEntry struct {
Key uint64
- Value usermem.Addr
+ Value hostarch.Addr
}
// An Auxv represents an ELF auxiliary vector.
diff --git a/pkg/sentry/arch/fpu/fpu_amd64.go b/pkg/sentry/arch/fpu/fpu_amd64.go
index 3a62f51be..1e9625bee 100644
--- a/pkg/sentry/arch/fpu/fpu_amd64.go
+++ b/pkg/sentry/arch/fpu/fpu_amd64.go
@@ -21,9 +21,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
)
// initX86FPState (defined in asm files) sets up initial state.
@@ -146,11 +146,11 @@ const (
// any of the reserved bits of the MXCSR register." - Intel SDM Vol. 1, Section
// 10.5.1.2 "SSE State")
func sanitizeMXCSR(f State) {
- mxcsr := usermem.ByteOrder.Uint32(f[mxcsrOffset:])
+ mxcsr := hostarch.ByteOrder.Uint32(f[mxcsrOffset:])
initMXCSRMask.Do(func() {
temp := State(alignedBytes(uint(ptraceFPRegsSize), 16))
initX86FPState(&temp[0], false /* useXsave */)
- mxcsrMask = usermem.ByteOrder.Uint32(temp[mxcsrMaskOffset:])
+ mxcsrMask = hostarch.ByteOrder.Uint32(temp[mxcsrMaskOffset:])
if mxcsrMask == 0 {
// "If the value of the MXCSR_MASK field is 00000000H, then the
// MXCSR_MASK value is the default value of 0000FFBFH." - Intel SDM
@@ -160,7 +160,7 @@ func sanitizeMXCSR(f State) {
}
})
mxcsr &= mxcsrMask
- usermem.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)
+ hostarch.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr)
}
// PtraceGetXstateRegs implements ptrace(PTRACE_GETREGS, NT_X86_XSTATE) by
@@ -177,7 +177,7 @@ func (s *State) PtraceGetXstateRegs(dst io.Writer, maxlen int, featureSet *cpuid
// Area". Linux uses the first 8 bytes of this area to store the OS XSTATE
// mask. GDB relies on this: see
// gdb/x86-linux-nat.c:x86_linux_read_description().
- usermem.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask())
+ hostarch.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask())
if len(f) > maxlen {
f = f[:maxlen]
}
@@ -208,9 +208,9 @@ func (s *State) PtraceSetXstateRegs(src io.Reader, maxlen int, featureSet *cpuid
// Force reserved bits in MXCSR to 0. This is consistent with Linux.
sanitizeMXCSR(State(f))
// Users can't enable *more* XCR0 bits than what we, and the CPU, support.
- xstateBV := usermem.ByteOrder.Uint64(f[xstateBVOffset:])
+ xstateBV := hostarch.ByteOrder.Uint64(f[xstateBVOffset:])
xstateBV &= featureSet.ValidXCR0Mask()
- usermem.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV)
+ hostarch.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV)
// Force XCOMP_BV and reserved bytes in the XSAVE header to 0.
reserved := f[xsaveHeaderZeroedOffset : xsaveHeaderZeroedOffset+xsaveHeaderZeroedBytes]
for i := range reserved {
@@ -266,7 +266,7 @@ func (s *State) AfterLoad() {
// What was in use?
savedBV := fxsaveBV
if len(old) >= xstateBVOffset+8 {
- savedBV = usermem.ByteOrder.Uint64(old[xstateBVOffset:])
+ savedBV = hostarch.ByteOrder.Uint64(old[xstateBVOffset:])
}
// Supported features must be a superset of saved features.
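sanitizeMXCSR above clears reserved MXCSR bits before user-supplied FPU state is loaded. A rough standalone sketch of the mask-and-write-back step (not part of the diff); the offset follows the FXSAVE layout and the mask uses the SDM default rather than being probed from the CPU as the real code does via initX86FPState:

package main

import (
	"encoding/binary"
	"fmt"
)

// mxcsrOffset is the MXCSR offset within the legacy FXSAVE area.
const mxcsrOffset = 24

// defaultMXCSRMask is the fallback the Intel SDM documents when the CPU
// reports a zero MXCSR_MASK.
const defaultMXCSRMask = 0x0000ffbf

// sanitizeMXCSRSketch clears reserved MXCSR bits in an FPU state buffer.
func sanitizeMXCSRSketch(state []byte, mask uint32) {
	mxcsr := binary.LittleEndian.Uint32(state[mxcsrOffset:])
	mxcsr &= mask
	binary.LittleEndian.PutUint32(state[mxcsrOffset:], mxcsr)
}

func main() {
	state := make([]byte, 512) // legacy FXSAVE area size
	binary.LittleEndian.PutUint32(state[mxcsrOffset:], 0xffffffff)
	sanitizeMXCSRSketch(state, defaultMXCSRMask)
	fmt.Printf("mxcsr=%#x\n", binary.LittleEndian.Uint32(state[mxcsrOffset:]))
}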
diff --git a/pkg/sentry/arch/signal.go b/pkg/sentry/arch/signal.go
index 35d2e07c3..67d7edf68 100644
--- a/pkg/sentry/arch/signal.go
+++ b/pkg/sentry/arch/signal.go
@@ -16,7 +16,7 @@ package arch
import (
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// SignalAct represents the action that should be taken when a signal is
@@ -154,107 +154,107 @@ func (s *SignalInfo) FixSignalCodeForUser() {
// PID returns the si_pid field.
func (s *SignalInfo) PID() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[0:4]))
}
// SetPID mutates the si_pid field.
func (s *SignalInfo) SetPID(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
}
// UID returns the si_uid field.
func (s *SignalInfo) UID() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8]))
}
// SetUID mutates the si_uid field.
func (s *SignalInfo) SetUID(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
}
// Sigval returns the sigval field, which is aliased to both si_int and si_ptr.
func (s *SignalInfo) Sigval() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[8:16])
+ return hostarch.ByteOrder.Uint64(s.Fields[8:16])
}
// SetSigval mutates the sigval field.
func (s *SignalInfo) SetSigval(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[8:16], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[8:16], val)
}
// TimerID returns the si_timerid field.
func (s *SignalInfo) TimerID() linux.TimerID {
- return linux.TimerID(usermem.ByteOrder.Uint32(s.Fields[0:4]))
+ return linux.TimerID(hostarch.ByteOrder.Uint32(s.Fields[0:4]))
}
// SetTimerID sets the si_timerid field.
func (s *SignalInfo) SetTimerID(val linux.TimerID) {
- usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val))
}
// Overrun returns the si_overrun field.
func (s *SignalInfo) Overrun() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[4:8]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8]))
}
// SetOverrun sets the si_overrun field.
func (s *SignalInfo) SetOverrun(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val))
}
// Addr returns the si_addr field.
func (s *SignalInfo) Addr() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[0:8])
+ return hostarch.ByteOrder.Uint64(s.Fields[0:8])
}
// SetAddr sets the si_addr field.
func (s *SignalInfo) SetAddr(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], val)
}
// Status returns the si_status field.
func (s *SignalInfo) Status() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12]))
}
// SetStatus mutates the si_status field.
func (s *SignalInfo) SetStatus(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
}
// CallAddr returns the si_call_addr field.
func (s *SignalInfo) CallAddr() uint64 {
- return usermem.ByteOrder.Uint64(s.Fields[0:8])
+ return hostarch.ByteOrder.Uint64(s.Fields[0:8])
}
// SetCallAddr mutates the si_call_addr field.
func (s *SignalInfo) SetCallAddr(val uint64) {
- usermem.ByteOrder.PutUint64(s.Fields[0:8], val)
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], val)
}
// Syscall returns the si_syscall field.
func (s *SignalInfo) Syscall() int32 {
- return int32(usermem.ByteOrder.Uint32(s.Fields[8:12]))
+ return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12]))
}
// SetSyscall mutates the si_syscall field.
func (s *SignalInfo) SetSyscall(val int32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val))
}
// Arch returns the si_arch field.
func (s *SignalInfo) Arch() uint32 {
- return usermem.ByteOrder.Uint32(s.Fields[12:16])
+ return hostarch.ByteOrder.Uint32(s.Fields[12:16])
}
// SetArch mutates the si_arch field.
func (s *SignalInfo) SetArch(val uint32) {
- usermem.ByteOrder.PutUint32(s.Fields[12:16], val)
+ hostarch.ByteOrder.PutUint32(s.Fields[12:16], val)
}
// Band returns the si_band field.
func (s *SignalInfo) Band() int64 {
- return int64(usermem.ByteOrder.Uint64(s.Fields[0:8]))
+ return int64(hostarch.ByteOrder.Uint64(s.Fields[0:8]))
}
// SetBand mutates the si_band field.
@@ -262,15 +262,15 @@ func (s *SignalInfo) SetBand(val int64) {
// Note: this assumes the platform uses `long` as `__ARCH_SI_BAND_T`.
// On some platforms, which gVisor doesn't support, `__ARCH_SI_BAND_T` is
// `int`. See siginfo.h.
- usermem.ByteOrder.PutUint64(s.Fields[0:8], uint64(val))
+ hostarch.ByteOrder.PutUint64(s.Fields[0:8], uint64(val))
}
// FD returns the si_fd field.
func (s *SignalInfo) FD() uint32 {
- return usermem.ByteOrder.Uint32(s.Fields[8:12])
+ return hostarch.ByteOrder.Uint32(s.Fields[8:12])
}
// SetFD mutates the si_fd field.
func (s *SignalInfo) SetFD(val uint32) {
- usermem.ByteOrder.PutUint32(s.Fields[8:12], val)
+ hostarch.ByteOrder.PutUint32(s.Fields[8:12], val)
}
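The SignalInfo accessors above read and write union members at fixed offsets inside the flat Fields array. A tiny standalone sketch of that offset-based encoding for si_pid (not part of the diff), assuming little-endian byte order as in hostarch.ByteOrder:

package main

import (
	"encoding/binary"
	"fmt"
)

// signalInfoSketch mimics the Fields union that SignalInfo indexes into:
// si_pid at bytes 0-4, si_uid at 4-8, sigval at 8-16, and so on.
type signalInfoSketch struct {
	Fields [128 - 16]byte
}

func (s *signalInfoSketch) SetPID(pid int32) {
	binary.LittleEndian.PutUint32(s.Fields[0:4], uint32(pid))
}

func (s *signalInfoSketch) PID() int32 {
	return int32(binary.LittleEndian.Uint32(s.Fields[0:4]))
}

func main() {
	var si signalInfoSketch
	si.SetPID(1234)
	fmt.Println(si.PID())
}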
diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go
index ee3743483..082ed92b1 100644
--- a/pkg/sentry/arch/signal_amd64.go
+++ b/pkg/sentry/arch/signal_amd64.go
@@ -21,10 +21,10 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
@@ -133,7 +133,7 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
// space on the user stack naturally caps the amount of memory the
// sentry will allocate for this purpose.
fpSize, _ := c.fpuFrameSize()
- sp = (sp - usermem.Addr(fpSize)) & ^usermem.Addr(63)
+ sp = (sp - hostarch.Addr(fpSize)) & ^hostarch.Addr(63)
// Construct the UContext64 now since we need its size.
uc := &UContext64{
@@ -180,8 +180,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
ucSize := uc.SizeBytes()
// st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128.
frameSize := int(st.Arch.Width()) + ucSize + 128
- frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8
- sp = frameBottom + usermem.Addr(frameSize)
+ frameBottom := (sp-hostarch.Addr(frameSize)) & ^hostarch.Addr(15) - 8
+ sp = frameBottom + hostarch.Addr(frameSize)
st.Bottom = sp
// Prior to proceeding, figure out if the frame will exhaust the range
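The frame placement above carves frameSize bytes below sp, rounds down to a 16-byte boundary, and subtracts 8 so the pushed restorer address leaves the stack ABI-aligned when the handler starts. A standalone sketch of that arithmetic (not part of the diff), with plain uint64s standing in for hostarch.Addr:

package main

import "fmt"

// alignSignalFrame mirrors the amd64 placement in SignalSetup: round the
// frame bottom down to 16 bytes, then subtract 8 for the restorer slot.
func alignSignalFrame(sp, frameSize uint64) (frameBottom, newSP uint64) {
	frameBottom = (sp-frameSize)&^15 - 8
	newSP = frameBottom + frameSize
	return frameBottom, newSP
}

func main() {
	// 0x5c8 is just an illustrative frame size.
	bottom, sp := alignSignalFrame(0x7fffffffe000, 0x5c8)
	fmt.Printf("frameBottom=%#x sp=%#x\n", bottom, sp)
}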
diff --git a/pkg/sentry/arch/signal_arm64.go b/pkg/sentry/arch/signal_arm64.go
index 53281dcba..da71fb873 100644
--- a/pkg/sentry/arch/signal_arm64.go
+++ b/pkg/sentry/arch/signal_arm64.go
@@ -19,9 +19,9 @@ package arch
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
- "gvisor.dev/gvisor/pkg/usermem"
)
// SignalContext64 is equivalent to struct sigcontext, the type passed as the
@@ -107,8 +107,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt
// sizeof(siginfo) == 128.
// R30 stores the restorer address.
frameSize := ucSize + 128
- frameBottom := (sp - usermem.Addr(frameSize)) & ^usermem.Addr(15)
- sp = frameBottom + usermem.Addr(frameSize)
+ frameBottom := (sp - hostarch.Addr(frameSize)) & ^hostarch.Addr(15)
+ sp = frameBottom + hostarch.Addr(frameSize)
st.Bottom = sp
// Prior to proceeding, figure out if the frame will exhaust the range
diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go
index a1eae98f9..c732c7503 100644
--- a/pkg/sentry/arch/signal_stack.go
+++ b/pkg/sentry/arch/signal_stack.go
@@ -17,8 +17,8 @@
package arch
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/usermem"
)
const (
@@ -36,8 +36,8 @@ func (s SignalStack) IsEnabled() bool {
}
// Top returns the stack's top address.
-func (s SignalStack) Top() usermem.Addr {
- return usermem.Addr(s.Addr + s.Size)
+func (s SignalStack) Top() hostarch.Addr {
+ return hostarch.Addr(s.Addr + s.Size)
}
// SetOnStack marks this signal stack as in use.
@@ -49,8 +49,8 @@ func (s *SignalStack) SetOnStack() {
}
// Contains checks if the stack pointer is within this stack.
-func (s *SignalStack) Contains(sp usermem.Addr) bool {
- return usermem.Addr(s.Addr) < sp && sp <= usermem.Addr(s.Addr+s.Size)
+func (s *SignalStack) Contains(sp hostarch.Addr) bool {
+ return hostarch.Addr(s.Addr) < sp && sp <= hostarch.Addr(s.Addr+s.Size)
}
// NativeSignalStack is a type that is equivalent to stack_t in the guest
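Contains above treats a stack pointer as being on the alternate stack when Addr < sp <= Addr+Size. A standalone sketch of that check with plain integers (not part of the diff):

package main

import "fmt"

// altStack mirrors the fields that Contains and Top read.
type altStack struct {
	Addr uint64
	Size uint64
}

// contains reports whether sp falls within (Addr, Addr+Size].
func (s altStack) contains(sp uint64) bool {
	return s.Addr < sp && sp <= s.Addr+s.Size
}

func main() {
	ss := altStack{Addr: 0x1000, Size: 0x2000}
	fmt.Println(ss.contains(0x1800), ss.contains(0x4000)) // true false
}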
diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go
index 5f06c751d..65a794c7c 100644
--- a/pkg/sentry/arch/stack.go
+++ b/pkg/sentry/arch/stack.go
@@ -16,18 +16,20 @@ package arch
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
+
"gvisor.dev/gvisor/pkg/usermem"
)
-// Stack is a simple wrapper around a usermem.IO and an address. Stack
+// Stack is a simple wrapper around a hostarch.IO and an address. Stack
// implements marshal.CopyContext, and marshallable values can be pushed or
// popped from the stack through the marshal.Marshallable interface.
//
// Stack is not thread-safe.
type Stack struct {
// Our arch info.
- // We use this for automatic Native conversion of usermem.Addrs during
+ // We use this for automatic Native conversion of hostarch.Addrs during
// Push() and Pop().
Arch Context
@@ -35,7 +37,7 @@ type Stack struct {
IO usermem.IO
// Our current stack bottom.
- Bottom usermem.Addr
+ Bottom hostarch.Addr
// Scratch buffer used for marshalling to avoid having to repeatedly
// allocate scratch memory.
@@ -59,20 +61,20 @@ func (s *Stack) CopyScratchBuffer(size int) []byte {
// StackBottomMagic is the special address callers must pass to all stack
// marshalling operations to cause the src/dst address to be computed based on
// the current end of the stack.
-const StackBottomMagic = ^usermem.Addr(0) // usermem.Addr(-1)
+const StackBottomMagic = ^hostarch.Addr(0) // hostarch.Addr(-1)
// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. CopyOutBytes
// computes an appropriate address based on the current end of the
// stack. Callers use the sentinel address StackBottomMagic to marshal methods
// to indicate this.
-func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) {
+func (s *Stack) CopyOutBytes(sentinel hostarch.Addr, b []byte) (int, error) {
if sentinel != StackBottomMagic {
panic("Attempted to copy out to stack with absolute address")
}
c := len(b)
- n, err := s.IO.CopyOut(context.Background(), s.Bottom-usermem.Addr(c), b, usermem.IOOpts{})
+ n, err := s.IO.CopyOut(context.Background(), s.Bottom-hostarch.Addr(c), b, usermem.IOOpts{})
if err == nil && n == c {
- s.Bottom -= usermem.Addr(n)
+ s.Bottom -= hostarch.Addr(n)
}
return n, err
}
@@ -81,21 +83,21 @@ func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) {
// an appropriate address based on the current end of the stack. Callers must
// use the sentinel address StackBottomMagic to marshal methods to indicate
// this.
-func (s *Stack) CopyInBytes(sentinel usermem.Addr, b []byte) (int, error) {
+func (s *Stack) CopyInBytes(sentinel hostarch.Addr, b []byte) (int, error) {
if sentinel != StackBottomMagic {
panic("Attempted to copy in from stack with absolute address")
}
n, err := s.IO.CopyIn(context.Background(), s.Bottom, b, usermem.IOOpts{})
if err == nil {
- s.Bottom += usermem.Addr(n)
+ s.Bottom += hostarch.Addr(n)
}
return n, err
}
// Align aligns the stack to the given offset.
func (s *Stack) Align(offset int) {
- if s.Bottom%usermem.Addr(offset) != 0 {
- s.Bottom -= (s.Bottom % usermem.Addr(offset))
+ if s.Bottom%hostarch.Addr(offset) != 0 {
+ s.Bottom -= (s.Bottom % hostarch.Addr(offset))
}
}
@@ -119,16 +121,16 @@ func (s *Stack) PushNullTerminatedByteSlice(bs []byte) (int, error) {
// stack.
type StackLayout struct {
// ArgvStart is the beginning of the argument vector.
- ArgvStart usermem.Addr
+ ArgvStart hostarch.Addr
// ArgvEnd is the end of the argument vector.
- ArgvEnd usermem.Addr
+ ArgvEnd hostarch.Addr
// EnvvStart is the beginning of the environment vector.
- EnvvStart usermem.Addr
+ EnvvStart hostarch.Addr
// EnvvEnd is the end of the environment vector.
- EnvvEnd usermem.Addr
+ EnvvEnd hostarch.Addr
}
// Load pushes the given args, env and aux vector to the stack using the
@@ -148,7 +150,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// to be in this order. See: https://www.uclibc.org/docs/psABI-x86_64.pdf
// page 29.
l.EnvvEnd = s.Bottom
- envAddrs := make([]usermem.Addr, len(env))
+ envAddrs := make([]hostarch.Addr, len(env))
for i := len(env) - 1; i >= 0; i-- {
if _, err := s.PushNullTerminatedByteSlice([]byte(env[i])); err != nil {
return StackLayout{}, err
@@ -159,7 +161,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// Push our strings.
l.ArgvEnd = s.Bottom
- argAddrs := make([]usermem.Addr, len(args))
+ argAddrs := make([]hostarch.Addr, len(args))
for i := len(args) - 1; i >= 0; i-- {
if _, err := s.PushNullTerminatedByteSlice([]byte(args[i])); err != nil {
return StackLayout{}, err
@@ -178,7 +180,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
argvSize := s.Arch.Width() * uint(len(args)+1)
envvSize := s.Arch.Width() * uint(len(env)+1)
auxvSize := s.Arch.Width() * 2 * uint(len(aux)+1)
- total := usermem.Addr(argvSize) + usermem.Addr(envvSize) + usermem.Addr(auxvSize) + usermem.Addr(s.Arch.Width())
+ total := hostarch.Addr(argvSize) + hostarch.Addr(envvSize) + hostarch.Addr(auxvSize) + hostarch.Addr(s.Arch.Width())
expectedBottom := s.Bottom - total
if expectedBottom%32 != 0 {
s.Bottom -= expectedBottom % 32
@@ -188,11 +190,11 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error)
// NOTE: We need an extra zero here per spec.
// The Push function will automatically terminate
// strings and arrays with a single null value.
- auxv := make([]usermem.Addr, 0, len(aux))
+ auxv := make([]hostarch.Addr, 0, len(aux))
for _, a := range aux {
- auxv = append(auxv, usermem.Addr(a.Key), a.Value)
+ auxv = append(auxv, hostarch.Addr(a.Key), a.Value)
}
- auxv = append(auxv, usermem.Addr(0))
+ auxv = append(auxv, hostarch.Addr(0))
_, err := s.pushAddrSliceAndTerminator(auxv)
if err != nil {
return StackLayout{}, err
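Stack.CopyOutBytes above writes data so that it ends at the current Bottom and then moves Bottom down, since the stack grows downward. A toy standalone sketch of that push behavior (not part of the diff), with an in-memory buffer standing in for usermem.IO:

package main

import "fmt"

// stackSketch stands in for arch.Stack: mem plays the role of usermem.IO
// and bottom is the current stack bottom.
type stackSketch struct {
	mem    []byte
	bottom uint64
}

// push mirrors CopyOutBytes: the data ends at the current bottom, and the
// bottom moves down by the number of bytes written.
func (s *stackSketch) push(b []byte) {
	start := s.bottom - uint64(len(b))
	copy(s.mem[start:s.bottom], b)
	s.bottom = start
}

func main() {
	s := &stackSketch{mem: make([]byte, 64), bottom: 64}
	s.push([]byte("arg\x00")) // null-terminated, like PushNullTerminatedByteSlice
	fmt.Printf("bottom=%d mem=%q\n", s.bottom, s.mem[s.bottom:])
}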
diff --git a/pkg/sentry/arch/stack_unsafe.go b/pkg/sentry/arch/stack_unsafe.go
index 0e478e434..f4712d58f 100644
--- a/pkg/sentry/arch/stack_unsafe.go
+++ b/pkg/sentry/arch/stack_unsafe.go
@@ -17,19 +17,19 @@ package arch
import (
"unsafe"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/usermem"
)
// pushAddrSliceAndTerminator copies a slices of addresses to the stack, and
// also pushes an extra null address element at the end of the slice.
//
// Internally, we unsafely transmute the slice type from the arch-dependent
-// []usermem.Addr type, to a slice of fixed-sized ints so that we can pass it to
+// []hostarch.Addr type, to a slice of fixed-sized ints so that we can pass it to
// go-marshal.
//
// On error, the contents of the stack and the bottom cursor are undefined.
-func (s *Stack) pushAddrSliceAndTerminator(src []usermem.Addr) (int, error) {
+func (s *Stack) pushAddrSliceAndTerminator(src []hostarch.Addr) (int, error) {
// Note: Stack grows upwards, so push the terminator first.
switch s.Arch.Width() {
case 8: