Diffstat (limited to 'pkg/sentry')
252 files changed, 1656 insertions, 1518 deletions
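The bulk of this change is a mechanical migration: usermem.Addr, usermem.PageSize, usermem.ByteOrder, and the access-type/address-range helpers move to the new //pkg/hostarch package, while usermem.IO (actual guest-memory access, as in arch/stack.go) stays in //pkg/usermem. As a rough orientation for the rewritten call sites below, here is a minimal, self-contained Go sketch of the helpers they rely on; the types are illustrative stand-ins with assumed semantics, not the gVisor definitions from pkg/hostarch.

package main

import (
	"encoding/binary"
	"fmt"
)

// Stand-ins for the hostarch helpers exercised throughout this diff.
// Assumed semantics: 4 KiB pages and little-endian byte order, matching
// what the old usermem constants provided.
const PageSize = 4096

type Addr uintptr

// RoundDown rounds the address down to a page boundary.
func (v Addr) RoundDown() Addr { return v &^ (PageSize - 1) }

// RoundUp rounds the address up to a page boundary; ok is false on overflow.
func (v Addr) RoundUp() (Addr, bool) {
	addr := v + PageSize - 1
	if addr < v {
		return 0, false
	}
	return addr.RoundDown(), true
}

// ByteOrder plays the role of hostarch.ByteOrder in the signal and ioctl
// accessors rewritten below.
var ByteOrder = binary.LittleEndian

func main() {
	a := Addr(0x7ffff123)
	fmt.Printf("%#x\n", a.RoundDown()) // 0x7ffff000
	if up, ok := a.RoundUp(); ok {
		fmt.Printf("%#x\n", up) // 0x80000000
	}

	// The SignalInfo accessors (si_pid, si_uid, ...) are fixed-offset
	// reads/writes through ByteOrder, exactly as before the rename.
	var fields [16]byte
	ByteOrder.PutUint32(fields[0:4], 42)
	fmt.Println(int32(ByteOrder.Uint32(fields[0:4])))
}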
diff --git a/pkg/sentry/arch/BUILD b/pkg/sentry/arch/BUILD index f660f1614..c9c52530d 100644 --- a/pkg/sentry/arch/BUILD +++ b/pkg/sentry/arch/BUILD @@ -32,6 +32,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/cpuid", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", diff --git a/pkg/sentry/arch/arch.go b/pkg/sentry/arch/arch.go index 921151137..290863ee6 100644 --- a/pkg/sentry/arch/arch.go +++ b/pkg/sentry/arch/arch.go @@ -22,11 +22,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/limits" - "gvisor.dev/gvisor/pkg/usermem" ) // Arch describes an architecture. @@ -188,11 +188,11 @@ type Context interface { // returned layout must be no lower than min, and MaxAddr for the returned // layout must be no higher than max. Repeated calls to NewMmapLayout may // return different layouts. - NewMmapLayout(min, max usermem.Addr, limits *limits.LimitSet) (MmapLayout, error) + NewMmapLayout(min, max hostarch.Addr, limits *limits.LimitSet) (MmapLayout, error) // PIELoadAddress returns a preferred load address for a // position-independent executable within l. - PIELoadAddress(l MmapLayout) usermem.Addr + PIELoadAddress(l MmapLayout) hostarch.Addr // FeatureSet returns the FeatureSet in use in this context. FeatureSet() *cpuid.FeatureSet @@ -257,18 +257,18 @@ const ( // +stateify savable type MmapLayout struct { // MinAddr is the lowest mappable address. - MinAddr usermem.Addr + MinAddr hostarch.Addr // MaxAddr is the highest mappable address. - MaxAddr usermem.Addr + MaxAddr hostarch.Addr // BottomUpBase is the lowest address that may be returned for a // MmapBottomUp mmap. - BottomUpBase usermem.Addr + BottomUpBase hostarch.Addr // TopDownBase is the highest address that may be returned for a // MmapTopDown mmap. - TopDownBase usermem.Addr + TopDownBase hostarch.Addr // DefaultDirection is the direction for most non-fixed mmaps in this // layout. @@ -316,9 +316,9 @@ type SyscallArgument struct { // SyscallArguments represents the set of arguments passed to a syscall. type SyscallArguments [6]SyscallArgument -// Pointer returns the usermem.Addr representation of a pointer argument. -func (a SyscallArgument) Pointer() usermem.Addr { - return usermem.Addr(a.Value) +// Pointer returns the hostarch.Addr representation of a pointer argument. +func (a SyscallArgument) Pointer() hostarch.Addr { + return hostarch.Addr(a.Value) } // Int returns the int32 representation of a 32-bit signed integer argument. diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go index 2571be60f..d6b4d2357 100644 --- a/pkg/sentry/arch/arch_amd64.go +++ b/pkg/sentry/arch/arch_amd64.go @@ -23,11 +23,11 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/limits" - "gvisor.dev/gvisor/pkg/usermem" ) // Host specifies the host architecture. @@ -37,7 +37,7 @@ const Host = AMD64 const ( // maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux // for a 64-bit process. - maxAddr64 usermem.Addr = (1 << 47) - usermem.PageSize + maxAddr64 hostarch.Addr = (1 << 47) - hostarch.PageSize // maxStackRand64 is the maximum randomization to apply to the stack. 
// It is defined by arch/x86/mm/mmap.c:stack_maxrandom_size in Linux. @@ -45,7 +45,7 @@ const ( // maxMmapRand64 is the maximum randomization to apply to the mmap // layout. It is defined by arch/x86/mm/mmap.c:arch_mmap_rnd in Linux. - maxMmapRand64 = (1 << 28) * usermem.PageSize + maxMmapRand64 = (1 << 28) * hostarch.PageSize // minGap64 is the minimum gap to leave at the top of the address space // for the stack. It is defined by arch/x86/mm/mmap.c:MIN_GAP in Linux. @@ -56,7 +56,7 @@ const ( // // The Platform {Min,Max}UserAddress() may preclude loading at this // address. See other preferredFoo comments below. - preferredPIELoadAddr usermem.Addr = maxAddr64 / 3 * 2 + preferredPIELoadAddr hostarch.Addr = maxAddr64 / 3 * 2 ) // These constants are selected as heuristics to help make the Platform's @@ -92,13 +92,13 @@ const ( // This is all "preferred" because the layout min/max address may not // allow us to select such a TopDownBase, in which case we have to fall // back to a layout that TSAN may not be happy with. - preferredTopDownAllocMin usermem.Addr = 0x7e8000000000 - preferredAllocationGap = 128 << 30 // 128 GB - preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap + preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000 + preferredAllocationGap = 128 << 30 // 128 GB + preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap // minMmapRand64 is the smallest we are willing to make the // randomization to stay above preferredTopDownBaseMin. - minMmapRand64 = (1 << 26) * usermem.PageSize + minMmapRand64 = (1 << 26) * hostarch.PageSize ) // context64 represents an AMD64 context. @@ -207,12 +207,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet { } // mmapRand returns a random adjustment for randomizing an mmap layout. -func mmapRand(max uint64) usermem.Addr { - return usermem.Addr(rand.Int63n(int64(max))).RoundDown() +func mmapRand(max uint64) hostarch.Addr { + return hostarch.Addr(rand.Int63n(int64(max))).RoundDown() } // NewMmapLayout implements Context.NewMmapLayout consistently with Linux. -func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) { +func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) { min, ok := min.RoundUp() if !ok { return MmapLayout{}, unix.EINVAL @@ -230,7 +230,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm // MAX_GAP in Linux. maxGap := (max / 6) * 5 - gap := usermem.Addr(stackSize.Cur) + gap := hostarch.Addr(stackSize.Cur) if gap < minGap64 { gap = minGap64 } @@ -243,7 +243,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm } topDownMin := max - gap - maxMmapRand64 - maxRand := usermem.Addr(maxMmapRand64) + maxRand := hostarch.Addr(maxMmapRand64) if topDownMin < preferredTopDownBaseMin { // Try to keep TopDownBase above preferredTopDownBaseMin by // shrinking maxRand. @@ -278,7 +278,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm } // PIELoadAddress implements Context.PIELoadAddress. 
-func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr { +func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr { base := preferredPIELoadAddr max, ok := base.AddLength(maxMmapRand64) if !ok { @@ -311,7 +311,7 @@ func (c *context64) PtracePeekUser(addr uintptr) (marshal.Marshallable, error) { regs := c.ptraceGetRegs() buf := make([]byte, regs.SizeBytes()) regs.MarshalUnsafe(buf) - return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil + return c.Native(uintptr(hostarch.ByteOrder.Uint64(buf[addr:]))), nil } // Note: x86 debug registers are missing. return c.Native(0), nil @@ -326,7 +326,7 @@ func (c *context64) PtracePokeUser(addr, data uintptr) error { regs := c.ptraceGetRegs() buf := make([]byte, regs.SizeBytes()) regs.MarshalUnsafe(buf) - usermem.ByteOrder.PutUint64(buf[addr:], uint64(data)) + hostarch.ByteOrder.PutUint64(buf[addr:], uint64(data)) _, err := c.PtraceSetRegs(bytes.NewBuffer(buf)) return err } diff --git a/pkg/sentry/arch/arch_arm64.go b/pkg/sentry/arch/arch_arm64.go index 14ad9483b..348f238fd 100644 --- a/pkg/sentry/arch/arch_arm64.go +++ b/pkg/sentry/arch/arch_arm64.go @@ -22,11 +22,11 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/limits" - "gvisor.dev/gvisor/pkg/usermem" ) // Host specifies the host architecture. @@ -36,7 +36,7 @@ const Host = ARM64 const ( // maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux // for a 64-bit process. - maxAddr64 usermem.Addr = (1 << 48) + maxAddr64 hostarch.Addr = (1 << 48) // maxStackRand64 is the maximum randomization to apply to the stack. // It is defined by arch/arm64/mm/mmap.c:(STACK_RND_MASK << PAGE_SHIFT) in Linux. @@ -44,7 +44,7 @@ const ( // maxMmapRand64 is the maximum randomization to apply to the mmap // layout. It is defined by arch/arm64/mm/mmap.c:arch_mmap_rnd in Linux. - maxMmapRand64 = (1 << 33) * usermem.PageSize + maxMmapRand64 = (1 << 33) * hostarch.PageSize // minGap64 is the minimum gap to leave at the top of the address space // for the stack. It is defined by arch/arm64/mm/mmap.c:MIN_GAP in Linux. @@ -55,7 +55,7 @@ const ( // // The Platform {Min,Max}UserAddress() may preclude loading at this // address. See other preferredFoo comments below. - preferredPIELoadAddr usermem.Addr = maxAddr64 / 6 * 5 + preferredPIELoadAddr hostarch.Addr = maxAddr64 / 6 * 5 ) var ( @@ -66,13 +66,13 @@ var ( // These constants are selected as heuristics to help make the Platform's // potentially limited address space conform as closely to Linux as possible. const ( - preferredTopDownAllocMin usermem.Addr = 0x7e8000000000 - preferredAllocationGap = 128 << 30 // 128 GB - preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap + preferredTopDownAllocMin hostarch.Addr = 0x7e8000000000 + preferredAllocationGap = 128 << 30 // 128 GB + preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap // minMmapRand64 is the smallest we are willing to make the // randomization to stay above preferredTopDownBaseMin. - minMmapRand64 = (1 << 18) * usermem.PageSize + minMmapRand64 = (1 << 18) * hostarch.PageSize ) // context64 represents an ARM64 context. @@ -187,12 +187,12 @@ func (c *context64) FeatureSet() *cpuid.FeatureSet { } // mmapRand returns a random adjustment for randomizing an mmap layout. 
-func mmapRand(max uint64) usermem.Addr { - return usermem.Addr(rand.Int63n(int64(max))).RoundDown() +func mmapRand(max uint64) hostarch.Addr { + return hostarch.Addr(rand.Int63n(int64(max))).RoundDown() } // NewMmapLayout implements Context.NewMmapLayout consistently with Linux. -func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) { +func (c *context64) NewMmapLayout(min, max hostarch.Addr, r *limits.LimitSet) (MmapLayout, error) { min, ok := min.RoundUp() if !ok { return MmapLayout{}, unix.EINVAL @@ -210,7 +210,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm // MAX_GAP in Linux. maxGap := (max / 6) * 5 - gap := usermem.Addr(stackSize.Cur) + gap := hostarch.Addr(stackSize.Cur) if gap < minGap64 { gap = minGap64 } @@ -223,7 +223,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm } topDownMin := max - gap - maxMmapRand64 - maxRand := usermem.Addr(maxMmapRand64) + maxRand := hostarch.Addr(maxMmapRand64) if topDownMin < preferredTopDownBaseMin { // Try to keep TopDownBase above preferredTopDownBaseMin by // shrinking maxRand. @@ -258,7 +258,7 @@ func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (Mm } // PIELoadAddress implements Context.PIELoadAddress. -func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr { +func (c *context64) PIELoadAddress(l MmapLayout) hostarch.Addr { base := preferredPIELoadAddr max, ok := base.AddLength(maxMmapRand64) if !ok { diff --git a/pkg/sentry/arch/auxv.go b/pkg/sentry/arch/auxv.go index 2b4c8f3fc..19ca18121 100644 --- a/pkg/sentry/arch/auxv.go +++ b/pkg/sentry/arch/auxv.go @@ -15,7 +15,7 @@ package arch import ( - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // An AuxEntry represents an entry in an ELF auxiliary vector. @@ -23,7 +23,7 @@ import ( // +stateify savable type AuxEntry struct { Key uint64 - Value usermem.Addr + Value hostarch.Addr } // An Auxv represents an ELF auxiliary vector. diff --git a/pkg/sentry/arch/fpu/BUILD b/pkg/sentry/arch/fpu/BUILD index 0a5395267..4e4f20639 100644 --- a/pkg/sentry/arch/fpu/BUILD +++ b/pkg/sentry/arch/fpu/BUILD @@ -13,9 +13,9 @@ go_library( visibility = ["//:sandbox"], deps = [ "//pkg/cpuid", + "//pkg/hostarch", "//pkg/sync", "//pkg/syserror", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/arch/fpu/fpu_amd64.go b/pkg/sentry/arch/fpu/fpu_amd64.go index 3a62f51be..1e9625bee 100644 --- a/pkg/sentry/arch/fpu/fpu_amd64.go +++ b/pkg/sentry/arch/fpu/fpu_amd64.go @@ -21,9 +21,9 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // initX86FPState (defined in asm files) sets up initial state. @@ -146,11 +146,11 @@ const ( // any of the reserved bits of the MXCSR register." - Intel SDM Vol. 1, Section // 10.5.1.2 "SSE State") func sanitizeMXCSR(f State) { - mxcsr := usermem.ByteOrder.Uint32(f[mxcsrOffset:]) + mxcsr := hostarch.ByteOrder.Uint32(f[mxcsrOffset:]) initMXCSRMask.Do(func() { temp := State(alignedBytes(uint(ptraceFPRegsSize), 16)) initX86FPState(&temp[0], false /* useXsave */) - mxcsrMask = usermem.ByteOrder.Uint32(temp[mxcsrMaskOffset:]) + mxcsrMask = hostarch.ByteOrder.Uint32(temp[mxcsrMaskOffset:]) if mxcsrMask == 0 { // "If the value of the MXCSR_MASK field is 00000000H, then the // MXCSR_MASK value is the default value of 0000FFBFH." 
- Intel SDM @@ -160,7 +160,7 @@ func sanitizeMXCSR(f State) { } }) mxcsr &= mxcsrMask - usermem.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr) + hostarch.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr) } // PtraceGetXstateRegs implements ptrace(PTRACE_GETREGS, NT_X86_XSTATE) by @@ -177,7 +177,7 @@ func (s *State) PtraceGetXstateRegs(dst io.Writer, maxlen int, featureSet *cpuid // Area". Linux uses the first 8 bytes of this area to store the OS XSTATE // mask. GDB relies on this: see // gdb/x86-linux-nat.c:x86_linux_read_description(). - usermem.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask()) + hostarch.ByteOrder.PutUint64(f[userXstateXCR0Offset:], featureSet.ValidXCR0Mask()) if len(f) > maxlen { f = f[:maxlen] } @@ -208,9 +208,9 @@ func (s *State) PtraceSetXstateRegs(src io.Reader, maxlen int, featureSet *cpuid // Force reserved bits in MXCSR to 0. This is consistent with Linux. sanitizeMXCSR(State(f)) // Users can't enable *more* XCR0 bits than what we, and the CPU, support. - xstateBV := usermem.ByteOrder.Uint64(f[xstateBVOffset:]) + xstateBV := hostarch.ByteOrder.Uint64(f[xstateBVOffset:]) xstateBV &= featureSet.ValidXCR0Mask() - usermem.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV) + hostarch.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV) // Force XCOMP_BV and reserved bytes in the XSAVE header to 0. reserved := f[xsaveHeaderZeroedOffset : xsaveHeaderZeroedOffset+xsaveHeaderZeroedBytes] for i := range reserved { @@ -266,7 +266,7 @@ func (s *State) AfterLoad() { // What was in use? savedBV := fxsaveBV if len(old) >= xstateBVOffset+8 { - savedBV = usermem.ByteOrder.Uint64(old[xstateBVOffset:]) + savedBV = hostarch.ByteOrder.Uint64(old[xstateBVOffset:]) } // Supported features must be a superset of saved features. diff --git a/pkg/sentry/arch/signal.go b/pkg/sentry/arch/signal.go index 35d2e07c3..67d7edf68 100644 --- a/pkg/sentry/arch/signal.go +++ b/pkg/sentry/arch/signal.go @@ -16,7 +16,7 @@ package arch import ( "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // SignalAct represents the action that should be taken when a signal is @@ -154,107 +154,107 @@ func (s *SignalInfo) FixSignalCodeForUser() { // PID returns the si_pid field. func (s *SignalInfo) PID() int32 { - return int32(usermem.ByteOrder.Uint32(s.Fields[0:4])) + return int32(hostarch.ByteOrder.Uint32(s.Fields[0:4])) } // SetPID mutates the si_pid field. func (s *SignalInfo) SetPID(val int32) { - usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val)) } // UID returns the si_uid field. func (s *SignalInfo) UID() int32 { - return int32(usermem.ByteOrder.Uint32(s.Fields[4:8])) + return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8])) } // SetUID mutates the si_uid field. func (s *SignalInfo) SetUID(val int32) { - usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val)) } // Sigval returns the sigval field, which is aliased to both si_int and si_ptr. func (s *SignalInfo) Sigval() uint64 { - return usermem.ByteOrder.Uint64(s.Fields[8:16]) + return hostarch.ByteOrder.Uint64(s.Fields[8:16]) } // SetSigval mutates the sigval field. func (s *SignalInfo) SetSigval(val uint64) { - usermem.ByteOrder.PutUint64(s.Fields[8:16], val) + hostarch.ByteOrder.PutUint64(s.Fields[8:16], val) } // TimerID returns the si_timerid field. 
func (s *SignalInfo) TimerID() linux.TimerID { - return linux.TimerID(usermem.ByteOrder.Uint32(s.Fields[0:4])) + return linux.TimerID(hostarch.ByteOrder.Uint32(s.Fields[0:4])) } // SetTimerID sets the si_timerid field. func (s *SignalInfo) SetTimerID(val linux.TimerID) { - usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[0:4], uint32(val)) } // Overrun returns the si_overrun field. func (s *SignalInfo) Overrun() int32 { - return int32(usermem.ByteOrder.Uint32(s.Fields[4:8])) + return int32(hostarch.ByteOrder.Uint32(s.Fields[4:8])) } // SetOverrun sets the si_overrun field. func (s *SignalInfo) SetOverrun(val int32) { - usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[4:8], uint32(val)) } // Addr returns the si_addr field. func (s *SignalInfo) Addr() uint64 { - return usermem.ByteOrder.Uint64(s.Fields[0:8]) + return hostarch.ByteOrder.Uint64(s.Fields[0:8]) } // SetAddr sets the si_addr field. func (s *SignalInfo) SetAddr(val uint64) { - usermem.ByteOrder.PutUint64(s.Fields[0:8], val) + hostarch.ByteOrder.PutUint64(s.Fields[0:8], val) } // Status returns the si_status field. func (s *SignalInfo) Status() int32 { - return int32(usermem.ByteOrder.Uint32(s.Fields[8:12])) + return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12])) } // SetStatus mutates the si_status field. func (s *SignalInfo) SetStatus(val int32) { - usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) } // CallAddr returns the si_call_addr field. func (s *SignalInfo) CallAddr() uint64 { - return usermem.ByteOrder.Uint64(s.Fields[0:8]) + return hostarch.ByteOrder.Uint64(s.Fields[0:8]) } // SetCallAddr mutates the si_call_addr field. func (s *SignalInfo) SetCallAddr(val uint64) { - usermem.ByteOrder.PutUint64(s.Fields[0:8], val) + hostarch.ByteOrder.PutUint64(s.Fields[0:8], val) } // Syscall returns the si_syscall field. func (s *SignalInfo) Syscall() int32 { - return int32(usermem.ByteOrder.Uint32(s.Fields[8:12])) + return int32(hostarch.ByteOrder.Uint32(s.Fields[8:12])) } // SetSyscall mutates the si_syscall field. func (s *SignalInfo) SetSyscall(val int32) { - usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) + hostarch.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) } // Arch returns the si_arch field. func (s *SignalInfo) Arch() uint32 { - return usermem.ByteOrder.Uint32(s.Fields[12:16]) + return hostarch.ByteOrder.Uint32(s.Fields[12:16]) } // SetArch mutates the si_arch field. func (s *SignalInfo) SetArch(val uint32) { - usermem.ByteOrder.PutUint32(s.Fields[12:16], val) + hostarch.ByteOrder.PutUint32(s.Fields[12:16], val) } // Band returns the si_band field. func (s *SignalInfo) Band() int64 { - return int64(usermem.ByteOrder.Uint64(s.Fields[0:8])) + return int64(hostarch.ByteOrder.Uint64(s.Fields[0:8])) } // SetBand mutates the si_band field. @@ -262,15 +262,15 @@ func (s *SignalInfo) SetBand(val int64) { // Note: this assumes the platform uses `long` as `__ARCH_SI_BAND_T`. // On some platforms, which gVisor doesn't support, `__ARCH_SI_BAND_T` is // `int`. See siginfo.h. - usermem.ByteOrder.PutUint64(s.Fields[0:8], uint64(val)) + hostarch.ByteOrder.PutUint64(s.Fields[0:8], uint64(val)) } // FD returns the si_fd field. func (s *SignalInfo) FD() uint32 { - return usermem.ByteOrder.Uint32(s.Fields[8:12]) + return hostarch.ByteOrder.Uint32(s.Fields[8:12]) } // SetFD mutates the si_fd field. 
func (s *SignalInfo) SetFD(val uint32) { - usermem.ByteOrder.PutUint32(s.Fields[8:12], val) + hostarch.ByteOrder.PutUint32(s.Fields[8:12], val) } diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go index ee3743483..082ed92b1 100644 --- a/pkg/sentry/arch/signal_amd64.go +++ b/pkg/sentry/arch/signal_amd64.go @@ -21,10 +21,10 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" - "gvisor.dev/gvisor/pkg/usermem" ) // SignalContext64 is equivalent to struct sigcontext, the type passed as the @@ -133,7 +133,7 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt // space on the user stack naturally caps the amount of memory the // sentry will allocate for this purpose. fpSize, _ := c.fpuFrameSize() - sp = (sp - usermem.Addr(fpSize)) & ^usermem.Addr(63) + sp = (sp - hostarch.Addr(fpSize)) & ^hostarch.Addr(63) // Construct the UContext64 now since we need its size. uc := &UContext64{ @@ -180,8 +180,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt ucSize := uc.SizeBytes() // st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128. frameSize := int(st.Arch.Width()) + ucSize + 128 - frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8 - sp = frameBottom + usermem.Addr(frameSize) + frameBottom := (sp-hostarch.Addr(frameSize)) & ^hostarch.Addr(15) - 8 + sp = frameBottom + hostarch.Addr(frameSize) st.Bottom = sp // Prior to proceeding, figure out if the frame will exhaust the range diff --git a/pkg/sentry/arch/signal_arm64.go b/pkg/sentry/arch/signal_arm64.go index 53281dcba..da71fb873 100644 --- a/pkg/sentry/arch/signal_arm64.go +++ b/pkg/sentry/arch/signal_arm64.go @@ -19,9 +19,9 @@ package arch import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" - "gvisor.dev/gvisor/pkg/usermem" ) // SignalContext64 is equivalent to struct sigcontext, the type passed as the @@ -107,8 +107,8 @@ func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt // sizeof(siginfo) == 128. // R30 stores the restorer address. frameSize := ucSize + 128 - frameBottom := (sp - usermem.Addr(frameSize)) & ^usermem.Addr(15) - sp = frameBottom + usermem.Addr(frameSize) + frameBottom := (sp - hostarch.Addr(frameSize)) & ^hostarch.Addr(15) + sp = frameBottom + hostarch.Addr(frameSize) st.Bottom = sp // Prior to proceeding, figure out if the frame will exhaust the range diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go index a1eae98f9..c732c7503 100644 --- a/pkg/sentry/arch/signal_stack.go +++ b/pkg/sentry/arch/signal_stack.go @@ -17,8 +17,8 @@ package arch import ( + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" - "gvisor.dev/gvisor/pkg/usermem" ) const ( @@ -36,8 +36,8 @@ func (s SignalStack) IsEnabled() bool { } // Top returns the stack's top address. -func (s SignalStack) Top() usermem.Addr { - return usermem.Addr(s.Addr + s.Size) +func (s SignalStack) Top() hostarch.Addr { + return hostarch.Addr(s.Addr + s.Size) } // SetOnStack marks this signal stack as in use. @@ -49,8 +49,8 @@ func (s *SignalStack) SetOnStack() { } // Contains checks if the stack pointer is within this stack. 
-func (s *SignalStack) Contains(sp usermem.Addr) bool { - return usermem.Addr(s.Addr) < sp && sp <= usermem.Addr(s.Addr+s.Size) +func (s *SignalStack) Contains(sp hostarch.Addr) bool { + return hostarch.Addr(s.Addr) < sp && sp <= hostarch.Addr(s.Addr+s.Size) } // NativeSignalStack is a type that is equivalent to stack_t in the guest diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go index 5f06c751d..65a794c7c 100644 --- a/pkg/sentry/arch/stack.go +++ b/pkg/sentry/arch/stack.go @@ -16,18 +16,20 @@ package arch import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" + "gvisor.dev/gvisor/pkg/usermem" ) -// Stack is a simple wrapper around a usermem.IO and an address. Stack +// Stack is a simple wrapper around a hostarch.IO and an address. Stack // implements marshal.CopyContext, and marshallable values can be pushed or // popped from the stack through the marshal.Marshallable interface. // // Stack is not thread-safe. type Stack struct { // Our arch info. - // We use this for automatic Native conversion of usermem.Addrs during + // We use this for automatic Native conversion of hostarch.Addrs during // Push() and Pop(). Arch Context @@ -35,7 +37,7 @@ type Stack struct { IO usermem.IO // Our current stack bottom. - Bottom usermem.Addr + Bottom hostarch.Addr // Scratch buffer used for marshalling to avoid having to repeatedly // allocate scratch memory. @@ -59,20 +61,20 @@ func (s *Stack) CopyScratchBuffer(size int) []byte { // StackBottomMagic is the special address callers must past to all stack // marshalling operations to cause the src/dst address to be computed based on // the current end of the stack. -const StackBottomMagic = ^usermem.Addr(0) // usermem.Addr(-1) +const StackBottomMagic = ^hostarch.Addr(0) // hostarch.Addr(-1) // CopyOutBytes implements marshal.CopyContext.CopyOutBytes. CopyOutBytes // computes an appropriate address based on the current end of the // stack. Callers use the sentinel address StackBottomMagic to marshal methods // to indicate this. -func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) { +func (s *Stack) CopyOutBytes(sentinel hostarch.Addr, b []byte) (int, error) { if sentinel != StackBottomMagic { panic("Attempted to copy out to stack with absolute address") } c := len(b) - n, err := s.IO.CopyOut(context.Background(), s.Bottom-usermem.Addr(c), b, usermem.IOOpts{}) + n, err := s.IO.CopyOut(context.Background(), s.Bottom-hostarch.Addr(c), b, usermem.IOOpts{}) if err == nil && n == c { - s.Bottom -= usermem.Addr(n) + s.Bottom -= hostarch.Addr(n) } return n, err } @@ -81,21 +83,21 @@ func (s *Stack) CopyOutBytes(sentinel usermem.Addr, b []byte) (int, error) { // an appropriate address based on the current end of the stack. Callers must // use the sentinel address StackBottomMagic to marshal methods to indicate // this. -func (s *Stack) CopyInBytes(sentinel usermem.Addr, b []byte) (int, error) { +func (s *Stack) CopyInBytes(sentinel hostarch.Addr, b []byte) (int, error) { if sentinel != StackBottomMagic { panic("Attempted to copy in from stack with absolute address") } n, err := s.IO.CopyIn(context.Background(), s.Bottom, b, usermem.IOOpts{}) if err == nil { - s.Bottom += usermem.Addr(n) + s.Bottom += hostarch.Addr(n) } return n, err } // Align aligns the stack to the given offset. 
func (s *Stack) Align(offset int) { - if s.Bottom%usermem.Addr(offset) != 0 { - s.Bottom -= (s.Bottom % usermem.Addr(offset)) + if s.Bottom%hostarch.Addr(offset) != 0 { + s.Bottom -= (s.Bottom % hostarch.Addr(offset)) } } @@ -119,16 +121,16 @@ func (s *Stack) PushNullTerminatedByteSlice(bs []byte) (int, error) { // stack. type StackLayout struct { // ArgvStart is the beginning of the argument vector. - ArgvStart usermem.Addr + ArgvStart hostarch.Addr // ArgvEnd is the end of the argument vector. - ArgvEnd usermem.Addr + ArgvEnd hostarch.Addr // EnvvStart is the beginning of the environment vector. - EnvvStart usermem.Addr + EnvvStart hostarch.Addr // EnvvEnd is the end of the environment vector. - EnvvEnd usermem.Addr + EnvvEnd hostarch.Addr } // Load pushes the given args, env and aux vector to the stack using the @@ -148,7 +150,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) // to be in this order. See: https://www.uclibc.org/docs/psABI-x86_64.pdf // page 29. l.EnvvEnd = s.Bottom - envAddrs := make([]usermem.Addr, len(env)) + envAddrs := make([]hostarch.Addr, len(env)) for i := len(env) - 1; i >= 0; i-- { if _, err := s.PushNullTerminatedByteSlice([]byte(env[i])); err != nil { return StackLayout{}, err @@ -159,7 +161,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) // Push our strings. l.ArgvEnd = s.Bottom - argAddrs := make([]usermem.Addr, len(args)) + argAddrs := make([]hostarch.Addr, len(args)) for i := len(args) - 1; i >= 0; i-- { if _, err := s.PushNullTerminatedByteSlice([]byte(args[i])); err != nil { return StackLayout{}, err @@ -178,7 +180,7 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) argvSize := s.Arch.Width() * uint(len(args)+1) envvSize := s.Arch.Width() * uint(len(env)+1) auxvSize := s.Arch.Width() * 2 * uint(len(aux)+1) - total := usermem.Addr(argvSize) + usermem.Addr(envvSize) + usermem.Addr(auxvSize) + usermem.Addr(s.Arch.Width()) + total := hostarch.Addr(argvSize) + hostarch.Addr(envvSize) + hostarch.Addr(auxvSize) + hostarch.Addr(s.Arch.Width()) expectedBottom := s.Bottom - total if expectedBottom%32 != 0 { s.Bottom -= expectedBottom % 32 @@ -188,11 +190,11 @@ func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) // NOTE: We need an extra zero here per spec. // The Push function will automatically terminate // strings and arrays with a single null value. - auxv := make([]usermem.Addr, 0, len(aux)) + auxv := make([]hostarch.Addr, 0, len(aux)) for _, a := range aux { - auxv = append(auxv, usermem.Addr(a.Key), a.Value) + auxv = append(auxv, hostarch.Addr(a.Key), a.Value) } - auxv = append(auxv, usermem.Addr(0)) + auxv = append(auxv, hostarch.Addr(0)) _, err := s.pushAddrSliceAndTerminator(auxv) if err != nil { return StackLayout{}, err diff --git a/pkg/sentry/arch/stack_unsafe.go b/pkg/sentry/arch/stack_unsafe.go index 0e478e434..f4712d58f 100644 --- a/pkg/sentry/arch/stack_unsafe.go +++ b/pkg/sentry/arch/stack_unsafe.go @@ -17,19 +17,19 @@ package arch import ( "unsafe" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" - "gvisor.dev/gvisor/pkg/usermem" ) // pushAddrSliceAndTerminator copies a slices of addresses to the stack, and // also pushes an extra null address element at the end of the slice. 
// // Internally, we unsafely transmute the slice type from the arch-dependent -// []usermem.Addr type, to a slice of fixed-sized ints so that we can pass it to +// []hostarch.Addr type, to a slice of fixed-sized ints so that we can pass it to // go-marshal. // // On error, the contents of the stack and the bottom cursor are undefined. -func (s *Stack) pushAddrSliceAndTerminator(src []usermem.Addr) (int, error) { +func (s *Stack) pushAddrSliceAndTerminator(src []hostarch.Addr) (int, error) { // Note: Stack grows upwards, so push the terminator first. switch s.Arch.Width() { case 8: diff --git a/pkg/sentry/devices/tundev/BUILD b/pkg/sentry/devices/tundev/BUILD index 71c59287c..8b38d574d 100644 --- a/pkg/sentry/devices/tundev/BUILD +++ b/pkg/sentry/devices/tundev/BUILD @@ -9,6 +9,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/arch", "//pkg/sentry/fsimpl/devtmpfs", "//pkg/sentry/inet", diff --git a/pkg/sentry/devices/tundev/tundev.go b/pkg/sentry/devices/tundev/tundev.go index c43158aa4..a12eeb8e7 100644 --- a/pkg/sentry/devices/tundev/tundev.go +++ b/pkg/sentry/devices/tundev/tundev.go @@ -18,6 +18,7 @@ package tundev import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs" "gvisor.dev/gvisor/pkg/sentry/inet" @@ -89,7 +90,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg } // Validate flags. - flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:])) + flags, err := netstack.LinuxToTUNFlags(hostarch.ByteOrder.Uint16(req.Data[:])) if err != nil { return 0, err } @@ -98,7 +99,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg case linux.TUNGETIFF: var req linux.IFReq copy(req.IFName[:], fd.device.Name()) - usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(fd.device.Flags())) + hostarch.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(fd.device.Flags())) _, err := req.CopyOut(t, data) return 0, err diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD index 420fbae34..0dc100f9b 100644 --- a/pkg/sentry/fs/BUILD +++ b/pkg/sentry/fs/BUILD @@ -48,6 +48,7 @@ go_library( "//pkg/abi/linux", "//pkg/amutex", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/p9", "//pkg/refs", diff --git a/pkg/sentry/fs/anon/BUILD b/pkg/sentry/fs/anon/BUILD index aedcecfa1..1ce56d79f 100644 --- a/pkg/sentry/fs/anon/BUILD +++ b/pkg/sentry/fs/anon/BUILD @@ -12,9 +12,9 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/device", "//pkg/sentry/fs", "//pkg/sentry/fs/fsutil", - "//pkg/usermem", ], ) diff --git a/pkg/sentry/fs/anon/anon.go b/pkg/sentry/fs/anon/anon.go index 5c421f5fb..8bda22a8e 100644 --- a/pkg/sentry/fs/anon/anon.go +++ b/pkg/sentry/fs/anon/anon.go @@ -19,9 +19,9 @@ package anon import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" - "gvisor.dev/gvisor/pkg/usermem" ) // NewInode constructs an anonymous Inode that is not associated @@ -37,6 +37,6 @@ func NewInode(ctx context.Context) *fs.Inode { Type: fs.Anonymous, DeviceID: PseudoDevice.DeviceID(), InodeID: PseudoDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, }) } diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go index 58deb25fc..5aa668873 
100644 --- a/pkg/sentry/fs/copy_up.go +++ b/pkg/sentry/fs/copy_up.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sync" @@ -339,7 +340,7 @@ func cleanupUpper(ctx context.Context, parent *Inode, name string, copyUpErr err // size is the same used by io.Copy. var copyUpBuffers = sync.Pool{ New: func() interface{} { - b := make([]byte, 8*usermem.PageSize) + b := make([]byte, 8*hostarch.PageSize) return &b }, } diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD index 9379a4d7b..23a3a9a2d 100644 --- a/pkg/sentry/fs/dev/BUILD +++ b/pkg/sentry/fs/dev/BUILD @@ -18,6 +18,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/rand", "//pkg/safemem", "//pkg/sentry/arch", diff --git a/pkg/sentry/fs/dev/dev.go b/pkg/sentry/fs/dev/dev.go index acbd401a0..e84ba7a5d 100644 --- a/pkg/sentry/fs/dev/dev.go +++ b/pkg/sentry/fs/dev/dev.go @@ -19,6 +19,7 @@ import ( "math" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs" @@ -49,7 +50,7 @@ func newCharacterDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.M return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ DeviceID: devDevice.DeviceID(), InodeID: devDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.CharacterDevice, DeviceFileMajor: major, DeviceFileMinor: minor, @@ -60,7 +61,7 @@ func newMemDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ DeviceID: devDevice.DeviceID(), InodeID: devDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.CharacterDevice, DeviceFileMajor: memDevMajor, DeviceFileMinor: minor, @@ -72,7 +73,7 @@ func newDirectory(ctx context.Context, contents map[string]*fs.Inode, msrc *fs.M return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ DeviceID: devDevice.DeviceID(), InodeID: devDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Directory, }) } @@ -82,7 +83,7 @@ func newSymlink(ctx context.Context, target string, msrc *fs.MountSource) *fs.In return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ DeviceID: devDevice.DeviceID(), InodeID: devDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Symlink, }) } @@ -137,7 +138,7 @@ func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode { return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ DeviceID: devDevice.DeviceID(), InodeID: devDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Directory, }) } diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go index 11a2984d8..77e8d222a 100644 --- a/pkg/sentry/fs/dev/net_tun.go +++ b/pkg/sentry/fs/dev/net_tun.go @@ -17,6 +17,7 @@ package dev import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" @@ -110,7 +111,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user } // Validate flags. 
- flags, err := netstack.LinuxToTUNFlags(usermem.ByteOrder.Uint16(req.Data[:])) + flags, err := netstack.LinuxToTUNFlags(hostarch.ByteOrder.Uint16(req.Data[:])) if err != nil { return 0, err } @@ -119,7 +120,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user case linux.TUNGETIFF: var req linux.IFReq copy(req.IFName[:], n.device.Name()) - usermem.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags())) + hostarch.ByteOrder.PutUint16(req.Data[:], netstack.TUNFlagsToLinux(n.device.Flags())) _, err := req.CopyOut(t, data) return 0, err diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD index c83baf464..2120f2bad 100644 --- a/pkg/sentry/fs/fdpipe/BUILD +++ b/pkg/sentry/fs/fdpipe/BUILD @@ -40,6 +40,7 @@ go_test( "//pkg/context", "//pkg/fd", "//pkg/fdnotifier", + "//pkg/hostarch", "//pkg/sentry/contexttest", "//pkg/sentry/fs", "//pkg/syserror", diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go index faeb3908c..ab0e9dac7 100644 --- a/pkg/sentry/fs/fdpipe/pipe_test.go +++ b/pkg/sentry/fs/fdpipe/pipe_test.go @@ -27,6 +27,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) func singlePipeFD() (int, error) { @@ -52,7 +54,7 @@ func mockPipeDirent(t *testing.T) *fs.Dirent { } inode := fs.NewInode(ctx, node, fs.NewMockMountSource(nil), fs.StableAttr{ Type: fs.Pipe, - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, }) return fs.NewDirent(ctx, inode, "") } diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD index d388f0e92..6469cc3a9 100644 --- a/pkg/sentry/fs/fsutil/BUILD +++ b/pkg/sentry/fs/fsutil/BUILD @@ -76,6 +76,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/safemem", "//pkg/sentry/arch", @@ -105,6 +106,7 @@ go_test( library = ":fsutil", deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/safemem", "//pkg/sentry/contexttest", "//pkg/sentry/fs", diff --git a/pkg/sentry/fs/fsutil/dirty_set.go b/pkg/sentry/fs/fsutil/dirty_set.go index 2c9446c1d..38383e730 100644 --- a/pkg/sentry/fs/fsutil/dirty_set.go +++ b/pkg/sentry/fs/fsutil/dirty_set.go @@ -18,9 +18,9 @@ import ( "math" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" - "gvisor.dev/gvisor/pkg/usermem" ) // DirtySet maps offsets into a memmap.Mappable to DirtyInfo. 
It is used to @@ -215,7 +215,7 @@ func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRan if max < wbr.Start { break } - ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read) + ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), hostarch.Read) if err != nil { return err } diff --git a/pkg/sentry/fs/fsutil/dirty_set_test.go b/pkg/sentry/fs/fsutil/dirty_set_test.go index e3579c23c..48448c97c 100644 --- a/pkg/sentry/fs/fsutil/dirty_set_test.go +++ b/pkg/sentry/fs/fsutil/dirty_set_test.go @@ -18,18 +18,18 @@ import ( "reflect" "testing" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/memmap" - "gvisor.dev/gvisor/pkg/usermem" ) func TestDirtySet(t *testing.T) { var set DirtySet - set.MarkDirty(memmap.MappableRange{0, 2 * usermem.PageSize}) - set.KeepDirty(memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize}) - set.MarkClean(memmap.MappableRange{0, 2 * usermem.PageSize}) + set.MarkDirty(memmap.MappableRange{0, 2 * hostarch.PageSize}) + set.KeepDirty(memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize}) + set.MarkClean(memmap.MappableRange{0, 2 * hostarch.PageSize}) want := &DirtySegmentDataSlices{ - Start: []uint64{usermem.PageSize}, - End: []uint64{2 * usermem.PageSize}, + Start: []uint64{hostarch.PageSize}, + End: []uint64{2 * hostarch.PageSize}, Values: []DirtyInfo{{Keep: true}}, } if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) { diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go index 1dc409d38..fdaceb1db 100644 --- a/pkg/sentry/fs/fsutil/file_range_set.go +++ b/pkg/sentry/fs/fsutil/file_range_set.go @@ -20,11 +20,11 @@ import ( "math" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/usage" - "gvisor.dev/gvisor/pkg/usermem" ) // FileRangeSet maps offsets into a memmap.Mappable to offsets into a @@ -130,7 +130,7 @@ func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Map // MemoryFile.AllocateAndFill truncates down to a page // boundary, but FileRangeSet.Fill is supposed to // zero-fill to the end of the page in this case. - donepgaddr, ok := usermem.Addr(done).RoundUp() + donepgaddr, ok := hostarch.Addr(done).RoundUp() if donepg := uint64(donepgaddr); ok && donepg != done { dsts.DropFirst64(donepg - done) done = donepg @@ -184,7 +184,7 @@ func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) { // bytes after the new EOF on the same page are zeroed, and pages after the new // EOF are freed. func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) { - pgendaddr, ok := usermem.Addr(end).RoundUp() + pgendaddr, ok := hostarch.Addr(end).RoundUp() if ok { pgend := uint64(pgendaddr) @@ -208,7 +208,7 @@ func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) { if seg.Ok() { fr := seg.FileRange() fr.Start += end - seg.Start() - ims, err := mf.MapInternal(fr, usermem.Write) + ims, err := mf.MapInternal(fr, hostarch.Write) if err != nil { // There's no good recourse from here. 
This means // that we can't keep cached memory consistent with diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go index 54f7b7cdc..23528bf25 100644 --- a/pkg/sentry/fs/fsutil/host_file_mapper.go +++ b/pkg/sentry/fs/fsutil/host_file_mapper.go @@ -18,11 +18,11 @@ import ( "fmt" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // HostFileMapper caches mappings of an arbitrary host file descriptor. It is @@ -50,13 +50,13 @@ type HostFileMapper struct { } const ( - chunkShift = usermem.HugePageShift + chunkShift = hostarch.HugePageShift chunkSize = 1 << chunkShift chunkMask = chunkSize - 1 ) func pagesInChunk(mr memmap.MappableRange, chunkStart uint64) int32 { - return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / usermem.PageSize) + return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / hostarch.PageSize) } type mapping struct { diff --git a/pkg/sentry/fs/fsutil/host_mappable.go b/pkg/sentry/fs/fsutil/host_mappable.go index c15d8a946..e1e38b498 100644 --- a/pkg/sentry/fs/fsutil/host_mappable.go +++ b/pkg/sentry/fs/fsutil/host_mappable.go @@ -18,6 +18,7 @@ import ( "math" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/memmap" @@ -59,7 +60,7 @@ func NewHostMappable(backingFile CachedFileObject) *HostMappable { } // AddMapping implements memmap.Mappable.AddMapping. -func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { // Hot path. Avoid defers. h.mu.Lock() mapped := h.mappings.AddMapping(ms, ar, offset, writable) @@ -71,7 +72,7 @@ func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, a } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { // Hot path. Avoid defers. h.mu.Lock() unmapped := h.mappings.RemoveMapping(ms, ar, offset, writable) @@ -82,18 +83,18 @@ func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace } // CopyMapping implements memmap.Mappable.CopyMapping. -func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return h.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. 
-func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { return []memmap.Translation{ { Source: optional, File: h, Offset: optional.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, nil } @@ -124,7 +125,7 @@ func (h *HostMappable) NotifyChangeFD() error { } // MapInternal implements memmap.File.MapInternal. -func (h *HostMappable) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { +func (h *HostMappable) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) { return h.hostFileMapper.MapInternal(fr, h.backingFile.FD(), at.Write) } diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go index 0ed7aafa5..7856b354b 100644 --- a/pkg/sentry/fs/fsutil/inode_cached.go +++ b/pkg/sentry/fs/fsutil/inode_cached.go @@ -19,6 +19,7 @@ import ( "io" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -622,7 +623,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { switch { case seg.Ok(): // Get internal mappings from the cache. - ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read) if err != nil { unlock() return done, err @@ -647,7 +648,7 @@ func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { // Read into the cache, then re-enter the loop to read from the // cache. reqMR := memmap.MappableRange{ - Start: uint64(usermem.Addr(gapMR.Start).RoundDown()), + Start: uint64(hostarch.Addr(gapMR.Start).RoundDown()), End: fs.OffsetPageEnd(int64(gapMR.End)), } optMR := gap.Range() @@ -729,7 +730,7 @@ func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error case seg.Ok() && seg.Start() < mr.End: // Get internal mappings from the cache. segMR := seg.Range().Intersect(mr) - ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write) + ims, err := mf.MapInternal(seg.FileRangeOf(segMR), hostarch.Write) if err != nil { rw.maybeGrowFile() rw.c.dataMu.Unlock() @@ -786,7 +787,7 @@ func (c *CachingInodeOperations) useHostPageCache() bool { } // AddMapping implements memmap.Mappable.AddMapping. -func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { // Hot path. Avoid defers. c.mapsMu.Lock() mapped := c.mappings.AddMapping(ms, ar, offset, writable) @@ -808,7 +809,7 @@ func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.Mappi } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { // Hot path. Avoid defers. 
c.mapsMu.Lock() unmapped := c.mappings.RemoveMapping(ms, ar, offset, writable) @@ -836,12 +837,12 @@ func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Ma } // CopyMapping implements memmap.Mappable.CopyMapping. -func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return c.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. -func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { // Hot path. Avoid defer. if c.useHostPageCache() { mr := optional @@ -853,7 +854,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option Source: mr, File: c, Offset: mr.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, nil } @@ -885,7 +886,7 @@ func (c *CachingInodeOperations) Translate(ctx context.Context, required, option segMR := seg.Range().Intersect(optional) // TODO(jamieliu): Make Translations writable even if writability is // not required if already kept-dirty by another writable translation. - perms := usermem.AccessType{ + perms := hostarch.AccessType{ Read: true, Execute: true, } @@ -1050,7 +1051,7 @@ func (c *CachingInodeOperations) DecRef(fr memmap.FileRange) { // MapInternal implements memmap.File.MapInternal. This is used when we // directly map an underlying host fd and CachingInodeOperations is used as the // memmap.File during translation. -func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { +func (c *CachingInodeOperations) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) { return c.hostFileMapper.MapInternal(fr, c.backingFile.FD(), at.Write) } diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go index 1547584c5..e107c3096 100644 --- a/pkg/sentry/fs/fsutil/inode_cached_test.go +++ b/pkg/sentry/fs/fsutil/inode_cached_test.go @@ -20,6 +20,7 @@ import ( "testing" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/contexttest" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -249,7 +250,7 @@ func (f *sliceBackingFile) Allocate(ctx context.Context, offset int64, length in type noopMappingSpace struct{} // Invalidate implements memmap.MappingSpace.Invalidate. -func (noopMappingSpace) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) { +func (noopMappingSpace) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) { } func anonInode(ctx context.Context) *fs.Inode { @@ -259,14 +260,14 @@ func anonInode(ctx context.Context) *fs.Inode { }, 0), }, fs.NewPseudoMountSource(ctx), fs.StableAttr{ Type: fs.Anonymous, - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, }) } func pagesOf(bs ...byte) []byte { - buf := make([]byte, 0, len(bs)*usermem.PageSize) + buf := make([]byte, 0, len(bs)*hostarch.PageSize) for _, b := range bs { - buf = append(buf, bytes.Repeat([]byte{b}, usermem.PageSize)...) 
+ buf = append(buf, bytes.Repeat([]byte{b}, hostarch.PageSize)...) } return buf } @@ -292,28 +293,28 @@ func TestRead(t *testing.T) { // expects to only cache mapped pages), then call Translate to force it to // be cached. var ms noopMappingSpace - ar := usermem.AddrRange{usermem.PageSize, 2 * usermem.PageSize} - if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil { + ar := hostarch.AddrRange{hostarch.PageSize, 2 * hostarch.PageSize} + if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil { t.Fatalf("AddMapping got %v, want nil", err) } - mr := memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize} - if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + mr := memmap.MappableRange{hostarch.PageSize, 2 * hostarch.PageSize} + if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil { t.Fatalf("Translate got %v, want nil", err) } - if cached := iops.cache.Span(); cached != usermem.PageSize { - t.Errorf("SpanRange got %d, want %d", cached, usermem.PageSize) + if cached := iops.cache.Span(); cached != hostarch.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, hostarch.PageSize) } // Try to read 4 pages. The first and third pages should be read directly // from the "file", the second page should be read from the cache, and only // 3 pages (the size of the file) should be readable. - rbuf := make([]byte, 4*usermem.PageSize) + rbuf := make([]byte, 4*hostarch.PageSize) dst := usermem.BytesIOSequence(rbuf) n, err := iops.Read(ctx, file, dst, 0) - if n != 3*usermem.PageSize || (err != nil && err != io.EOF) { - t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*usermem.PageSize) + if n != 3*hostarch.PageSize || (err != nil && err != io.EOF) { + t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*hostarch.PageSize) } - rbuf = rbuf[:3*usermem.PageSize] + rbuf = rbuf[:3*hostarch.PageSize] // Did we get the bytes we expect? if !bytes.Equal(rbuf, buf) { @@ -323,7 +324,7 @@ func TestRead(t *testing.T) { // Delete the memory mapping before iops.Release(). The cached page will // either be evicted by ctx's pgalloc.MemoryFile, or dropped by // iops.Release(). - iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true) + iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true) } func TestWrite(t *testing.T) { @@ -348,25 +349,25 @@ func TestWrite(t *testing.T) { // CachingInodeOperations expects to only cache mapped pages), then call // Translate to force them to be cached. 
var ms noopMappingSpace - ar := usermem.AddrRange{usermem.PageSize, 3 * usermem.PageSize} - if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil { + ar := hostarch.AddrRange{hostarch.PageSize, 3 * hostarch.PageSize} + if err := iops.AddMapping(ctx, ms, ar, hostarch.PageSize, true); err != nil { t.Fatalf("AddMapping got %v, want nil", err) } - defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true) - mr := memmap.MappableRange{usermem.PageSize, 3 * usermem.PageSize} - if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + defer iops.RemoveMapping(ctx, ms, ar, hostarch.PageSize, true) + mr := memmap.MappableRange{hostarch.PageSize, 3 * hostarch.PageSize} + if _, err := iops.Translate(ctx, mr, mr, hostarch.Read); err != nil { t.Fatalf("Translate got %v, want nil", err) } - if cached := iops.cache.Span(); cached != 2*usermem.PageSize { - t.Errorf("SpanRange got %d, want %d", cached, 2*usermem.PageSize) + if cached := iops.cache.Span(); cached != 2*hostarch.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, 2*hostarch.PageSize) } // Write to the first 2 pages. wbuf := pagesOf('e', 'f') src := usermem.BytesIOSequence(wbuf) n, err := iops.Write(ctx, src, 0) - if n != 2*usermem.PageSize || err != nil { - t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*usermem.PageSize) + if n != 2*hostarch.PageSize || err != nil { + t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*hostarch.PageSize) } // The first page should have been written directly, since it was not cached. @@ -382,7 +383,7 @@ func TestWrite(t *testing.T) { } // Now the second page should have been written as well. - copy(want[usermem.PageSize:], pagesOf('f')) + copy(want[hostarch.PageSize:], pagesOf('f')) if !bytes.Equal(buf, want) { t.Errorf("File contents are %v, want %v", buf, want) } diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD index b210e0e7e..c4a069832 100644 --- a/pkg/sentry/fs/gofer/BUILD +++ b/pkg/sentry/fs/gofer/BUILD @@ -27,6 +27,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fd", + "//pkg/hostarch", "//pkg/log", "//pkg/p9", "//pkg/refs", diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go index cffc756cc..d6bff3f40 100644 --- a/pkg/sentry/fs/gofer/attr.go +++ b/pkg/sentry/fs/gofer/attr.go @@ -17,11 +17,11 @@ package gofer import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/p9" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" - "gvisor.dev/gvisor/pkg/usermem" ) // getattr returns the 9p attributes of the p9.File. On success, Mode, Size, and RDev @@ -98,7 +98,7 @@ func bsize(pattr p9.Attr) int64 { // Some files, particularly those that are not on a local file system, // may have no clue of their block size. Better not to report something // misleading or buggy and have a safe default. - return usermem.PageSize + return hostarch.PageSize } // ntype returns an fs.InodeType from 9p attributes. 
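The fsutil, inode-cache, and overlay (below) changes update implementations of the memmap.Mappable interface, whose AddMapping/RemoveMapping/CopyMapping/Translate methods now take hostarch.AddrRange and hostarch.AccessType instead of the usermem equivalents. A hedged sketch of the single-translation pattern used by HostMappable.Translate above, with stand-in type definitions rather than the real memmap/hostarch ones:

package main

import "fmt"

// Illustrative stand-ins; the field sets are assumptions, not the gVisor types.
type AccessType struct{ Read, Write, Execute bool }

var AnyAccess = AccessType{Read: true, Write: true, Execute: true}

type MappableRange struct{ Start, End uint64 }

type Translation struct {
	Source MappableRange
	Offset uint64
	Perms  AccessType
}

// translateWholeRange mirrors the pattern in HostMappable.Translate: a
// host-FD-backed mappable can satisfy any request with one identity
// translation over the optional range, permitting all access types.
func translateWholeRange(required, optional MappableRange) []Translation {
	_ = required // a real implementation would check required lies within optional
	return []Translation{{
		Source: optional,
		Offset: optional.Start,
		Perms:  AnyAccess,
	}}
}

func main() {
	ts := translateWholeRange(MappableRange{0, 4096}, MappableRange{0, 65536})
	fmt.Printf("%+v\n", ts[0])
}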
diff --git a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go index fb81d903d..1b83643db 100644 --- a/pkg/sentry/fs/inotify.go +++ b/pkg/sentry/fs/inotify.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/uniqueid" @@ -216,7 +217,7 @@ func (i *Inotify) Ioctl(ctx context.Context, _ *File, io usermem.IO, args arch.S n += uint32(e.sizeOf()) } var buf [4]byte - usermem.ByteOrder.PutUint32(buf[:], n) + hostarch.ByteOrder.PutUint32(buf[:], n) _, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{}) return 0, err diff --git a/pkg/sentry/fs/inotify_event.go b/pkg/sentry/fs/inotify_event.go index 686e1b1cd..399aff1ed 100644 --- a/pkg/sentry/fs/inotify_event.go +++ b/pkg/sentry/fs/inotify_event.go @@ -19,6 +19,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/usermem" ) @@ -100,10 +101,10 @@ func (e *Event) sizeOf() int { // construct the output. We use a buffer allocated ahead of time for // performance. buf must be at least inotifyEventBaseSize bytes. func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) { - usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) - usermem.ByteOrder.PutUint32(buf[4:], e.mask) - usermem.ByteOrder.PutUint32(buf[8:], e.cookie) - usermem.ByteOrder.PutUint32(buf[12:], e.len) + hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) + hostarch.ByteOrder.PutUint32(buf[4:], e.mask) + hostarch.ByteOrder.PutUint32(buf[8:], e.cookie) + hostarch.ByteOrder.PutUint32(buf[12:], e.len) writeLen := 0 diff --git a/pkg/sentry/fs/offset.go b/pkg/sentry/fs/offset.go index 53b5df175..3a8c97d8f 100644 --- a/pkg/sentry/fs/offset.go +++ b/pkg/sentry/fs/offset.go @@ -17,14 +17,14 @@ package fs import ( "math" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // OffsetPageEnd returns the file offset rounded up to the nearest // page boundary. OffsetPageEnd panics if rounding up causes overflow, // which shouldn't be possible given that offset is an int64. func OffsetPageEnd(offset int64) uint64 { - end, ok := usermem.Addr(offset).RoundUp() + end, ok := hostarch.Addr(offset).RoundUp() if !ok { panic("impossible overflow") } diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go index 01a1235b8..f96f5a3e5 100644 --- a/pkg/sentry/fs/overlay.go +++ b/pkg/sentry/fs/overlay.go @@ -19,11 +19,11 @@ import ( "strings" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // The virtual filesystem implements an overlay configuration. For a high-level @@ -274,7 +274,7 @@ func (o *overlayEntry) markDirectoryDirty() { } // AddMapping implements memmap.Mappable.AddMapping. 
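The inotify changes above swap usermem.ByteOrder for hostarch.ByteOrder when packing the 16-byte event header; the constant simply moved packages, so the layout written to userspace is unchanged. A minimal stand-alone sketch of that packing, using only the hostarch calls shown in the diff and an illustrative IN_CREATE event (the overlay AddMapping implementation continues below):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// Pack an inotify_event header the way Event.CopyTo does: wd, mask,
	// cookie and len are each 4 bytes in host byte order.
	var hdr [16]byte
	hostarch.ByteOrder.PutUint32(hdr[0:], 1)          // wd
	hostarch.ByteOrder.PutUint32(hdr[4:], 0x00000100) // mask (IN_CREATE)
	hostarch.ByteOrder.PutUint32(hdr[8:], 0)          // cookie
	hostarch.ByteOrder.PutUint32(hdr[12:], 0)         // len: no name follows
	fmt.Printf("% x\n", hdr)
}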
-func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { o.mapsMu.Lock() defer o.mapsMu.Unlock() if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset, writable); err != nil { @@ -285,7 +285,7 @@ func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, a } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { o.mapsMu.Lock() defer o.mapsMu.Unlock() o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset, writable) @@ -293,7 +293,7 @@ func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace } // CopyMapping implements memmap.Mappable.CopyMapping. -func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { o.mapsMu.Lock() defer o.mapsMu.Unlock() if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil { @@ -304,7 +304,7 @@ func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, } // Translate implements memmap.Mappable.Translate. -func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { o.dataMu.RLock() defer o.dataMu.RUnlock() return o.inodeLocked().Mappable().Translate(ctx, required, optional, at) diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD index b8b2281a8..7af7e0b45 100644 --- a/pkg/sentry/fs/proc/BUILD +++ b/pkg/sentry/fs/proc/BUILD @@ -30,6 +30,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/sentry/fs", "//pkg/sentry/fs/fsutil", diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go index e6171dd1d..24426b225 100644 --- a/pkg/sentry/fs/proc/exec_args.go +++ b/pkg/sentry/fs/proc/exec_args.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/kernel" @@ -113,7 +114,7 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen defer m.DecUsers(ctx) // Figure out the bounds of the exec arg we are trying to read. - var execArgStart, execArgEnd usermem.Addr + var execArgStart, execArgEnd hostarch.Addr switch f.arg { case cmdlineExecArg: execArgStart, execArgEnd = m.ArgvStart(), m.ArgvEnd() @@ -172,8 +173,8 @@ func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen // https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208 // we'll return one page total between argv and envp because of the // above page restrictions. 
- if lengthEnvv > usermem.PageSize-len(buf) { - lengthEnvv = usermem.PageSize - len(buf) + if lengthEnvv > hostarch.PageSize-len(buf) { + lengthEnvv = hostarch.PageSize - len(buf) } // Make a new buffer to fit the whole thing tmp := make([]byte, length+lengthEnvv) diff --git a/pkg/sentry/fs/proc/inode.go b/pkg/sentry/fs/proc/inode.go index d2859a4c2..78132f7a5 100644 --- a/pkg/sentry/fs/proc/inode.go +++ b/pkg/sentry/fs/proc/inode.go @@ -17,13 +17,13 @@ package proc import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/mm" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange @@ -125,7 +125,7 @@ func newProcInode(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSo sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: typ, } if t != nil { diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go index 91617267d..7d975d333 100644 --- a/pkg/sentry/fs/proc/meminfo.go +++ b/pkg/sentry/fs/proc/meminfo.go @@ -19,10 +19,10 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/usage" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange @@ -53,7 +53,7 @@ func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) anon := snapshot.Anonymous + snapshot.Tmpfs file := snapshot.PageCache + snapshot.Mapped // We don't actually have active/inactive LRUs, so just make up numbers. - activeFile := (file / 2) &^ (usermem.PageSize - 1) + activeFile := (file / 2) &^ (hostarch.PageSize - 1) inactiveFile := file - activeFile var buf bytes.Buffer diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go index 203cfa061..91c35eea9 100644 --- a/pkg/sentry/fs/proc/net.go +++ b/pkg/sentry/fs/proc/net.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" @@ -35,7 +36,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange @@ -367,10 +367,10 @@ func (n *netRoute) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([] ) if len(rt.GatewayAddr) == header.IPv4AddressSize { flags |= linux.RTF_GATEWAY - gw = usermem.ByteOrder.Uint32(rt.GatewayAddr) + gw = hostarch.ByteOrder.Uint32(rt.GatewayAddr) } if len(rt.DstAddr) == header.IPv4AddressSize { - prefix = usermem.ByteOrder.Uint32(rt.DstAddr) + prefix = hostarch.ByteOrder.Uint32(rt.DstAddr) } l := fmt.Sprintf( "%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d", @@ -520,7 +520,7 @@ func networkToHost16(n uint16) uint16 { // binary.BigEndian.Uint16() require a read of binary.BigEndian and an // interface method call, defeating inlining. 
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)} - return usermem.ByteOrder.Uint16(buf[:]) + return hostarch.ByteOrder.Uint16(buf[:]) } func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { @@ -542,14 +542,14 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { // __be32 which is a typedef for an unsigned int, and is printed with // %X. This means that for a little-endian machine, Linux prints the // least-significant byte of the address first. To emulate this, we first - // invert the byte order for the address using usermem.ByteOrder.Uint32, + // invert the byte order for the address using hostarch.ByteOrder.Uint32, // which makes it have the equivalent encoding to a __be32 on a little // endian machine. Note that this operation is a no-op on a big endian // machine. Then similar to Linux, we format it with %X, which will print // the most-significant byte of the __be32 address first, which is now // actually the least-significant byte of the original address in // linux.SockAddrInet.Addr on little endian machines, due to the conversion. - addr := usermem.ByteOrder.Uint32(a.Addr[:]) + addr := hostarch.ByteOrder.Uint32(a.Addr[:]) fmt.Fprintf(w, "%08X:%04X ", addr, port) case linux.AF_INET6: @@ -559,10 +559,10 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { } port := networkToHost16(a.Port) - addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4]) - addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8]) - addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12]) - addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16]) + addr0 := hostarch.ByteOrder.Uint32(a.Addr[0:4]) + addr1 := hostarch.ByteOrder.Uint32(a.Addr[4:8]) + addr2 := hostarch.ByteOrder.Uint32(a.Addr[8:12]) + addr3 := hostarch.ByteOrder.Uint32(a.Addr[12:16]) fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port) } } diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD index 21338d912..713b81e08 100644 --- a/pkg/sentry/fs/proc/seqfile/BUILD +++ b/pkg/sentry/fs/proc/seqfile/BUILD @@ -9,6 +9,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/fs/fsutil", "//pkg/sentry/fs/proc/device", diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go index 6121f0e95..b01688b1d 100644 --- a/pkg/sentry/fs/proc/seqfile/seqfile.go +++ b/pkg/sentry/fs/proc/seqfile/seqfile.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" @@ -131,7 +132,7 @@ func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, iops, msrc, sattr) diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go index bbe282c03..1d09afdd7 100644 --- a/pkg/sentry/fs/proc/sys_net.go +++ b/pkg/sentry/fs/proc/sys_net.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" @@ -76,7 +77,7 @@ func newTCPMemInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack, dir sattr := fs.StableAttr{ 
DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, tm, msrc, sattr) @@ -136,7 +137,7 @@ func (f *tcpMemFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequen f.tcpMemInode.mu.Lock() defer f.tcpMemInode.mu.Unlock() - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) size, err := readSize(f.tcpMemInode.dir, f.tcpMemInode.s) if err != nil { return 0, err @@ -192,7 +193,7 @@ func newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *f sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, ts, msrc, sattr) @@ -264,7 +265,7 @@ func (f *tcpSackFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSeque // Only consider size of one memory page for input for performance reasons. // We are only reading if it's zero or not anyway. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -294,7 +295,7 @@ func newTCPRecoveryInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, ts, msrc, sattr) @@ -354,7 +355,7 @@ func (f *tcpRecoveryFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS if src.NumBytes() == 0 { return 0, nil } - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -413,7 +414,7 @@ func newIPForwardingInode(ctx context.Context, msrc *fs.MountSource, s inet.Stac sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, ipf, msrc, sattr) @@ -486,7 +487,7 @@ func (f *ipForwardingFile) Write(ctx context.Context, _ *fs.File, src usermem.IO // Only consider size of one memory page for input for performance reasons. // We are only reading if it's zero or not anyway. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -524,7 +525,7 @@ func newPortRangeInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) sattr := fs.StableAttr{ DeviceID: device.ProcDevice.DeviceID(), InodeID: device.ProcDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, ipf, msrc, sattr) @@ -589,7 +590,7 @@ func (pf *portRangeFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSe // Only consider size of one memory page for input for performance // reasons. 
- src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) ports := make([]int32, 2) n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts) diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go index f43d6c221..ae5ed25f9 100644 --- a/pkg/sentry/fs/proc/task.go +++ b/pkg/sentry/fs/proc/task.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" @@ -469,7 +470,7 @@ func (m *memDataFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen defer mm.DecUsers(ctx) // Buffer the read data because of MM locks buf := make([]byte, dst.NumBytes()) - n, readErr := mm.CopyIn(ctx, usermem.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true}) + n, readErr := mm.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true}) if n > 0 { if _, err := dst.CopyOut(ctx, buf[:n]); err != nil { return 0, syserror.EFAULT @@ -632,7 +633,7 @@ func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) rss = mm.ResidentSetSize() } }) - fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize) + fmt.Fprintf(&buf, "%d %d ", vss, rss/hostarch.PageSize) // rsslim. fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Limits().Get(limits.Rss).Cur) @@ -684,7 +685,7 @@ func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([ }) var buf bytes.Buffer - fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize) + fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize) return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statmData)(nil)}}, 0 } @@ -939,8 +940,8 @@ func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequenc buf := make([]byte, size) for i, e := range auxv { - usermem.ByteOrder.PutUint64(buf[16*i:], e.Key) - usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value)) + hostarch.ByteOrder.PutUint64(buf[16*i:], e.Key) + hostarch.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value)) } n, err := dst.CopyOut(ctx, buf[offset:]) @@ -1020,7 +1021,7 @@ func (f *oomScoreAdjFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS } // Limit input size so as not to impact performance if input size is large. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go index 2bc9485d8..30d5ad4cf 100644 --- a/pkg/sentry/fs/proc/uid_gid_map.go +++ b/pkg/sentry/fs/proc/uid_gid_map.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/kernel" @@ -132,7 +133,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u // the system page size, and the write must be performed at the start of // the file ..." 
- user_namespaces(7) srclen := src.NumBytes() - if srclen >= usermem.PageSize || offset != 0 { + if srclen >= hostarch.PageSize || offset != 0 { return 0, syserror.EINVAL } b := make([]byte, srclen) diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD index a51d00d86..4a3d9636b 100644 --- a/pkg/sentry/fs/ramfs/BUILD +++ b/pkg/sentry/fs/ramfs/BUILD @@ -14,13 +14,13 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/fs/anon", "//pkg/sentry/fs/fsutil", "//pkg/sentry/socket/unix/transport", "//pkg/sync", "//pkg/syserror", - "//pkg/usermem", "//pkg/waiter", "@org_golang_x_sys//unix:go_default_library", ], diff --git a/pkg/sentry/fs/ramfs/tree.go b/pkg/sentry/fs/ramfs/tree.go index dfc9d3453..0ace636c9 100644 --- a/pkg/sentry/fs/ramfs/tree.go +++ b/pkg/sentry/fs/ramfs/tree.go @@ -20,9 +20,9 @@ import ( "strings" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/anon" - "gvisor.dev/gvisor/pkg/usermem" ) // MakeDirectoryTree constructs a ramfs tree of all directories containing @@ -71,7 +71,7 @@ func emptyDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { return fs.NewInode(ctx, dir, msrc, fs.StableAttr{ DeviceID: anon.PseudoDevice.DeviceID(), InodeID: anon.PseudoDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Directory, }) } diff --git a/pkg/sentry/fs/sys/BUILD b/pkg/sentry/fs/sys/BUILD index f2e8b9932..fdbc5f912 100644 --- a/pkg/sentry/fs/sys/BUILD +++ b/pkg/sentry/fs/sys/BUILD @@ -14,11 +14,11 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/device", "//pkg/sentry/fs", "//pkg/sentry/fs/fsutil", "//pkg/sentry/fs/ramfs", "//pkg/sentry/kernel", - "//pkg/usermem", ], ) diff --git a/pkg/sentry/fs/sys/sys.go b/pkg/sentry/fs/sys/sys.go index 0891645e4..101779a7a 100644 --- a/pkg/sentry/fs/sys/sys.go +++ b/pkg/sentry/fs/sys/sys.go @@ -17,16 +17,16 @@ package sys import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" - "gvisor.dev/gvisor/pkg/usermem" ) func newFile(ctx context.Context, node fs.InodeOperations, msrc *fs.MountSource) *fs.Inode { sattr := fs.StableAttr{ DeviceID: sysfsDevice.DeviceID(), InodeID: sysfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialFile, } return fs.NewInode(ctx, node, msrc, sattr) @@ -37,7 +37,7 @@ func newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.I return fs.NewInode(ctx, d, msrc, fs.StableAttr{ DeviceID: sysfsDevice.DeviceID(), InodeID: sysfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.SpecialDirectory, }) } diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD index d16cdb4df..c7977a217 100644 --- a/pkg/sentry/fs/timerfd/BUILD +++ b/pkg/sentry/fs/timerfd/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//pkg/sentry:internal"], deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/fs/anon", "//pkg/sentry/fs/fsutil", diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go index 46511a6ac..c8ebe256c 100644 --- a/pkg/sentry/fs/timerfd/timerfd.go +++ b/pkg/sentry/fs/timerfd/timerfd.go @@ -20,6 +20,7 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" 
"gvisor.dev/gvisor/pkg/sentry/fs/anon" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" @@ -124,7 +125,7 @@ func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.I } if val := atomic.SwapUint64(&t.val, 0); val != 0 { var buf [sizeofUint64]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) if _, err := dst.CopyOut(ctx, buf[:]); err != nil { // Linux does not undo consuming the number of expirations even if // writing to userspace fails. diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD index b521a86a2..90398376a 100644 --- a/pkg/sentry/fs/tmpfs/BUILD +++ b/pkg/sentry/fs/tmpfs/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/safemem", "//pkg/sentry/device", "//pkg/sentry/fs", @@ -42,6 +43,7 @@ go_test( library = ":tmpfs", deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/kernel/contexttest", "//pkg/sentry/usage", diff --git a/pkg/sentry/fs/tmpfs/file_test.go b/pkg/sentry/fs/tmpfs/file_test.go index d4d613ea9..1718f9372 100644 --- a/pkg/sentry/fs/tmpfs/file_test.go +++ b/pkg/sentry/fs/tmpfs/file_test.go @@ -19,6 +19,7 @@ import ( "testing" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" "gvisor.dev/gvisor/pkg/sentry/usage" @@ -31,7 +32,7 @@ func newFileInode(ctx context.Context) *fs.Inode { return fs.NewInode(ctx, iops, m, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.RegularFile, }) } diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go index ad4aea282..f4de8c968 100644 --- a/pkg/sentry/fs/tmpfs/inode_file.go +++ b/pkg/sentry/fs/tmpfs/inode_file.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" @@ -125,7 +126,7 @@ func NewMemfdInode(ctx context.Context, allowSeals bool) *fs.Inode { Type: fs.RegularFile, DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, }) } @@ -392,7 +393,7 @@ func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { switch { case seg.Ok(): // Get internal mappings. - ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read) if err != nil { return done, err } @@ -463,7 +464,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) // // See Linux, mm/filemap.c:generic_perform_write() and // mm/shmem.c:shmem_write_begin(). - if pgstart := int64(usermem.Addr(rw.f.attr.Size).RoundDown()); end > pgstart { + if pgstart := int64(hostarch.Addr(rw.f.attr.Size).RoundDown()); end > pgstart { end = pgstart } if end <= rw.offset { @@ -483,8 +484,8 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) mf := rw.f.kernel.MemoryFile() // Page-aligned mr for when we need to allocate memory. RoundUp can't // overflow since end is an int64. 
- pgstartaddr := usermem.Addr(rw.offset).RoundDown() - pgendaddr, _ := usermem.Addr(end).RoundUp() + pgstartaddr := hostarch.Addr(rw.offset).RoundDown() + pgendaddr, _ := hostarch.Addr(end).RoundUp() pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)} var done uint64 @@ -494,7 +495,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) switch { case seg.Ok(): // Get internal mappings. - ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write) + ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Write) if err != nil { return done, err } @@ -527,7 +528,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) } // AddMapping implements memmap.Mappable.AddMapping. -func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { f.mapsMu.Lock() defer f.mapsMu.Unlock() @@ -544,7 +545,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS pagesBefore := f.writableMappingPages // ar is guaranteed to be page aligned per memmap.Mappable. - f.writableMappingPages += uint64(ar.Length() / usermem.PageSize) + f.writableMappingPages += uint64(ar.Length() / hostarch.PageSize) if f.writableMappingPages < pagesBefore { panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages)) @@ -555,7 +556,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { f.mapsMu.Lock() defer f.mapsMu.Unlock() @@ -565,7 +566,7 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi pagesBefore := f.writableMappingPages // ar is guaranteed to be page aligned per memmap.Mappable. - f.writableMappingPages -= uint64(ar.Length() / usermem.PageSize) + f.writableMappingPages -= uint64(ar.Length() / hostarch.PageSize) if f.writableMappingPages > pagesBefore { panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages)) @@ -574,12 +575,12 @@ func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.Mappi } // CopyMapping implements memmap.Mappable.CopyMapping. -func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return f.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. 
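The tmpfs write path above derives a page-aligned MappableRange from the byte range being written, using the hostarch.Addr rounding helpers that replace their usermem counterparts. A small sketch of that arithmetic with illustrative offsets, assuming a 4 KiB host page (the tmpfs Translate implementation follows below):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// A write covering bytes [6000, 10000) touches pages [4096, 12288) on a
	// host with 4 KiB pages.
	offset := hostarch.Addr(6000)
	end := hostarch.Addr(10000)

	pgstart := offset.RoundDown() // 4096
	pgend, ok := end.RoundUp()    // 12288; ok is false only on overflow

	fmt.Println(uint64(pgstart), uint64(pgend), ok)
}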
-func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { f.dataMu.Lock() defer f.dataMu.Unlock() @@ -612,7 +613,7 @@ func (f *fileInodeOperations) Translate(ctx context.Context, required, optional Source: segMR, File: mf, Offset: seg.FileRangeOf(segMR).Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }) translatedEnd = segMR.End } diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go index cf4ed5de0..577052888 100644 --- a/pkg/sentry/fs/tmpfs/tmpfs.go +++ b/pkg/sentry/fs/tmpfs/tmpfs.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" @@ -28,7 +29,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) var fsInfo = fs.Info{ @@ -41,8 +41,8 @@ var fsInfo = fs.Info{ // chosen to ensure that BlockSize * Blocks does not overflow int64 (which // applications may also handle incorrectly). // TODO(b/29637826): allow configuring a tmpfs size and enforce it. - TotalBlocks: math.MaxInt64 / usermem.PageSize, - FreeBlocks: math.MaxInt64 / usermem.PageSize, + TotalBlocks: math.MaxInt64 / hostarch.PageSize, + FreeBlocks: math.MaxInt64 / hostarch.PageSize, } // rename implements fs.InodeOperations.Rename for tmpfs nodes. @@ -99,7 +99,7 @@ func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwn return fs.NewInode(ctx, d, msrc, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Directory, }) } @@ -232,7 +232,7 @@ func (d *Dir) newCreateOps() *ramfs.CreateOps { return fs.NewInode(ctx, iops, dir.MountSource, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.RegularFile, }), nil }, @@ -281,7 +281,7 @@ func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs return fs.NewInode(ctx, s, msrc, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Symlink, }) } @@ -311,7 +311,7 @@ func NewSocket(ctx context.Context, socket transport.BoundEndpoint, owner fs.Fil return fs.NewInode(ctx, s, msrc, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Socket, }) } @@ -348,7 +348,7 @@ func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, return fs.NewInode(ctx, fifoIops, msrc, fs.StableAttr{ DeviceID: tmpfsDevice.DeviceID(), InodeID: tmpfsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Pipe, }) } diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD index e6d0eb359..86ada820e 100644 --- a/pkg/sentry/fs/tty/BUILD +++ b/pkg/sentry/fs/tty/BUILD @@ -17,6 +17,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/marshal/primitive", "//pkg/refs", "//pkg/safemem", diff --git 
a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go index c2da80bc2..13c9dbe7d 100644 --- a/pkg/sentry/fs/tty/dir.go +++ b/pkg/sentry/fs/tty/dir.go @@ -22,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -122,7 +123,7 @@ func newDir(ctx context.Context, m *fs.MountSource) *fs.Inode { // TODO(b/75267214): Since ptsDevice must be shared between // different mounts, we must not assign fixed numbers. InodeID: ptsDevice.NextIno(), - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, Type: fs.Directory, }) } diff --git a/pkg/sentry/fsimpl/eventfd/BUILD b/pkg/sentry/fsimpl/eventfd/BUILD index bcb01bb08..c09fdc7f9 100644 --- a/pkg/sentry/fsimpl/eventfd/BUILD +++ b/pkg/sentry/fsimpl/eventfd/BUILD @@ -10,6 +10,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fdnotifier", + "//pkg/hostarch", "//pkg/log", "//pkg/sentry/vfs", "//pkg/syserror", diff --git a/pkg/sentry/fsimpl/eventfd/eventfd.go b/pkg/sentry/fsimpl/eventfd/eventfd.go index 30bd05357..4f79cfcb7 100644 --- a/pkg/sentry/fsimpl/eventfd/eventfd.go +++ b/pkg/sentry/fsimpl/eventfd/eventfd.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" @@ -188,7 +189,7 @@ func (efd *EventFileDescription) read(ctx context.Context, dst usermem.IOSequenc efd.queue.Notify(waiter.WritableEvents) var buf [8]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) _, err := dst.CopyOut(ctx, buf[:]) return err } @@ -196,7 +197,7 @@ func (efd *EventFileDescription) read(ctx context.Context, dst usermem.IOSequenc // Preconditions: Must be called with efd.mu locked. 
func (efd *EventFileDescription) hostWriteLocked(val uint64) error { var buf [8]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) _, err := unix.Write(efd.hostfd, buf[:]) if err == unix.EWOULDBLOCK { return syserror.ErrWouldBlock @@ -209,7 +210,7 @@ func (efd *EventFileDescription) write(ctx context.Context, src usermem.IOSequen if _, err := src.CopyIn(ctx, buf[:]); err != nil { return err } - val := usermem.ByteOrder.Uint64(buf[:]) + val := hostarch.ByteOrder.Uint64(buf[:]) return efd.Signal(val) } diff --git a/pkg/sentry/fsimpl/fuse/BUILD b/pkg/sentry/fsimpl/fuse/BUILD index 155c0f56d..3a4777fbe 100644 --- a/pkg/sentry/fsimpl/fuse/BUILD +++ b/pkg/sentry/fsimpl/fuse/BUILD @@ -46,6 +46,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/refs", @@ -75,6 +76,7 @@ go_test( library = ":fuse", deps = [ "//pkg/abi/linux", + "//pkg/hostarch", "//pkg/marshal", "//pkg/sentry/fsimpl/testutil", "//pkg/sentry/kernel", diff --git a/pkg/sentry/fsimpl/fuse/read_write.go b/pkg/sentry/fsimpl/fuse/read_write.go index 23ce91849..66ea889f9 100644 --- a/pkg/sentry/fsimpl/fuse/read_write.go +++ b/pkg/sentry/fsimpl/fuse/read_write.go @@ -20,11 +20,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // ReadInPages sends FUSE_READ requests for the size after round it up to @@ -43,10 +43,10 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui } // Round up to a multiple of page size. - readSize, _ := usermem.PageRoundUp(uint64(size)) + readSize, _ := hostarch.PageRoundUp(uint64(size)) // One request cannnot exceed either maxRead or maxPages. - maxPages := fs.conn.maxRead >> usermem.PageShift + maxPages := fs.conn.maxRead >> hostarch.PageShift if maxPages > uint32(fs.conn.maxPages) { maxPages = uint32(fs.conn.maxPages) } @@ -54,9 +54,9 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui var outs [][]byte var sizeRead uint32 - // readSize is a multiple of usermem.PageSize. + // readSize is a multiple of hostarch.PageSize. // Always request bytes as a multiple of pages. - pagesRead, pagesToRead := uint32(0), uint32(readSize>>usermem.PageShift) + pagesRead, pagesToRead := uint32(0), uint32(readSize>>hostarch.PageShift) // Reuse the same struct for unmarshalling to avoid unnecessary memory allocation. in := linux.FUSEReadIn{ @@ -76,8 +76,8 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui pagesCanRead = maxPages } - in.Offset = off + (uint64(pagesRead) << usermem.PageShift) - in.Size = pagesCanRead << usermem.PageShift + in.Offset = off + (uint64(pagesRead) << hostarch.PageShift) + in.Size = pagesCanRead << hostarch.PageShift // TODO(gvisor.dev/issue/3247): support async read. @@ -159,7 +159,7 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64, } // One request cannnot exceed either maxWrite or maxPages. - maxWrite := uint32(fs.conn.maxPages) << usermem.PageShift + maxWrite := uint32(fs.conn.maxPages) << hostarch.PageShift if maxWrite > fs.conn.maxWrite { maxWrite = fs.conn.maxWrite } @@ -188,8 +188,8 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64, // Limit the write size to one page. 
// Note that the bigWrites flag is obsolete, // latest libfuse always sets it on. - if !fs.conn.bigWrites && toWrite > usermem.PageSize { - toWrite = usermem.PageSize + if !fs.conn.bigWrites && toWrite > hostarch.PageSize { + toWrite = hostarch.PageSize } // Limit the write size to maxWrite. diff --git a/pkg/sentry/fsimpl/fuse/request_response.go b/pkg/sentry/fsimpl/fuse/request_response.go index 10fb9d7d2..8a72489fa 100644 --- a/pkg/sentry/fsimpl/fuse/request_response.go +++ b/pkg/sentry/fsimpl/fuse/request_response.go @@ -19,10 +19,10 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" - "gvisor.dev/gvisor/pkg/usermem" ) // fuseInitRes is a variable-length wrapper of linux.FUSEInitOut. The FUSE @@ -45,29 +45,29 @@ func (r *fuseInitRes) UnmarshalBytes(src []byte) { out := &r.initOut // Introduced before FUSE kernel version 7.13. - out.Major = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.Major = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] - out.Minor = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.Minor = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] - out.MaxReadahead = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.MaxReadahead = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] - out.Flags = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.Flags = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] - out.MaxBackground = uint16(usermem.ByteOrder.Uint16(src[:2])) + out.MaxBackground = uint16(hostarch.ByteOrder.Uint16(src[:2])) src = src[2:] - out.CongestionThreshold = uint16(usermem.ByteOrder.Uint16(src[:2])) + out.CongestionThreshold = uint16(hostarch.ByteOrder.Uint16(src[:2])) src = src[2:] - out.MaxWrite = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.MaxWrite = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] // Introduced in FUSE kernel version 7.23. if len(src) >= 4 { - out.TimeGran = uint32(usermem.ByteOrder.Uint32(src[:4])) + out.TimeGran = uint32(hostarch.ByteOrder.Uint32(src[:4])) src = src[4:] } // Introduced in FUSE kernel version 7.28. if len(src) >= 2 { - out.MaxPages = uint16(usermem.ByteOrder.Uint16(src[:2])) + out.MaxPages = uint16(hostarch.ByteOrder.Uint16(src[:2])) src = src[2:] } _ = src // Remove unused warning. diff --git a/pkg/sentry/fsimpl/fuse/utils_test.go b/pkg/sentry/fsimpl/fuse/utils_test.go index 2c0cc0f4e..b0bab0066 100644 --- a/pkg/sentry/fsimpl/fuse/utils_test.go +++ b/pkg/sentry/fsimpl/fuse/utils_test.go @@ -24,7 +24,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/vfs" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) func setup(t *testing.T) *testutil.System { @@ -82,12 +83,12 @@ func (t *testPayload) SizeBytes() int { // MarshalBytes implements marshal.Marshallable.MarshalBytes. func (t *testPayload) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], t.data) + hostarch.ByteOrder.PutUint32(dst[:4], t.data) } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. func (t *testPayload) UnmarshalBytes(src []byte) { - *t = testPayload{data: usermem.ByteOrder.Uint32(src[:4])} + *t = testPayload{data: hostarch.ByteOrder.Uint32(src[:4])} } // Packed implements marshal.Marshallable.Packed. @@ -106,17 +107,17 @@ func (t *testPayload) UnmarshalUnsafe(src []byte) { } // CopyOutN implements marshal.Marshallable.CopyOutN. 
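The FUSE read and write paths above size every request in whole pages: ReadInPages rounds the requested length up with hostarch.PageRoundUp and caps it by maxRead and maxPages, both expressed via hostarch.PageShift. A compact sketch of that sizing with illustrative limits that are not taken from the diff (the marshalling stubs for the test payload continue below):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	size := uint32(10000)      // bytes requested by the caller
	maxRead := uint32(131072)  // illustrative connection read limit
	connMaxPages := uint32(32) // illustrative negotiated max pages

	// Round the request up to whole pages.
	readSize, _ := hostarch.PageRoundUp(uint64(size))
	pagesToRead := uint32(readSize >> hostarch.PageShift)

	// A single request may not exceed either limit, expressed in pages.
	maxPages := maxRead >> hostarch.PageShift
	if maxPages > connMaxPages {
		maxPages = connMaxPages
	}
	if pagesToRead > maxPages {
		pagesToRead = maxPages
	}

	// Pages per request and the corresponding byte count.
	fmt.Println(pagesToRead, pagesToRead<<hostarch.PageShift)
}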
-func (t *testPayload) CopyOutN(task marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (t *testPayload) CopyOutN(task marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) { panic("not implemented") } // CopyOut implements marshal.Marshallable.CopyOut. -func (t *testPayload) CopyOut(task marshal.CopyContext, addr usermem.Addr) (int, error) { +func (t *testPayload) CopyOut(task marshal.CopyContext, addr hostarch.Addr) (int, error) { panic("not implemented") } // CopyIn implements marshal.Marshallable.CopyIn. -func (t *testPayload) CopyIn(task marshal.CopyContext, addr usermem.Addr) (int, error) { +func (t *testPayload) CopyIn(task marshal.CopyContext, addr hostarch.Addr) (int, error) { panic("not implemented") } diff --git a/pkg/sentry/fsimpl/gofer/BUILD b/pkg/sentry/fsimpl/gofer/BUILD index 807b6ed1f..6d5258a9b 100644 --- a/pkg/sentry/fsimpl/gofer/BUILD +++ b/pkg/sentry/fsimpl/gofer/BUILD @@ -51,6 +51,7 @@ go_library( "//pkg/fd", "//pkg/fdnotifier", "//pkg/fspath", + "//pkg/hostarch", "//pkg/log", "//pkg/p9", "//pkg/refs", diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go index 9da01cba3..177e42649 100644 --- a/pkg/sentry/fsimpl/gofer/directory.go +++ b/pkg/sentry/fsimpl/gofer/directory.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/p9" "gvisor.dev/gvisor/pkg/refsvfs2" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -28,7 +29,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) func (d *dentry) isDir() bool { @@ -98,7 +98,7 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) { mode: uint32(opts.mode), uid: uint32(opts.kuid), gid: uint32(opts.kgid), - blockSize: usermem.PageSize, // arbitrary + blockSize: hostarch.PageSize, // arbitrary atime: now, mtime: now, ctime: now, diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go index 692da02c1..a0c05231a 100644 --- a/pkg/sentry/fsimpl/gofer/gofer.go +++ b/pkg/sentry/fsimpl/gofer/gofer.go @@ -44,6 +44,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/p9" refs_vfs1 "gvisor.dev/gvisor/pkg/refs" @@ -60,7 +61,6 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/unet" - "gvisor.dev/gvisor/pkg/usermem" ) // Name is the default filesystem name. @@ -872,7 +872,7 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma mode: uint32(attr.Mode), uid: uint32(fs.opts.dfltuid), gid: uint32(fs.opts.dfltgid), - blockSize: usermem.PageSize, + blockSize: hostarch.PageSize, readFD: -1, writeFD: -1, mmapFD: -1, @@ -1217,8 +1217,8 @@ func (d *dentry) updateSizeLocked(newSize uint64) { // so we can't race with Write or another truncate.) 
d.dataMu.Unlock() if d.size < oldSize { - oldpgend, _ := usermem.PageRoundUp(oldSize) - newpgend, _ := usermem.PageRoundUp(d.size) + oldpgend, _ := hostarch.PageRoundUp(oldSize) + newpgend, _ := hostarch.PageRoundUp(d.size) if oldpgend != newpgend { d.mapsMu.Lock() d.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{ diff --git a/pkg/sentry/fsimpl/gofer/regular_file.go b/pkg/sentry/fsimpl/gofer/regular_file.go index 4f1ad0c88..47563538c 100644 --- a/pkg/sentry/fsimpl/gofer/regular_file.go +++ b/pkg/sentry/fsimpl/gofer/regular_file.go @@ -22,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/p9" "gvisor.dev/gvisor/pkg/safemem" @@ -291,8 +292,8 @@ func (fd *regularFileFD) writeCache(ctx context.Context, d *dentry, offset int64 } // Remove touched pages from the cache. - pgstart := usermem.PageRoundDown(uint64(offset)) - pgend, ok := usermem.PageRoundUp(uint64(offset + src.NumBytes())) + pgstart := hostarch.PageRoundDown(uint64(offset)) + pgend, ok := hostarch.PageRoundUp(uint64(offset + src.NumBytes())) if !ok { return syserror.EINVAL } @@ -408,7 +409,7 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) switch { case seg.Ok(): // Get internal mappings from the cache. - ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read) if err != nil { dataMuUnlock() rw.d.handleMu.RUnlock() @@ -434,9 +435,9 @@ func (rw *dentryReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) if fillCache { // Read into the cache, then re-enter the loop to read from the // cache. - gapEnd, _ := usermem.PageRoundUp(gapMR.End) + gapEnd, _ := hostarch.PageRoundUp(gapMR.End) reqMR := memmap.MappableRange{ - Start: usermem.PageRoundDown(gapMR.Start), + Start: hostarch.PageRoundDown(gapMR.Start), End: gapEnd, } optMR := gap.Range() @@ -527,7 +528,7 @@ func (rw *dentryReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, erro case seg.Ok(): // Get internal mappings from the cache. segMR := seg.Range().Intersect(mr) - ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write) + ims, err := mf.MapInternal(seg.FileRangeOf(segMR), hostarch.Write) if err != nil { retErr = err goto exitLoop @@ -714,7 +715,7 @@ func (d *dentry) mayCachePages() bool { } // AddMapping implements memmap.Mappable.AddMapping. -func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { d.mapsMu.Lock() mapped := d.mappings.AddMapping(ms, ar, offset, writable) // Do this unconditionally since whether we have a host FD can change @@ -735,7 +736,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar user } // RemoveMapping implements memmap.Mappable.RemoveMapping. 
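updateSizeLocked above invalidates cached mappings only when a truncation drops whole pages, comparing the old and new sizes rounded up to page boundaries. A brief sketch of that range computation with illustrative sizes, assuming the hostarch and memmap APIs used in the diff (the gofer RemoveMapping implementation follows below):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/memmap"
)

func main() {
	oldSize := uint64(10000) // the file shrinks from parts of three pages...
	newSize := uint64(100)   // ...to part of a single page.

	oldpgend, _ := hostarch.PageRoundUp(oldSize) // 12288
	newpgend, _ := hostarch.PageRoundUp(newSize) // 4096

	if oldpgend != newpgend {
		// Pages entirely beyond the new size are invalidated so stale
		// translations cannot be used; a partial final page is kept.
		fmt.Println(memmap.MappableRange{Start: newpgend, End: oldpgend})
	}
}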
-func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { d.mapsMu.Lock() unmapped := d.mappings.RemoveMapping(ms, ar, offset, writable) for _, r := range unmapped { @@ -759,12 +760,12 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar u } // CopyMapping implements memmap.Mappable.CopyMapping. -func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return d.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. -func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { d.handleMu.RLock() if d.mmapFD >= 0 && !d.fs.opts.forcePageCache { d.handleMu.RUnlock() @@ -777,7 +778,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab Source: mr, File: &d.pf, Offset: mr.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, nil } @@ -786,7 +787,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab // Constrain translations to d.size (rounded up) to prevent translation to // pages that may be concurrently truncated. - pgend, _ := usermem.PageRoundUp(d.size) + pgend, _ := hostarch.PageRoundUp(d.size) var beyondEOF bool if required.End > pgend { if required.Start >= pgend { @@ -811,7 +812,7 @@ func (d *dentry) Translate(ctx context.Context, required, optional memmap.Mappab segMR := seg.Range().Intersect(optional) // TODO(jamieliu): Make Translations writable even if writability is // not required if already kept-dirty by another writable translation. - perms := usermem.AccessType{ + perms := hostarch.AccessType{ Read: true, Execute: true, } @@ -954,7 +955,7 @@ func (d *dentryPlatformFile) DecRef(fr memmap.FileRange) { } // MapInternal implements memmap.File.MapInternal. 
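The Translate implementations above now hand out hostarch.AccessType permissions, starting read/execute-only and widening to writable only when the fault requires it. A short sketch of composing those permission sets, assuming the exported hostarch values used in the diff (the MapInternal implementation follows below):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// Start read/execute-only, as the gofer Translate above does, and add
	// write permission only when the requested access needs it.
	perms := hostarch.AccessType{Read: true, Execute: true}
	at := hostarch.Write // access requested for this fault
	if at.Write {
		perms.Write = true
	}
	fmt.Println(perms, hostarch.AnyAccess.SupersetOf(perms))
}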
-func (d *dentryPlatformFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { +func (d *dentryPlatformFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) { d.handleMu.RLock() defer d.handleMu.RUnlock() return d.hostFileMapper.MapInternal(fr, int(d.mmapFD), at.Write) diff --git a/pkg/sentry/fsimpl/gofer/save_restore.go b/pkg/sentry/fsimpl/gofer/save_restore.go index c90071e4e..83e841a51 100644 --- a/pkg/sentry/fsimpl/gofer/save_restore.go +++ b/pkg/sentry/fsimpl/gofer/save_restore.go @@ -22,12 +22,12 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/p9" "gvisor.dev/gvisor/pkg/refsvfs2" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) type saveRestoreContextID int @@ -85,7 +85,7 @@ func (fs *filesystem) PrepareSave(ctx context.Context) error { func (fd *specialFileFD) savePipeData(ctx context.Context) error { fd.bufMu.Lock() defer fd.bufMu.Unlock() - var buf [usermem.PageSize]byte + var buf [hostarch.PageSize]byte for { n, err := fd.handle.readToBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:])), ^uint64(0)) if n != 0 { diff --git a/pkg/sentry/fsimpl/host/BUILD b/pkg/sentry/fsimpl/host/BUILD index 4ae9d6d5e..b94dfeb7f 100644 --- a/pkg/sentry/fsimpl/host/BUILD +++ b/pkg/sentry/fsimpl/host/BUILD @@ -47,6 +47,7 @@ go_library( "//pkg/context", "//pkg/fdnotifier", "//pkg/fspath", + "//pkg/hostarch", "//pkg/iovec", "//pkg/log", "//pkg/marshal/primitive", diff --git a/pkg/sentry/fsimpl/host/host.go b/pkg/sentry/fsimpl/host/host.go index b9cce4181..3b90375b6 100644 --- a/pkg/sentry/fsimpl/host/host.go +++ b/pkg/sentry/fsimpl/host/host.go @@ -26,6 +26,7 @@ import ( "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/hostfd" @@ -431,8 +432,8 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre } oldSize := uint64(hostStat.Size) if s.Size < oldSize { - oldpgend, _ := usermem.PageRoundUp(oldSize) - newpgend, _ := usermem.PageRoundUp(s.Size) + oldpgend, _ := hostarch.PageRoundUp(oldSize) + newpgend, _ := hostarch.PageRoundUp(s.Size) if oldpgend != newpgend { i.CachedMappable.InvalidateRange(memmap.MappableRange{newpgend, oldpgend}) } diff --git a/pkg/sentry/fsimpl/host/save_restore.go b/pkg/sentry/fsimpl/host/save_restore.go index 5688bddc8..31301c715 100644 --- a/pkg/sentry/fsimpl/host/save_restore.go +++ b/pkg/sentry/fsimpl/host/save_restore.go @@ -21,9 +21,9 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/hostfd" - "gvisor.dev/gvisor/pkg/usermem" ) // beforeSave is invoked by stateify. @@ -38,7 +38,7 @@ func (i *inode) beforeSave() { // EBADF from the read. 
i.bufMu.Lock() defer i.bufMu.Unlock() - var buf [usermem.PageSize]byte + var buf [hostarch.PageSize]byte for { n, err := hostfd.Preadv2(int32(i.hostFD), safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:])), -1 /* offset */, 0 /* flags */) if n != 0 { diff --git a/pkg/sentry/fsimpl/kernfs/BUILD b/pkg/sentry/fsimpl/kernfs/BUILD index 6dbc7e34d..b7d13cced 100644 --- a/pkg/sentry/fsimpl/kernfs/BUILD +++ b/pkg/sentry/fsimpl/kernfs/BUILD @@ -105,6 +105,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go index 6b890a39c..3d0866ecf 100644 --- a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go +++ b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go @@ -20,12 +20,12 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // InodeNoopRefCount partially implements the Inode interface, specifically the @@ -206,7 +206,7 @@ func (a *InodeAttrs) Init(ctx context.Context, creds *auth.Credentials, devMajor atomic.StoreUint32(&a.uid, uint32(creds.EffectiveKUID)) atomic.StoreUint32(&a.gid, uint32(creds.EffectiveKGID)) atomic.StoreUint32(&a.nlink, nlink) - atomic.StoreUint32(&a.blockSize, usermem.PageSize) + atomic.StoreUint32(&a.blockSize, hostarch.PageSize) now := ktime.NowFromContext(ctx).Nanoseconds() atomic.StoreInt64(&a.atime, now) atomic.StoreInt64(&a.mtime, now) diff --git a/pkg/sentry/fsimpl/kernfs/mmap_util.go b/pkg/sentry/fsimpl/kernfs/mmap_util.go index bd6a134b4..d1539d904 100644 --- a/pkg/sentry/fsimpl/kernfs/mmap_util.go +++ b/pkg/sentry/fsimpl/kernfs/mmap_util.go @@ -16,11 +16,11 @@ package kernfs import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // inodePlatformFile implements memmap.File. It exists solely because inode @@ -66,7 +66,7 @@ func (i *inodePlatformFile) DecRef(fr memmap.FileRange) { } // MapInternal implements memmap.File.MapInternal. -func (i *inodePlatformFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { +func (i *inodePlatformFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) { return i.fileMapper.MapInternal(fr, i.hostFD, at.Write) } @@ -100,7 +100,7 @@ func (i *CachedMappable) Init(hostFD int) { } // AddMapping implements memmap.Mappable.AddMapping. -func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { i.mapsMu.Lock() mapped := i.mappings.AddMapping(ms, ar, offset, writable) for _, r := range mapped { @@ -111,7 +111,7 @@ func (i *CachedMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, } // RemoveMapping implements memmap.Mappable.RemoveMapping. 
-func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { i.mapsMu.Lock() unmapped := i.mappings.RemoveMapping(ms, ar, offset, writable) for _, r := range unmapped { @@ -121,19 +121,19 @@ func (i *CachedMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpa } // CopyMapping implements memmap.Mappable.CopyMapping. -func (i *CachedMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (i *CachedMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return i.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. -func (i *CachedMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (i *CachedMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { mr := optional return []memmap.Translation{ { Source: mr, File: &i.pf, Offset: mr.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, nil } diff --git a/pkg/sentry/fsimpl/overlay/BUILD b/pkg/sentry/fsimpl/overlay/BUILD index bf13bbbf4..5504476c8 100644 --- a/pkg/sentry/fsimpl/overlay/BUILD +++ b/pkg/sentry/fsimpl/overlay/BUILD @@ -30,6 +30,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/fsimpl/overlay/copy_up.go b/pkg/sentry/fsimpl/overlay/copy_up.go index 27b00cf6f..45aa5a494 100644 --- a/pkg/sentry/fsimpl/overlay/copy_up.go +++ b/pkg/sentry/fsimpl/overlay/copy_up.go @@ -21,11 +21,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) func (d *dentry) isCopiedUp() bool { @@ -138,8 +138,8 @@ func (d *dentry) copyUpLocked(ctx context.Context) error { // We may have memory mappings of the file on the lower layer. // Switch to mapping the file on the upper layer instead. mmapOpts = &memmap.MMapOpts{ - Perms: usermem.ReadWrite, - MaxPerms: usermem.ReadWrite, + Perms: hostarch.ReadWrite, + MaxPerms: hostarch.ReadWrite, } if err := newFD.ConfigureMMap(ctx, mmapOpts); err != nil { cleanupUndoCopyUp() diff --git a/pkg/sentry/fsimpl/overlay/regular_file.go b/pkg/sentry/fsimpl/overlay/regular_file.go index d791c06db..43bfd69a3 100644 --- a/pkg/sentry/fsimpl/overlay/regular_file.go +++ b/pkg/sentry/fsimpl/overlay/regular_file.go @@ -19,6 +19,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -445,7 +446,7 @@ func (fd *regularFileFD) ensureMappable(ctx context.Context, opts *memmap.MMapOp } // AddMapping implements memmap.Mappable.AddMapping. 
-func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { d.mapsMu.Lock() defer d.mapsMu.Unlock() if err := d.wrappedMappable.AddMapping(ctx, ms, ar, offset, writable); err != nil { @@ -458,7 +459,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar user } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { d.mapsMu.Lock() defer d.mapsMu.Unlock() d.wrappedMappable.RemoveMapping(ctx, ms, ar, offset, writable) @@ -468,7 +469,7 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar u } // CopyMapping implements memmap.Mappable.CopyMapping. -func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { d.mapsMu.Lock() defer d.mapsMu.Unlock() if err := d.wrappedMappable.CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil { @@ -481,7 +482,7 @@ func (d *dentry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, } // Translate implements memmap.Mappable.Translate. -func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (d *dentry) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { d.dataMu.RLock() defer d.dataMu.RUnlock() return d.wrappedMappable.Translate(ctx, required, optional, at) diff --git a/pkg/sentry/fsimpl/pipefs/BUILD b/pkg/sentry/fsimpl/pipefs/BUILD index 5950a2d59..278ee3c92 100644 --- a/pkg/sentry/fsimpl/pipefs/BUILD +++ b/pkg/sentry/fsimpl/pipefs/BUILD @@ -10,12 +10,12 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/sentry/fsimpl/kernfs", "//pkg/sentry/kernel/auth", "//pkg/sentry/kernel/pipe", "//pkg/sentry/kernel/time", "//pkg/sentry/vfs", "//pkg/syserror", - "//pkg/usermem", ], ) diff --git a/pkg/sentry/fsimpl/pipefs/pipefs.go b/pkg/sentry/fsimpl/pipefs/pipefs.go index 3f05e444e..08aedc2ad 100644 --- a/pkg/sentry/fsimpl/pipefs/pipefs.go +++ b/pkg/sentry/fsimpl/pipefs/pipefs.go @@ -22,13 +22,13 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/kernel/pipe" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // +stateify savable @@ -131,7 +131,7 @@ func (i *inode) Stat(_ context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOpti ts := linux.NsecToStatxTimestamp(i.ctime.Nanoseconds()) return linux.Statx{ Mask: linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_INO | 
linux.STATX_SIZE | linux.STATX_BLOCKS, - Blksize: usermem.PageSize, + Blksize: hostarch.PageSize, Nlink: 1, UID: uint32(i.uid), GID: uint32(i.gid), diff --git a/pkg/sentry/fsimpl/proc/BUILD b/pkg/sentry/fsimpl/proc/BUILD index d47a4fff9..2b628bd55 100644 --- a/pkg/sentry/fsimpl/proc/BUILD +++ b/pkg/sentry/fsimpl/proc/BUILD @@ -81,6 +81,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/fsimpl/proc/task_files.go b/pkg/sentry/fsimpl/proc/task_files.go index fdae163d1..85909d551 100644 --- a/pkg/sentry/fsimpl/proc/task_files.go +++ b/pkg/sentry/fsimpl/proc/task_files.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fsbridge" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" @@ -122,8 +123,8 @@ func (d *auxvData) Generate(ctx context.Context, buf *bytes.Buffer) error { buf.Grow((len(auxv) + 1) * 16) for _, e := range auxv { var tmp [16]byte - usermem.ByteOrder.PutUint64(tmp[:8], e.Key) - usermem.ByteOrder.PutUint64(tmp[8:], uint64(e.Value)) + hostarch.ByteOrder.PutUint64(tmp[:8], e.Key) + hostarch.ByteOrder.PutUint64(tmp[8:], uint64(e.Value)) buf.Write(tmp[:]) } var atNull [16]byte @@ -168,15 +169,15 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error { defer m.DecUsers(ctx) // Figure out the bounds of the exec arg we are trying to read. - var ar usermem.AddrRange + var ar hostarch.AddrRange switch d.arg { case cmdlineDataArg: - ar = usermem.AddrRange{ + ar = hostarch.AddrRange{ Start: m.ArgvStart(), End: m.ArgvEnd(), } case environDataArg: - ar = usermem.AddrRange{ + ar = hostarch.AddrRange{ Start: m.EnvvStart(), End: m.EnvvEnd(), } @@ -192,7 +193,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error { // until Linux 4.9 (272ddc8b3735 "proc: don't use FOLL_FORCE for reading // cmdline and environment"). writer := &bufferWriter{buf: buf} - if n, err := m.CopyInTo(ctx, usermem.AddrRangeSeqOf(ar), writer, usermem.IOOpts{}); n == 0 || err != nil { + if n, err := m.CopyInTo(ctx, hostarch.AddrRangeSeqOf(ar), writer, usermem.IOOpts{}); n == 0 || err != nil { // Nothing to copy or something went wrong. return err } @@ -209,7 +210,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error { } // There is no NULL terminator in the string, return into envp. - arEnvv := usermem.AddrRange{ + arEnvv := hostarch.AddrRange{ Start: m.EnvvStart(), End: m.EnvvEnd(), } @@ -218,11 +219,11 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error { // https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208 // we'll return one page total between argv and envp because of the // above page restrictions. - if buf.Len() >= usermem.PageSize { + if buf.Len() >= hostarch.PageSize { // Returned at least one page already, nothing else to add. 
return nil } - remaining := usermem.PageSize - buf.Len() + remaining := hostarch.PageSize - buf.Len() if int(arEnvv.Length()) > remaining { end, ok := arEnvv.Start.AddLength(uint64(remaining)) if !ok { @@ -230,7 +231,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error { } arEnvv.End = end } - if _, err := m.CopyInTo(ctx, usermem.AddrRangeSeqOf(arEnvv), writer, usermem.IOOpts{}); err != nil { + if _, err := m.CopyInTo(ctx, hostarch.AddrRangeSeqOf(arEnvv), writer, usermem.IOOpts{}); err != nil { return err } @@ -323,7 +324,7 @@ func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset in // the system page size, and the write must be performed at the start of // the file ..." - user_namespaces(7) srclen := src.NumBytes() - if srclen >= usermem.PageSize || offset != 0 { + if srclen >= hostarch.PageSize || offset != 0 { return 0, syserror.EINVAL } b := make([]byte, srclen) @@ -481,7 +482,7 @@ func (fd *memFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64 defer m.DecUsers(ctx) // Buffer the read data because of MM locks buf := make([]byte, dst.NumBytes()) - n, readErr := m.CopyIn(ctx, usermem.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true}) + n, readErr := m.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true}) if n > 0 { if _, err := dst.CopyOut(ctx, buf[:n]); err != nil { return 0, syserror.EFAULT @@ -613,7 +614,7 @@ func (s *taskStatData) Generate(ctx context.Context, buf *bytes.Buffer) error { rss = mm.ResidentSetSize() } }) - fmt.Fprintf(buf, "%d %d ", vss, rss/usermem.PageSize) + fmt.Fprintf(buf, "%d %d ", vss, rss/hostarch.PageSize) // rsslim. fmt.Fprintf(buf, "%d ", s.task.ThreadGroup().Limits().Get(limits.Rss).Cur) @@ -655,7 +656,7 @@ func (s *statmData) Generate(ctx context.Context, buf *bytes.Buffer) error { } }) - fmt.Fprintf(buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize) + fmt.Fprintf(buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize) return nil } @@ -774,7 +775,7 @@ func (o *oomScoreAdj) Write(ctx context.Context, src usermem.IOSequence, offset } // Limit input size so as not to impact performance if input size is large. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) diff --git a/pkg/sentry/fsimpl/proc/task_net.go b/pkg/sentry/fsimpl/proc/task_net.go index d4f6a5a9b..177cb828f 100644 --- a/pkg/sentry/fsimpl/proc/task_net.go +++ b/pkg/sentry/fsimpl/proc/task_net.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/inet" @@ -34,7 +35,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/usermem" ) func (fs *filesystem) newTaskNetDir(ctx context.Context, task *kernel.Task) kernfs.Inode { @@ -295,7 +295,7 @@ func networkToHost16(n uint16) uint16 { // binary.BigEndian.Uint16() require a read of binary.BigEndian and an // interface method call, defeating inlining. 
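 // For illustration only (assuming a little-endian host, where
 // hostarch.ByteOrder is binary.LittleEndian): a network-order port 80 is
 // loaded natively as n = 0x5000; buf below becomes [0x50, 0x00], and
 // ByteOrder.Uint16 returns 0x0050 == 80, matching what ntohs(3) produces.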
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)} - return usermem.ByteOrder.Uint16(buf[:]) + return hostarch.ByteOrder.Uint16(buf[:]) } func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { @@ -317,14 +317,14 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { // __be32 which is a typedef for an unsigned int, and is printed with // %X. This means that for a little-endian machine, Linux prints the // least-significant byte of the address first. To emulate this, we first - // invert the byte order for the address using usermem.ByteOrder.Uint32, + // invert the byte order for the address using hostarch.ByteOrder.Uint32, // which makes it have the equivalent encoding to a __be32 on a little // endian machine. Note that this operation is a no-op on a big endian // machine. Then similar to Linux, we format it with %X, which will print // the most-significant byte of the __be32 address first, which is now // actually the least-significant byte of the original address in // linux.SockAddrInet.Addr on little endian machines, due to the conversion. - addr := usermem.ByteOrder.Uint32(a.Addr[:]) + addr := hostarch.ByteOrder.Uint32(a.Addr[:]) fmt.Fprintf(w, "%08X:%04X ", addr, port) case linux.AF_INET6: @@ -334,10 +334,10 @@ func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { } port := networkToHost16(a.Port) - addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4]) - addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8]) - addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12]) - addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16]) + addr0 := hostarch.ByteOrder.Uint32(a.Addr[0:4]) + addr1 := hostarch.ByteOrder.Uint32(a.Addr[4:8]) + addr2 := hostarch.ByteOrder.Uint32(a.Addr[8:12]) + addr3 := hostarch.ByteOrder.Uint32(a.Addr[12:16]) fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port) } } @@ -739,10 +739,10 @@ func (d *netRouteData) Generate(ctx context.Context, buf *bytes.Buffer) error { ) if len(rt.GatewayAddr) == header.IPv4AddressSize { flags |= linux.RTF_GATEWAY - gw = usermem.ByteOrder.Uint32(rt.GatewayAddr) + gw = hostarch.ByteOrder.Uint32(rt.GatewayAddr) } if len(rt.DstAddr) == header.IPv4AddressSize { - prefix = usermem.ByteOrder.Uint32(rt.DstAddr) + prefix = hostarch.ByteOrder.Uint32(rt.DstAddr) } l := fmt.Sprintf( "%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d", diff --git a/pkg/sentry/fsimpl/proc/tasks_files.go b/pkg/sentry/fsimpl/proc/tasks_files.go index 01b7a6678..f0029cda6 100644 --- a/pkg/sentry/fsimpl/proc/tasks_files.go +++ b/pkg/sentry/fsimpl/proc/tasks_files.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -28,7 +29,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // +stateify savable @@ -270,7 +270,7 @@ func (*meminfoData) Generate(ctx context.Context, buf *bytes.Buffer) error { anon := snapshot.Anonymous + snapshot.Tmpfs file := snapshot.PageCache + snapshot.Mapped // We don't actually have active/inactive LRUs, so just make up numbers. 
- activeFile := (file / 2) &^ (usermem.PageSize - 1) + activeFile := (file / 2) &^ (hostarch.PageSize - 1) inactiveFile := file - activeFile fmt.Fprintf(buf, "MemTotal: %8d kB\n", totalSize/1024) diff --git a/pkg/sentry/fsimpl/proc/tasks_sys.go b/pkg/sentry/fsimpl/proc/tasks_sys.go index fb274b78e..9b14dd6b9 100644 --- a/pkg/sentry/fsimpl/proc/tasks_sys.go +++ b/pkg/sentry/fsimpl/proc/tasks_sys.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/sentry/kernel" @@ -214,7 +215,7 @@ func (d *tcpSackData) Write(ctx context.Context, src usermem.IOSequence, offset } // Limit the amount of memory allocated. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -262,7 +263,7 @@ func (d *tcpRecoveryData) Write(ctx context.Context, src usermem.IOSequence, off } // Limit the amount of memory allocated. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -318,7 +319,7 @@ func (d *tcpMemData) Write(ctx context.Context, src usermem.IOSequence, offset i defer d.mu.Unlock() // Limit the amount of memory allocated. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) size, err := d.readSizeLocked() if err != nil { return 0, err @@ -406,7 +407,7 @@ func (ipf *ipForwarding) Write(ctx context.Context, src usermem.IOSequence, offs } // Limit input size so as not to impact performance if input size is large. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) @@ -463,7 +464,7 @@ func (pr *portRange) Write(ctx context.Context, src usermem.IOSequence, offset i // Limit input size so as not to impact performance if input size is // large. - src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) ports := make([]int32, 2) n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts) diff --git a/pkg/sentry/fsimpl/proc/yama.go b/pkg/sentry/fsimpl/proc/yama.go index aebfe8944..e039ec45e 100644 --- a/pkg/sentry/fsimpl/proc/yama.go +++ b/pkg/sentry/fsimpl/proc/yama.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -62,7 +63,7 @@ func (s *yamaPtraceScope) Write(ctx context.Context, src usermem.IOSequence, off } // Limit the amount of memory allocated. 
- src = src.TakeFirst(usermem.PageSize - 1) + src = src.TakeFirst(hostarch.PageSize - 1) var v int32 n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) diff --git a/pkg/sentry/fsimpl/testutil/BUILD b/pkg/sentry/fsimpl/testutil/BUILD index 400a97996..b3f9d1010 100644 --- a/pkg/sentry/fsimpl/testutil/BUILD +++ b/pkg/sentry/fsimpl/testutil/BUILD @@ -15,6 +15,7 @@ go_library( "//pkg/context", "//pkg/cpuid", "//pkg/fspath", + "//pkg/hostarch", "//pkg/memutil", "//pkg/sentry/fsbridge", "//pkg/sentry/fsimpl/tmpfs", diff --git a/pkg/sentry/fsimpl/testutil/testutil.go b/pkg/sentry/fsimpl/testutil/testutil.go index 1a8525b06..59e6f9c92 100644 --- a/pkg/sentry/fsimpl/testutil/testutil.go +++ b/pkg/sentry/fsimpl/testutil/testutil.go @@ -30,6 +30,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // System represents the context for a single test. @@ -105,7 +107,7 @@ func (s *System) Destroy() { // ReadToEnd reads the contents of fd until EOF to a string. func (s *System) ReadToEnd(fd *vfs.FileDescription) (string, error) { - buf := make([]byte, usermem.PageSize) + buf := make([]byte, hostarch.PageSize) bufIOSeq := usermem.BytesIOSequence(buf) opts := vfs.ReadOptions{} diff --git a/pkg/sentry/fsimpl/timerfd/BUILD b/pkg/sentry/fsimpl/timerfd/BUILD index fbb02a271..7ce7dc429 100644 --- a/pkg/sentry/fsimpl/timerfd/BUILD +++ b/pkg/sentry/fsimpl/timerfd/BUILD @@ -8,6 +8,7 @@ go_library( visibility = ["//pkg/sentry:internal"], deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/kernel/time", "//pkg/sentry/vfs", "//pkg/syserror", diff --git a/pkg/sentry/fsimpl/timerfd/timerfd.go b/pkg/sentry/fsimpl/timerfd/timerfd.go index 64d33c3a8..cbb8b67c5 100644 --- a/pkg/sentry/fsimpl/timerfd/timerfd.go +++ b/pkg/sentry/fsimpl/timerfd/timerfd.go @@ -19,6 +19,7 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" @@ -72,7 +73,7 @@ func (tfd *TimerFileDescription) Read(ctx context.Context, dst usermem.IOSequenc } if val := atomic.SwapUint64(&tfd.val, 0); val != 0 { var buf [sizeofUint64]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) if _, err := dst.CopyOut(ctx, buf[:]); err != nil { // Linux does not undo consuming the number of // expirations even if writing to userspace fails. diff --git a/pkg/sentry/fsimpl/tmpfs/BUILD b/pkg/sentry/fsimpl/tmpfs/BUILD index 09957c2b7..e21fddd7f 100644 --- a/pkg/sentry/fsimpl/tmpfs/BUILD +++ b/pkg/sentry/fsimpl/tmpfs/BUILD @@ -59,6 +59,7 @@ go_library( "//pkg/amutex", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file.go b/pkg/sentry/fsimpl/tmpfs/regular_file.go index a6d161882..cd849e87e 100644 --- a/pkg/sentry/fsimpl/tmpfs/regular_file.go +++ b/pkg/sentry/fsimpl/tmpfs/regular_file.go @@ -22,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" @@ -224,7 +225,7 @@ func (rf *regularFile) truncateLocked(newSize uint64) (bool, error) { } // AddMapping implements memmap.Mappable.AddMapping. 
-func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { +func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error { rf.mapsMu.Lock() defer rf.mapsMu.Unlock() rf.dataMu.RLock() @@ -240,7 +241,7 @@ func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, a pagesBefore := rf.writableMappingPages // ar is guaranteed to be page aligned per memmap.Mappable. - rf.writableMappingPages += uint64(ar.Length() / usermem.PageSize) + rf.writableMappingPages += uint64(ar.Length() / hostarch.PageSize) if rf.writableMappingPages < pagesBefore { panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, rf.writableMappingPages)) @@ -251,7 +252,7 @@ func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, a } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { +func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) { rf.mapsMu.Lock() defer rf.mapsMu.Unlock() @@ -261,7 +262,7 @@ func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace pagesBefore := rf.writableMappingPages // ar is guaranteed to be page aligned per memmap.Mappable. - rf.writableMappingPages -= uint64(ar.Length() / usermem.PageSize) + rf.writableMappingPages -= uint64(ar.Length() / hostarch.PageSize) if rf.writableMappingPages > pagesBefore { panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, rf.writableMappingPages)) @@ -270,12 +271,12 @@ func (rf *regularFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace } // CopyMapping implements memmap.Mappable.CopyMapping. -func (rf *regularFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { +func (rf *regularFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error { return rf.AddMapping(ctx, ms, dstAR, offset, writable) } // Translate implements memmap.Mappable.Translate. -func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { rf.dataMu.Lock() defer rf.dataMu.Unlock() @@ -307,7 +308,7 @@ func (rf *regularFile) Translate(ctx context.Context, required, optional memmap. Source: segMR, File: rf.memFile, Offset: seg.FileRangeOf(segMR).Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }) translatedEnd = segMR.End } @@ -539,7 +540,7 @@ func (rw *regularFileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, er switch { case seg.Ok(): // Get internal mappings. 
- ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Read) if err != nil { return done, err } @@ -608,7 +609,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, // // See Linux, mm/filemap.c:generic_perform_write() and // mm/shmem.c:shmem_write_begin(). - if pgstart := uint64(usermem.Addr(rw.file.size).RoundDown()); end > pgstart { + if pgstart := uint64(hostarch.Addr(rw.file.size).RoundDown()); end > pgstart { end = pgstart } if end <= rw.off { @@ -619,8 +620,8 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, // Page-aligned mr for when we need to allocate memory. RoundUp can't // overflow since end is an int64. - pgstartaddr := usermem.Addr(rw.off).RoundDown() - pgendaddr, _ := usermem.Addr(end).RoundUp() + pgstartaddr := hostarch.Addr(rw.off).RoundDown() + pgendaddr, _ := hostarch.Addr(end).RoundUp() pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)} var ( @@ -633,7 +634,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, switch { case seg.Ok(): // Get internal mappings. - ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write) + ims, err := rw.file.memFile.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), hostarch.Write) if err != nil { retErr = err goto exitLoop diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go index 8df81f589..9ae25ce9e 100644 --- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go +++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go @@ -36,6 +36,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/pgalloc" @@ -43,7 +44,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs/memxattr" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // Name is the default filesystem name. @@ -252,8 +252,8 @@ func (d *dentry) releaseChildrenLocked(ctx context.Context) { // immutable var globalStatfs = linux.Statfs{ Type: linux.TMPFS_MAGIC, - BlockSize: usermem.PageSize, - FragmentSize: usermem.PageSize, + BlockSize: hostarch.PageSize, + FragmentSize: hostarch.PageSize, NameLength: linux.NAME_MAX, // tmpfs currently does not support configurable size limits. In Linux, @@ -263,9 +263,9 @@ var globalStatfs = linux.Statfs{ // chosen to ensure that BlockSize * Blocks does not overflow int64 (which // applications may also handle incorrectly). // TODO(b/29637826): allow configuring a tmpfs size and enforce it. - Blocks: math.MaxInt64 / usermem.PageSize, - BlocksFree: math.MaxInt64 / usermem.PageSize, - BlocksAvailable: math.MaxInt64 / usermem.PageSize, + Blocks: math.MaxInt64 / hostarch.PageSize, + BlocksFree: math.MaxInt64 / hostarch.PageSize, + BlocksAvailable: math.MaxInt64 / hostarch.PageSize, } // dentry implements vfs.DentryImpl. 
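The tmpfs write path above clamps the write to the current file size and then page-aligns the range it may need to allocate, using the hostarch.Addr rounding helpers that replaced their usermem counterparts. A minimal sketch of that alignment arithmetic, assuming the gvisor.dev/gvisor module is importable, the usual 4 KiB page size, and made-up offsets:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// A write covering bytes [off, end) may require newly allocated pages, so
	// the candidate range is expanded outward to page boundaries, mirroring
	// regularFileReadWriter.WriteFromBlocks above. Offsets are illustrative.
	off, end := int64(5000), int64(9000)
	pgstart := hostarch.Addr(off).RoundDown() // rounds down to 4096
	pgend, ok := hostarch.Addr(end).RoundUp() // rounds up to 12288; ok is false on overflow
	if !ok {
		panic("page rounding overflowed")
	}
	fmt.Println(uint64(pgstart), uint64(pgend)) // 4096 12288
}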
@@ -485,7 +485,7 @@ func (i *inode) statTo(stat *linux.Statx) { linux.STATX_UID | linux.STATX_GID | linux.STATX_INO | linux.STATX_SIZE | linux.STATX_BLOCKS | linux.STATX_ATIME | linux.STATX_CTIME | linux.STATX_MTIME - stat.Blksize = usermem.PageSize + stat.Blksize = hostarch.PageSize stat.Nlink = atomic.LoadUint32(&i.nlink) stat.UID = atomic.LoadUint32(&i.uid) stat.GID = atomic.LoadUint32(&i.gid) diff --git a/pkg/sentry/fsimpl/verity/BUILD b/pkg/sentry/fsimpl/verity/BUILD index e265be0ee..2da251233 100644 --- a/pkg/sentry/fsimpl/verity/BUILD +++ b/pkg/sentry/fsimpl/verity/BUILD @@ -14,6 +14,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/marshal/primitive", "//pkg/merkletree", "//pkg/refsvfs2", diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go index 0d9b0ee2c..a7d92a878 100644 --- a/pkg/sentry/fsimpl/verity/verity.go +++ b/pkg/sentry/fsimpl/verity/verity.go @@ -55,6 +55,8 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) const ( @@ -1033,7 +1035,7 @@ func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) { } // measureVerity returns the hash of fd, saved in verityDigest. -func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest usermem.Addr) (uintptr, error) { +func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest hostarch.Addr) (uintptr, error) { t := kernel.TaskFromContext(ctx) if t == nil { return 0, syserror.EINVAL @@ -1072,11 +1074,11 @@ func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest userm } // Now copy the root hash bytes to the memory after metadata. - _, err := t.CopyOutBytes(usermem.Addr(uintptr(verityDigest)+linux.SizeOfDigestMetadata), fd.d.hash) + _, err := t.CopyOutBytes(hostarch.Addr(uintptr(verityDigest)+linux.SizeOfDigestMetadata), fd.d.hash) return 0, err } -func (fd *fileDescription) verityFlags(ctx context.Context, flags usermem.Addr) (uintptr, error) { +func (fd *fileDescription) verityFlags(ctx context.Context, flags hostarch.Addr) (uintptr, error) { f := int32(0) fd.d.hashMu.RLock() diff --git a/pkg/sentry/hostmm/BUILD b/pkg/sentry/hostmm/BUILD index 300b7ccce..66fa1ad40 100644 --- a/pkg/sentry/hostmm/BUILD +++ b/pkg/sentry/hostmm/BUILD @@ -13,8 +13,8 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/fd", + "//pkg/hostarch", "//pkg/log", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/hostmm/hostmm.go b/pkg/sentry/hostmm/hostmm.go index c47b96b54..285ea9050 100644 --- a/pkg/sentry/hostmm/hostmm.go +++ b/pkg/sentry/hostmm/hostmm.go @@ -23,8 +23,8 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" - "gvisor.dev/gvisor/pkg/usermem" ) // NotifyCurrentMemcgPressureCallback requests that f is called whenever the @@ -88,7 +88,7 @@ func NotifyCurrentMemcgPressureCallback(f func(), level string) (func(), error) if n != sizeofUint64 { panic(fmt.Sprintf("short read from memory pressure level eventfd: got %d bytes, wanted %d", n, sizeofUint64)) } - val := usermem.ByteOrder.Uint64(buf[:]) + val := hostarch.ByteOrder.Uint64(buf[:]) if val >= stopVal { // Assume this was due to the notifier's "destructor" (the // function returned by NotifyCurrentMemcgPressureCallback @@ -103,7 +103,7 @@ func NotifyCurrentMemcgPressureCallback(f func(), level string) (func(), error) return func() { rw := 
fd.NewReadWriter(eventFD.FD()) var buf [sizeofUint64]byte - usermem.ByteOrder.PutUint64(buf[:], stopVal) + hostarch.ByteOrder.PutUint64(buf[:], stopVal) for { n, err := rw.Write(buf[:]) if err != nil { diff --git a/pkg/sentry/kernel/BUILD b/pkg/sentry/kernel/BUILD index c53e3e720..e9eb89378 100644 --- a/pkg/sentry/kernel/BUILD +++ b/pkg/sentry/kernel/BUILD @@ -226,6 +226,7 @@ go_library( "//pkg/eventchannel", "//pkg/fspath", "//pkg/goid", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", @@ -294,6 +295,7 @@ go_test( deps = [ "//pkg/abi", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/arch", "//pkg/sentry/contexttest", "//pkg/sentry/fs", @@ -305,6 +307,5 @@ go_test( "//pkg/sentry/usage", "//pkg/sync", "//pkg/syserror", - "//pkg/usermem", ], ) diff --git a/pkg/sentry/kernel/eventfd/BUILD b/pkg/sentry/kernel/eventfd/BUILD index 7ecbd29ab..564c3d42e 100644 --- a/pkg/sentry/kernel/eventfd/BUILD +++ b/pkg/sentry/kernel/eventfd/BUILD @@ -10,6 +10,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fdnotifier", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/fs/anon", "//pkg/sentry/fs/fsutil", diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go index 2aca02fd5..4466fbc9d 100644 --- a/pkg/sentry/kernel/eventfd/eventfd.go +++ b/pkg/sentry/kernel/eventfd/eventfd.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/fs/anon" "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" @@ -186,7 +187,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro e.wq.Notify(waiter.WritableEvents) var buf [8]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) _, err := dst.CopyOut(ctx, buf[:]) return err } @@ -194,7 +195,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro // Must be called with e.mu locked. 
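 // For illustration only: the host eventfd(2) consumes the value as 8 bytes in
 // native byte order, so on a little-endian host val == 1 is written as the
 // bytes [01 00 00 00 00 00 00 00], which is exactly what
 // hostarch.ByteOrder.PutUint64 produces below.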
func (e *EventOperations) hostWrite(val uint64) error { var buf [8]byte - usermem.ByteOrder.PutUint64(buf[:], val) + hostarch.ByteOrder.PutUint64(buf[:], val) _, err := unix.Write(e.hostfd, buf[:]) if err == unix.EWOULDBLOCK { return syserror.ErrWouldBlock @@ -207,7 +208,7 @@ func (e *EventOperations) write(ctx context.Context, src usermem.IOSequence) err if _, err := src.CopyIn(ctx, buf[:]); err != nil { return err } - val := usermem.ByteOrder.Uint64(buf[:]) + val := hostarch.ByteOrder.Uint64(buf[:]) return e.Signal(val) } diff --git a/pkg/sentry/kernel/futex/BUILD b/pkg/sentry/kernel/futex/BUILD index 041e3d4ca..a75686cf3 100644 --- a/pkg/sentry/kernel/futex/BUILD +++ b/pkg/sentry/kernel/futex/BUILD @@ -37,6 +37,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/sentry/memmap", "//pkg/sync", @@ -52,8 +53,8 @@ go_test( library = ":futex", deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/sync", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/kernel/futex/futex.go b/pkg/sentry/kernel/futex/futex.go index e4dcc4d40..0427cf3f4 100644 --- a/pkg/sentry/kernel/futex/futex.go +++ b/pkg/sentry/kernel/futex/futex.go @@ -20,10 +20,10 @@ package futex import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // KeyKind indicates the type of a Key. @@ -83,8 +83,8 @@ func (k *Key) clone() Key { } // Preconditions: k.Kind == KindPrivate or KindSharedPrivate. -func (k *Key) addr() usermem.Addr { - return usermem.Addr(k.Offset) +func (k *Key) addr() hostarch.Addr { + return hostarch.Addr(k.Offset) } // matches returns true if a wakeup on k2 should wake a waiter waiting on k. @@ -97,14 +97,14 @@ func (k *Key) matches(k2 *Key) bool { type Target interface { context.Context - // SwapUint32 gives access to usermem.IO.SwapUint32. - SwapUint32(addr usermem.Addr, new uint32) (uint32, error) + // SwapUint32 gives access to hostarch.IO.SwapUint32. + SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) - // CompareAndSwap gives access to usermem.IO.CompareAndSwapUint32. - CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) + // CompareAndSwap gives access to hostarch.IO.CompareAndSwapUint32. + CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) - // LoadUint32 gives access to usermem.IO.LoadUint32. - LoadUint32(addr usermem.Addr) (uint32, error) + // LoadUint32 gives access to hostarch.IO.LoadUint32. + LoadUint32(addr hostarch.Addr) (uint32, error) // GetSharedKey returns a Key with kind KindSharedPrivate or // KindSharedMappable corresponding to the memory mapped at address addr. @@ -112,11 +112,11 @@ type Target interface { // If GetSharedKey returns a Key with a non-nil MappingIdentity, a // reference is held on the MappingIdentity, which must be dropped by the // caller when the Key is no longer in use. - GetSharedKey(addr usermem.Addr) (Key, error) + GetSharedKey(addr hostarch.Addr) (Key, error) } // check performs a basic equality check on the given address. -func check(t Target, addr usermem.Addr, val uint32) error { +func check(t Target, addr hostarch.Addr, val uint32) error { cur, err := t.LoadUint32(addr) if err != nil { return err @@ -128,7 +128,7 @@ func check(t Target, addr usermem.Addr, val uint32) error { } // atomicOp performs a complex operation on the given address. 
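 // For illustration only, using Linux's FUTEX_WAKE_OP operand encoding that the
 // shifts below decode: opIn == 0x14001000 splits into opType 1 (FUTEX_OP_ADD),
 // cmp 4 (FUTEX_OP_CMP_GT), opArg 1, and cmpArg 0; that is, atomically add 1 to
 // the word at addr and report whether its previous value was greater than 0.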
-func atomicOp(t Target, addr usermem.Addr, opIn uint32) (bool, error) { +func atomicOp(t Target, addr hostarch.Addr, opIn uint32) (bool, error) { opType := (opIn >> 28) & 0xf cmp := (opIn >> 24) & 0xf opArg := (opIn >> 12) & 0xfff @@ -328,7 +328,7 @@ const ( ) // getKey returns a Key representing address addr in c. -func getKey(t Target, addr usermem.Addr, private bool) (Key, error) { +func getKey(t Target, addr hostarch.Addr, private bool) (Key, error) { // Ensure the address is aligned. // It must be a DWORD boundary. if addr&0x3 != 0 { @@ -341,7 +341,7 @@ func getKey(t Target, addr usermem.Addr, private bool) (Key, error) { } // bucketIndexForAddr returns the index into Manager.buckets for addr. -func bucketIndexForAddr(addr usermem.Addr) uintptr { +func bucketIndexForAddr(addr hostarch.Addr) uintptr { // - The bottom 2 bits of addr must be 0, per getKey. // // - On amd64, the top 16 bits of addr (bits 48-63) must be equal to bit 47 @@ -448,7 +448,7 @@ func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) { // Wake wakes up to n waiters matching the bitmask on the given addr. // The number of waiters woken is returned. -func (m *Manager) Wake(t Target, addr usermem.Addr, private bool, bitmask uint32, n int) (int, error) { +func (m *Manager) Wake(t Target, addr hostarch.Addr, private bool, bitmask uint32, n int) (int, error) { // This function is very hot; avoid defer. k, err := getKey(t, addr, private) if err != nil { @@ -463,7 +463,7 @@ func (m *Manager) Wake(t Target, addr usermem.Addr, private bool, bitmask uint32 return r, nil } -func (m *Manager) doRequeue(t Target, addr, naddr usermem.Addr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) { +func (m *Manager) doRequeue(t Target, addr, naddr hostarch.Addr, private bool, checkval bool, val uint32, nwake int, nreq int) (int, error) { k1, err := getKey(t, addr, private) if err != nil { return 0, err @@ -498,14 +498,14 @@ func (m *Manager) doRequeue(t Target, addr, naddr usermem.Addr, private bool, ch // Requeue wakes up to nwake waiters on the given addr, and unconditionally // requeues up to nreq waiters on naddr. -func (m *Manager) Requeue(t Target, addr, naddr usermem.Addr, private bool, nwake int, nreq int) (int, error) { +func (m *Manager) Requeue(t Target, addr, naddr hostarch.Addr, private bool, nwake int, nreq int) (int, error) { return m.doRequeue(t, addr, naddr, private, false, 0, nwake, nreq) } // RequeueCmp atomically checks that the addr contains val (via the Target), // wakes up to nwake waiters on addr and then unconditionally requeues nreq // waiters on naddr. -func (m *Manager) RequeueCmp(t Target, addr, naddr usermem.Addr, private bool, val uint32, nwake int, nreq int) (int, error) { +func (m *Manager) RequeueCmp(t Target, addr, naddr hostarch.Addr, private bool, val uint32, nwake int, nreq int) (int, error) { return m.doRequeue(t, addr, naddr, private, true, val, nwake, nreq) } @@ -513,7 +513,7 @@ func (m *Manager) RequeueCmp(t Target, addr, naddr usermem.Addr, private bool, v // waiters unconditionally from addr1, and, based on the original value at addr2 // and a comparison encoded in op, wakes up to nwake2 waiters from addr2. // It returns the total number of waiters woken. 
-func (m *Manager) WakeOp(t Target, addr1, addr2 usermem.Addr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) { +func (m *Manager) WakeOp(t Target, addr1, addr2 hostarch.Addr, private bool, nwake1 int, nwake2 int, op uint32) (int, error) { k1, err := getKey(t, addr1, private) if err != nil { return 0, err @@ -553,7 +553,7 @@ func (m *Manager) WakeOp(t Target, addr1, addr2 usermem.Addr, private bool, nwak // enqueues w to be woken by a send to w.C. If WaitPrepare returns nil, the // Waiter must be subsequently removed by calling WaitComplete, whether or not // a wakeup is received on w.C. -func (m *Manager) WaitPrepare(w *Waiter, t Target, addr usermem.Addr, private bool, val uint32, bitmask uint32) error { +func (m *Manager) WaitPrepare(w *Waiter, t Target, addr hostarch.Addr, private bool, val uint32, bitmask uint32) error { k, err := getKey(t, addr, private) if err != nil { return err @@ -631,7 +631,7 @@ func (m *Manager) WaitComplete(w *Waiter, t Target) { // FUTEX_OWNER_DIED is only set by the Linux when robust lists are in use (see // exit_robust_list()). Given we don't support robust lists, although handled // below, it's never set. -func (m *Manager) LockPI(w *Waiter, t Target, addr usermem.Addr, tid uint32, private, try bool) (bool, error) { +func (m *Manager) LockPI(w *Waiter, t Target, addr hostarch.Addr, tid uint32, private, try bool) (bool, error) { k, err := getKey(t, addr, private) if err != nil { return false, err @@ -663,7 +663,7 @@ func (m *Manager) LockPI(w *Waiter, t Target, addr usermem.Addr, tid uint32, pri return success, nil } -func (m *Manager) lockPILocked(w *Waiter, t Target, addr usermem.Addr, tid uint32, b *bucket, try bool) (bool, error) { +func (m *Manager) lockPILocked(w *Waiter, t Target, addr hostarch.Addr, tid uint32, b *bucket, try bool) (bool, error) { for { cur, err := t.LoadUint32(addr) if err != nil { @@ -724,7 +724,7 @@ func (m *Manager) lockPILocked(w *Waiter, t Target, addr usermem.Addr, tid uint3 // The address provided must contain the caller's TID. If there are waiters, // TID of the next waiter (FIFO) is set to the given address, and the waiter // woken up. If there are no waiters, 0 is set to the address. 
-func (m *Manager) UnlockPI(t Target, addr usermem.Addr, tid uint32, private bool) error { +func (m *Manager) UnlockPI(t Target, addr hostarch.Addr, tid uint32, private bool) error { k, err := getKey(t, addr, private) if err != nil { return err @@ -738,7 +738,7 @@ func (m *Manager) UnlockPI(t Target, addr usermem.Addr, tid uint32, private bool return err } -func (m *Manager) unlockPILocked(t Target, addr usermem.Addr, tid uint32, b *bucket, key *Key) error { +func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bucket, key *Key) error { cur, err := t.LoadUint32(addr) if err != nil { return err diff --git a/pkg/sentry/kernel/futex/futex_test.go b/pkg/sentry/kernel/futex/futex_test.go index ba7f95d8a..deba44e5c 100644 --- a/pkg/sentry/kernel/futex/futex_test.go +++ b/pkg/sentry/kernel/futex/futex_test.go @@ -23,8 +23,8 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // testData implements the Target interface, and allows us to @@ -43,23 +43,23 @@ func newTestData(size uint) testData { } } -func (t testData) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) { +func (t testData) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) { val := atomic.SwapUint32((*uint32)(unsafe.Pointer(&t.data[addr])), new) return val, nil } -func (t testData) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) { +func (t testData) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) { if atomic.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&t.data[addr])), old, new) { return old, nil } return atomic.LoadUint32((*uint32)(unsafe.Pointer(&t.data[addr]))), nil } -func (t testData) LoadUint32(addr usermem.Addr) (uint32, error) { +func (t testData) LoadUint32(addr hostarch.Addr) (uint32, error) { return atomic.LoadUint32((*uint32)(unsafe.Pointer(&t.data[addr]))), nil } -func (t testData) GetSharedKey(addr usermem.Addr) (Key, error) { +func (t testData) GetSharedKey(addr hostarch.Addr) (Key, error) { return Key{ Kind: KindSharedMappable, Offset: uint64(addr), @@ -73,7 +73,7 @@ func futexKind(private bool) string { return "shared" } -func newPreparedTestWaiter(t *testing.T, m *Manager, ta Target, addr usermem.Addr, private bool, val uint32, bitmask uint32) *Waiter { +func newPreparedTestWaiter(t *testing.T, m *Manager, ta Target, addr hostarch.Addr, private bool, val uint32, bitmask uint32) *Waiter { w := NewWaiter() if err := m.WaitPrepare(w, ta, addr, private, val, bitmask); err != nil { t.Fatalf("WaitPrepare failed: %v", err) @@ -463,12 +463,12 @@ const ( // Beyond being used as a Locker, this is a simple mechanism for // changing the underlying values for simpler tests. 
type testMutex struct { - a usermem.Addr + a hostarch.Addr d testData m *Manager } -func newTestMutex(addr usermem.Addr, d testData, m *Manager) *testMutex { +func newTestMutex(addr hostarch.Addr, d testData, m *Manager) *testMutex { return &testMutex{a: addr, d: d, m: m} } diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go index 4fcdfc541..4b943106b 100644 --- a/pkg/sentry/kernel/kcov.go +++ b/pkg/sentry/kernel/kcov.go @@ -22,13 +22,13 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/coverage" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/mm" "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov @@ -130,7 +130,7 @@ func (kcov *Kcov) InitTrace(size uint64) error { // To simplify all the logic around mapping, we require that the length of the // shared region is a multiple of the system page size. - if (8*size)&(usermem.PageSize-1) != 0 { + if (8*size)&(hostarch.PageSize-1) != 0 { return syserror.EINVAL } @@ -286,7 +286,7 @@ func (rw *kcovReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { } // Get internal mappings. - bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Read) + bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Read) if err != nil { return 0, err } @@ -314,7 +314,7 @@ func (rw *kcovReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) } // Get internal mapping. - bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, usermem.Write) + bs, err := rw.mf.MapInternal(memmap.FileRange{start, end}, hostarch.Write) if err != nil { return 0, err } diff --git a/pkg/sentry/kernel/pipe/BUILD b/pkg/sentry/kernel/pipe/BUILD index beba6d97d..34c617b08 100644 --- a/pkg/sentry/kernel/pipe/BUILD +++ b/pkg/sentry/kernel/pipe/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/abi/linux", "//pkg/amutex", "//pkg/context", + "//pkg/hostarch", "//pkg/marshal/primitive", "//pkg/safemem", "//pkg/sentry/arch", diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go index d004f2357..06769931a 100644 --- a/pkg/sentry/kernel/pipe/pipe.go +++ b/pkg/sentry/kernel/pipe/pipe.go @@ -22,18 +22,18 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" "gvisor.dev/gvisor/pkg/waiter" ) const ( // MinimumPipeSize is a hard limit of the minimum size of a pipe. // It corresponds to fs/pipe.c:pipe_min_size. - MinimumPipeSize = usermem.PageSize + MinimumPipeSize = hostarch.PageSize // MaximumPipeSize is a hard limit on the maximum size of a pipe. // It corresponds to fs/pipe.c:pipe_max_size. @@ -41,7 +41,7 @@ const ( // DefaultPipeSize is the system-wide default size of a pipe in bytes. // It corresponds to pipe_fs_i.h:PIPE_DEF_BUFFERS. - DefaultPipeSize = 16 * usermem.PageSize + DefaultPipeSize = 16 * hostarch.PageSize // atomicIOBytes is the maximum number of bytes that the pipe will // guarantee atomic reads or writes atomically. 
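Both the kcov trace-size check and the pipe size constants above lean on hostarch.PageSize being a power of two, so masking with PageSize-1 is a cheap "multiple of a page" test. A small sketch of that check, assuming the gvisor.dev/gvisor module is importable and a 4 KiB page size:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// isPageMultiple reports whether n is an exact multiple of the host page size,
// using the same bitmask trick as Kcov.InitTrace above; because PageSize is a
// power of two, the mask is equivalent to n%hostarch.PageSize == 0.
func isPageMultiple(n uint64) bool {
	return n&(hostarch.PageSize-1) == 0
}

func main() {
	fmt.Println(isPageMultiple(8 * 512))  // true: 4096 bytes is exactly one page
	fmt.Println(isPageMultiple(8 * 1000)) // false: 8000 bytes spans a partial page
}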
diff --git a/pkg/sentry/kernel/pipe/vfs.go b/pkg/sentry/kernel/pipe/vfs.go index e524afad5..95b948edb 100644 --- a/pkg/sentry/kernel/pipe/vfs.go +++ b/pkg/sentry/kernel/pipe/vfs.go @@ -17,6 +17,7 @@ package pipe import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/vfs" @@ -274,7 +275,7 @@ func (fd *VFSPipeFD) SpliceToNonPipe(ctx context.Context, out *vfs.FileDescripti } src := usermem.IOSequence{ IO: fd, - Addrs: usermem.AddrRangeSeqOf(usermem.AddrRange{0, usermem.Addr(count)}), + Addrs: hostarch.AddrRangeSeqOf(hostarch.AddrRange{0, hostarch.Addr(count)}), } var ( @@ -302,7 +303,7 @@ func (fd *VFSPipeFD) SpliceToNonPipe(ctx context.Context, out *vfs.FileDescripti func (fd *VFSPipeFD) SpliceFromNonPipe(ctx context.Context, in *vfs.FileDescription, off, count int64) (int64, error) { dst := usermem.IOSequence{ IO: fd, - Addrs: usermem.AddrRangeSeqOf(usermem.AddrRange{0, usermem.Addr(count)}), + Addrs: hostarch.AddrRangeSeqOf(hostarch.AddrRange{0, hostarch.Addr(count)}), } var ( @@ -328,7 +329,7 @@ func (fd *VFSPipeFD) SpliceFromNonPipe(ctx context.Context, in *vfs.FileDescript // fd.pipe.Notify(waiter.WritableEvents) after the read is completed. // // Preconditions: fd.pipe.mu must be locked. -func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) { +func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) { n, err := fd.pipe.peekLocked(int64(len(dst)), func(srcs safemem.BlockSeq) (uint64, error) { return safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), srcs) }) @@ -340,7 +341,7 @@ func (fd *VFSPipeFD) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, // is completed. // // Preconditions: fd.pipe.mu must be locked. -func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) { +func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) { n, err := fd.pipe.writeLocked(int64(len(src)), func(dsts safemem.BlockSeq) (uint64, error) { return safemem.CopySeq(dsts, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src))) }) @@ -350,7 +351,7 @@ func (fd *VFSPipeFD) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, // ZeroOut implements usermem.IO.ZeroOut. // // Preconditions: fd.pipe.mu must be locked. -func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) { +func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) { n, err := fd.pipe.writeLocked(toZero, func(dsts safemem.BlockSeq) (uint64, error) { return safemem.ZeroSeq(dsts) }) @@ -362,7 +363,7 @@ func (fd *VFSPipeFD) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int6 // fd.pipe.Notify(waiter.WritableEvents) after the read is completed. // // Preconditions: fd.pipe.mu must be locked. 
-func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) { +func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) { return fd.pipe.peekLocked(ars.NumBytes(), func(srcs safemem.BlockSeq) (uint64, error) { return dst.WriteFromBlocks(srcs) }) @@ -373,25 +374,25 @@ func (fd *VFSPipeFD) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst // is completed. // // Preconditions: fd.pipe.mu must be locked. -func (fd *VFSPipeFD) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) { +func (fd *VFSPipeFD) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) { return fd.pipe.writeLocked(ars.NumBytes(), func(dsts safemem.BlockSeq) (uint64, error) { return src.ReadToBlocks(dsts) }) } // SwapUint32 implements usermem.IO.SwapUint32. -func (fd *VFSPipeFD) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) { +func (fd *VFSPipeFD) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) { // How did a pipe get passed as the virtual address space to futex(2)? panic("VFSPipeFD.SwapUint32 called unexpectedly") } // CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32. -func (fd *VFSPipeFD) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) { +func (fd *VFSPipeFD) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) { panic("VFSPipeFD.CompareAndSwapUint32 called unexpectedly") } // LoadUint32 implements usermem.IO.LoadUint32. -func (fd *VFSPipeFD) LoadUint32(ctx context.Context, addr usermem.Addr, opts usermem.IOOpts) (uint32, error) { +func (fd *VFSPipeFD) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) { panic("VFSPipeFD.LoadUint32 called unexpectedly") } diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go index f5a60e749..57c7659e7 100644 --- a/pkg/sentry/kernel/ptrace.go +++ b/pkg/sentry/kernel/ptrace.go @@ -19,6 +19,7 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/mm" @@ -1011,7 +1012,7 @@ func (t *Task) ptraceSetOptionsLocked(opts uintptr) error { } // Ptrace implements the ptrace system call. -func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error { +func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error { // PTRACE_TRACEME ignores all other arguments. if req == linux.PTRACE_TRACEME { return t.ptraceTraceme() @@ -1190,7 +1191,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error { panic(fmt.Sprintf("%#x + %#x overflows. 
Invalid reg size > %#x", ar.Start, n, ar.Length())) } ar.End = end - return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar)) + return t.CopyOutIovecs(data, hostarch.AddrRangeSeqOf(ar)) case linux.PTRACE_SETREGSET: ars, err := t.CopyInIovecs(data, 1) @@ -1214,8 +1215,8 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error { return err } t.p.FullStateChanged() - ar.End -= usermem.Addr(n) - return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar)) + ar.End -= hostarch.Addr(n) + return t.CopyOutIovecs(data, hostarch.AddrRangeSeqOf(ar)) case linux.PTRACE_GETSIGINFO: t.tg.pidns.owner.mu.RLock() @@ -1267,7 +1268,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error { case linux.PTRACE_GETEVENTMSG: t.tg.pidns.owner.mu.RLock() defer t.tg.pidns.owner.mu.RUnlock() - _, err := primitive.CopyUint64Out(t, usermem.Addr(data), target.ptraceEventMsg) + _, err := primitive.CopyUint64Out(t, hostarch.Addr(data), target.ptraceEventMsg) return err // PEEKSIGINFO is unimplemented but seems to have no users anywhere. diff --git a/pkg/sentry/kernel/ptrace_amd64.go b/pkg/sentry/kernel/ptrace_amd64.go index 7aea3dcd8..5ae05b5c3 100644 --- a/pkg/sentry/kernel/ptrace_amd64.go +++ b/pkg/sentry/kernel/ptrace_amd64.go @@ -18,12 +18,13 @@ package kernel import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" ) // ptraceArch implements arch-specific ptrace commands. -func (t *Task) ptraceArch(target *Task, req int64, addr, data usermem.Addr) error { +func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) error { switch req { case linux.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER n, err := target.Arch().PtracePeekUser(uintptr(addr)) diff --git a/pkg/sentry/kernel/ptrace_arm64.go b/pkg/sentry/kernel/ptrace_arm64.go index d971b96b3..46dd84cbc 100644 --- a/pkg/sentry/kernel/ptrace_arm64.go +++ b/pkg/sentry/kernel/ptrace_arm64.go @@ -17,11 +17,11 @@ package kernel import ( + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // ptraceArch implements arch-specific ptrace commands. -func (t *Task) ptraceArch(target *Task, req int64, addr, data usermem.Addr) error { +func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) error { return syserror.EIO } diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go index 2a9023fdf..4bc5bca44 100644 --- a/pkg/sentry/kernel/rseq.go +++ b/pkg/sentry/kernel/rseq.go @@ -18,6 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/hostcpu" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" @@ -43,8 +44,8 @@ type OldRSeqCriticalRegion struct { // application handler while its instruction pointer is in CriticalSection, // set the instruction pointer to Restart and application register r10 (on // amd64) to the former instruction pointer. - CriticalSection usermem.AddrRange - Restart usermem.Addr + CriticalSection hostarch.AddrRange + Restart hostarch.Addr } // RSeqAvailable returns true if t supports (old and new) restartable sequences. @@ -55,7 +56,7 @@ func (t *Task) RSeqAvailable() bool { // SetRSeq registers addr as this thread's rseq structure. // // Preconditions: The caller must be running on the task goroutine. 
-func (t *Task) SetRSeq(addr usermem.Addr, length, signature uint32) error { +func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error { if t.rseqAddr != 0 { if t.rseqAddr != addr { return syserror.EINVAL @@ -100,7 +101,7 @@ func (t *Task) SetRSeq(addr usermem.Addr, length, signature uint32) error { // ClearRSeq unregisters addr as this thread's rseq structure. // // Preconditions: The caller must be running on the task goroutine. -func (t *Task) ClearRSeq(addr usermem.Addr, length, signature uint32) error { +func (t *Task) ClearRSeq(addr hostarch.Addr, length, signature uint32) error { if t.rseqAddr == 0 { return syserror.EINVAL } @@ -166,7 +167,7 @@ func (t *Task) SetOldRSeqCriticalRegion(r OldRSeqCriticalRegion) error { // CPU number. // // Preconditions: The caller must be running on the task goroutine. -func (t *Task) OldRSeqCPUAddr() usermem.Addr { +func (t *Task) OldRSeqCPUAddr() hostarch.Addr { return t.oldRSeqCPUAddr } @@ -177,7 +178,7 @@ func (t *Task) OldRSeqCPUAddr() usermem.Addr { // * t.RSeqAvailable() == true. // * The caller must be running on the task goroutine. // * t's AddressSpace must be active. -func (t *Task) SetOldRSeqCPUAddr(addr usermem.Addr) error { +func (t *Task) SetOldRSeqCPUAddr(addr hostarch.Addr) error { t.oldRSeqCPUAddr = addr // Check that addr is writable. @@ -221,7 +222,7 @@ func (t *Task) oldRSeqCopyOutCPU() error { } buf := t.CopyScratchBuffer(4) - usermem.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) + hostarch.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) _, err := t.CopyOutBytes(t.oldRSeqCPUAddr, buf) return err } @@ -236,8 +237,8 @@ func (t *Task) rseqCopyOutCPU() error { buf := t.CopyScratchBuffer(8) // CPUIDStart and CPUID are the first two fields in linux.RSeq. - usermem.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) // CPUIDStart - usermem.ByteOrder.PutUint32(buf[4:], uint32(t.rseqCPU)) // CPUID + hostarch.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) // CPUIDStart + hostarch.ByteOrder.PutUint32(buf[4:], uint32(t.rseqCPU)) // CPUID // N.B. This write is not atomic, but since this occurs on the task // goroutine then as long as userspace uses a single-instruction read // it can't see an invalid value. @@ -251,8 +252,8 @@ func (t *Task) rseqCopyOutCPU() error { func (t *Task) rseqClearCPU() error { buf := t.CopyScratchBuffer(8) // CPUIDStart and CPUID are the first two fields in linux.RSeq. - usermem.ByteOrder.PutUint32(buf, 0) // CPUIDStart - usermem.ByteOrder.PutUint32(buf[4:], linux.RSEQ_CPU_ID_UNINITIALIZED) // CPUID + hostarch.ByteOrder.PutUint32(buf, 0) // CPUIDStart + hostarch.ByteOrder.PutUint32(buf[4:], linux.RSEQ_CPU_ID_UNINITIALIZED) // CPUID // N.B. This write is not atomic, but since this occurs on the task // goroutine then as long as userspace uses a single-instruction read // it can't see an invalid value. 
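// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. rseqCopyOutCPU and
// rseqClearCPU above publish the CPU number to userspace by writing the first
// two uint32 fields of struct rseq (CPUIDStart, then CPUID) through an 8-byte
// scratch buffer. The program below reproduces that buffer layout; it uses
// little-endian explicitly as a stand-in for the native byte order that
// hostarch.ByteOrder denotes.
// ---------------------------------------------------------------------------
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeRSeqCPU fills an 8-byte buffer with CPUIDStart and CPUID, the first
// two fields of the userspace linux.RSeq structure.
func encodeRSeqCPU(cpu uint32) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], cpu) // CPUIDStart
	binary.LittleEndian.PutUint32(buf[4:8], cpu) // CPUID
	return buf
}

func main() {
	buf := encodeRSeqCPU(3)
	fmt.Printf("rseq prefix bytes: % x\n", buf)
	fmt.Println("CPUIDStart:", binary.LittleEndian.Uint32(buf[0:4]))
	fmt.Println("CPUID:     ", binary.LittleEndian.Uint32(buf[4:8]))
}
// ------------------------------- end sketch --------------------------------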
@@ -305,7 +306,7 @@ func (t *Task) rseqAddrInterrupt() { return } - critAddr := usermem.Addr(usermem.ByteOrder.Uint64(buf)) + critAddr := hostarch.Addr(hostarch.ByteOrder.Uint64(buf)) if critAddr == 0 { return } @@ -325,7 +326,7 @@ func (t *Task) rseqAddrInterrupt() { return } - start := usermem.Addr(cs.Start) + start := hostarch.Addr(cs.Start) critRange, ok := start.ToRange(cs.PostCommitOffset) if !ok { t.Debugf("Invalid start and offset in %+v", cs) @@ -334,7 +335,7 @@ func (t *Task) rseqAddrInterrupt() { return } - abort := usermem.Addr(cs.Abort) + abort := hostarch.Addr(cs.Abort) if critRange.Contains(abort) { t.Debugf("Abort in critical section in %+v", cs) t.forceSignal(linux.SIGSEGV, false /* unconditional */) @@ -353,7 +354,7 @@ func (t *Task) rseqAddrInterrupt() { return } - sig := usermem.ByteOrder.Uint32(buf) + sig := hostarch.ByteOrder.Uint32(buf) if sig != t.rseqSignature { t.Debugf("Mismatched rseq signature %d != %d", sig, t.rseqSignature) t.forceSignal(linux.SIGSEGV, false /* unconditional */) @@ -376,7 +377,7 @@ func (t *Task) rseqAddrInterrupt() { } // Finally we can actually decide whether or not to restart. - if !critRange.Contains(usermem.Addr(t.Arch().IP())) { + if !critRange.Contains(hostarch.Addr(t.Arch().IP())) { return } @@ -386,7 +387,7 @@ func (t *Task) rseqAddrInterrupt() { // Preconditions: The caller must be running on the task goroutine. func (t *Task) oldRSeqInterrupt() { r := t.tg.oldRSeqCritical.Load().(*OldRSeqCriticalRegion) - if ip := t.Arch().IP(); r.CriticalSection.Contains(usermem.Addr(ip)) { + if ip := t.Arch().IP(); r.CriticalSection.Contains(hostarch.Addr(ip)) { t.Debugf("Interrupted rseq critical section at %#x; restarting at %#x", ip, r.Restart) t.Arch().SetIP(uintptr(r.Restart)) t.Arch().SetOldRSeqInterruptedIP(ip) diff --git a/pkg/sentry/kernel/seccomp.go b/pkg/sentry/kernel/seccomp.go index 8163a6132..a95e174a2 100644 --- a/pkg/sentry/kernel/seccomp.go +++ b/pkg/sentry/kernel/seccomp.go @@ -18,9 +18,9 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/bpf" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) const maxSyscallFilterInstructions = 1 << 15 @@ -35,11 +35,11 @@ func dataAsBPFInput(t *Task, d *linux.SeccompData) bpf.Input { return bpf.InputBytes{ Data: buf, // Go-marshal always uses the native byte order. - Order: usermem.ByteOrder, + Order: hostarch.ByteOrder, } } -func seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalInfo { +func seccompSiginfo(t *Task, errno, sysno int32, ip hostarch.Addr) *arch.SignalInfo { si := &arch.SignalInfo{ Signo: int32(linux.SIGSYS), Errno: errno, @@ -56,7 +56,7 @@ func seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalIn // in because vsyscalls do not use the values in t.Arch().) // // Preconditions: The caller must be running on the task goroutine. 
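// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. rseqAddrInterrupt
// above validates the registered critical section before diverting the task:
// the critical range is [Start, Start+PostCommitOffset), the abort target must
// lie outside it, and the task is redirected only when the interrupted IP is
// inside the range. The helper below captures just that decision; the
// signature check against rseqSignature is omitted for brevity.
// ---------------------------------------------------------------------------
package main

import (
	"errors"
	"fmt"
)

// rseqRestartTarget decides whether an interrupted task should be diverted to
// its abort handler, mirroring the containment checks in rseqAddrInterrupt.
func rseqRestartTarget(start, postCommitOffset, abort, ip uint64) (uint64, bool, error) {
	end := start + postCommitOffset
	if end < start {
		return 0, false, errors.New("critical section overflows the address space")
	}
	if abort >= start && abort < end {
		return 0, false, errors.New("abort handler lies inside the critical section")
	}
	if ip >= start && ip < end {
		return abort, true, nil // interrupted inside the section: restart at abort
	}
	return 0, false, nil // not in the critical section: nothing to do
}

func main() {
	start, off, abort := uint64(0x401000), uint64(0x40), uint64(0x402000)
	for _, ip := range []uint64{0x401010, 0x405000} {
		target, restart, err := rseqRestartTarget(start, off, abort, ip)
		fmt.Printf("ip=%#x restart=%v target=%#x err=%v\n", ip, restart, target, err)
	}
}
// ------------------------------- end sketch --------------------------------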
-func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip usermem.Addr) linux.BPFAction { +func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip hostarch.Addr) linux.BPFAction { result := linux.BPFAction(t.evaluateSyscallFilters(sysno, args, ip)) action := result & linux.SECCOMP_RET_ACTION switch action { @@ -102,7 +102,7 @@ func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip u return action } -func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip usermem.Addr) uint32 { +func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip hostarch.Addr) uint32 { data := linux.SeccompData{ Nr: sysno, Arch: t.image.st.AuditNumber, diff --git a/pkg/sentry/kernel/shm/BUILD b/pkg/sentry/kernel/shm/BUILD index 073e14507..1c3c0794f 100644 --- a/pkg/sentry/kernel/shm/BUILD +++ b/pkg/sentry/kernel/shm/BUILD @@ -28,6 +28,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/kernel/shm/shm.go b/pkg/sentry/kernel/shm/shm.go index 92d60ba78..a73f1bdca 100644 --- a/pkg/sentry/kernel/shm/shm.go +++ b/pkg/sentry/kernel/shm/shm.go @@ -38,6 +38,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -47,7 +48,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // Key represents a shm segment key. Analogous to a file name. @@ -197,13 +197,13 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size ui } var sizeAligned uint64 - if val, ok := usermem.Addr(size).RoundUp(); ok { + if val, ok := hostarch.Addr(size).RoundUp(); ok { sizeAligned = uint64(val) } else { return nil, syserror.EINVAL } - if numPages := sizeAligned / usermem.PageSize; r.totalPages+numPages > linux.SHMALL { + if numPages := sizeAligned / hostarch.PageSize; r.totalPages+numPages > linux.SHMALL { // "... allocating a segment of the requested size would cause the // system to exceed the system-wide limit on shared memory (SHMALL)." // - man shmget(2) @@ -232,7 +232,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider)) } - effectiveSize := uint64(usermem.Addr(size).MustRoundUp()) + effectiveSize := uint64(hostarch.Addr(size).MustRoundUp()) fr, err := mfp.MemoryFile().Allocate(effectiveSize, usage.Anonymous) if err != nil { return nil, err @@ -267,7 +267,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi r.shms[id] = shm r.keysToShms[key] = shm - r.totalPages += effectiveSize / usermem.PageSize + r.totalPages += effectiveSize / hostarch.PageSize return shm, nil } @@ -318,7 +318,7 @@ func (r *Registry) remove(s *Shm) { } delete(r.shms, s.ID) - r.totalPages -= s.effectiveSize / usermem.PageSize + r.totalPages -= s.effectiveSize / hostarch.PageSize } // Release drops the self-reference of each active shm segment in the registry. @@ -386,7 +386,7 @@ type Shm struct { // effectiveSize of the segment, rounding up to the next page // boundary. Immutable. // - // Invariant: effectiveSize must be a multiple of usermem.PageSize. 
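// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. Registry.FindOrCreate
// above rounds the requested segment size up to a page boundary (EINVAL if the
// rounding overflows) and rejects the request when the page total would exceed
// the system-wide SHMALL limit, per shmget(2). The helper below performs the
// same two checks with plain integers; the errno mapping in parentheses is an
// assumption based on the man-page reference in the hunk above.
// ---------------------------------------------------------------------------
package main

import (
	"errors"
	"fmt"
)

const pageSize = 4096

// checkShmRequest returns the number of pages a new segment of the given size
// would consume, or an error mirroring the rejection cases above.
func checkShmRequest(size, pagesInUse, shmAllPages uint64) (uint64, error) {
	rounded := size + pageSize - 1
	if rounded < size {
		return 0, errors.New("size rounds past the end of the address space (EINVAL)")
	}
	rounded &^= pageSize - 1
	numPages := rounded / pageSize
	if pagesInUse+numPages > shmAllPages {
		return 0, errors.New("system-wide SHMALL limit would be exceeded (ENOSPC)")
	}
	return numPages, nil
}

func main() {
	n, err := checkShmRequest(5000, 100, 1<<20)
	fmt.Println(n, err) // 2 pages for a 5000-byte request
	_, err = checkShmRequest(5000, 1<<20, 1<<20)
	fmt.Println(err)
}
// ------------------------------- end sketch --------------------------------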
+ // Invariant: effectiveSize must be a multiple of hostarch.PageSize. effectiveSize uint64 // fr is the offset into mfp.MemoryFile() that backs this contents of this @@ -467,7 +467,7 @@ func (s *Shm) Msync(context.Context, memmap.MappableRange) error { } // AddMapping implements memmap.Mappable.AddMapping. -func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) error { +func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ hostarch.AddrRange, _ uint64, _ bool) error { s.mu.Lock() defer s.mu.Unlock() s.attachTime = ktime.NowFromContext(ctx) @@ -482,7 +482,7 @@ func (s *Shm) AddMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.A } // RemoveMapping implements memmap.Mappable.RemoveMapping. -func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ usermem.AddrRange, _ uint64, _ bool) { +func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ hostarch.AddrRange, _ uint64, _ bool) { s.mu.Lock() defer s.mu.Unlock() // RemoveMapping may be called during task exit, when ctx @@ -503,12 +503,12 @@ func (s *Shm) RemoveMapping(ctx context.Context, _ memmap.MappingSpace, _ userme } // CopyMapping implements memmap.Mappable.CopyMapping. -func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error { +func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, hostarch.AddrRange, uint64, bool) error { return nil } // Translate implements memmap.Mappable.Translate. -func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { var err error if required.End > s.fr.Length() { err = &memmap.BusError{syserror.EFAULT} @@ -519,7 +519,7 @@ func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableR Source: source, File: s.mfp.MemoryFile(), Offset: s.fr.Start + source.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, err } @@ -543,7 +543,7 @@ type AttachOpts struct { // // Postconditions: The returned MMapOpts are valid only as long as a reference // continues to be held on s. -func (s *Shm) ConfigureAttach(ctx context.Context, addr usermem.Addr, opts AttachOpts) (memmap.MMapOpts, error) { +func (s *Shm) ConfigureAttach(ctx context.Context, addr hostarch.Addr, opts AttachOpts) (memmap.MMapOpts, error) { s.mu.Lock() defer s.mu.Unlock() if s.pendingDestruction && s.ReadRefs() == 0 { @@ -565,12 +565,12 @@ func (s *Shm) ConfigureAttach(ctx context.Context, addr usermem.Addr, opts Attac Offset: 0, Addr: addr, Fixed: opts.Remap, - Perms: usermem.AccessType{ + Perms: hostarch.AccessType{ Read: true, Write: !opts.Readonly, Execute: opts.Execute, }, - MaxPerms: usermem.AnyAccess, + MaxPerms: hostarch.AnyAccess, Mappable: s, MappingIdentity: s, }, nil diff --git a/pkg/sentry/kernel/syscalls.go b/pkg/sentry/kernel/syscalls.go index 332bdb8e8..953d4310e 100644 --- a/pkg/sentry/kernel/syscalls.go +++ b/pkg/sentry/kernel/syscalls.go @@ -20,9 +20,9 @@ import ( "gvisor.dev/gvisor/pkg/abi" "gvisor.dev/gvisor/pkg/bits" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // maxSyscallNum is the highest supported syscall number. 
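// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. ConfigureAttach above
// derives the mapping permissions for shmat(2): attachments are always
// readable, writable unless a read-only attach was requested, and executable
// only when requested, while MaxPerms stays AnyAccess. The snippet below
// spells out that mapping with a local AccessType stand-in.
// ---------------------------------------------------------------------------
package main

import "fmt"

// accessType is a stand-in for hostarch.AccessType.
type accessType struct {
	Read, Write, Execute bool
}

// shmAttachPerms mirrors the Perms computation in Shm.ConfigureAttach.
func shmAttachPerms(readonly, execute bool) accessType {
	return accessType{
		Read:    true,
		Write:   !readonly,
		Execute: execute,
	}
}

func main() {
	fmt.Printf("default attach:   %+v\n", shmAttachPerms(false, false))
	fmt.Printf("read-only attach: %+v\n", shmAttachPerms(true, false))
	fmt.Printf("exec attach:      %+v\n", shmAttachPerms(false, true))
}
// ------------------------------- end sketch --------------------------------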
@@ -243,7 +243,7 @@ type SyscallTable struct { // Emulate is a collection of instruction addresses to emulate. The // keys are addresses, and the values are system call numbers. - Emulate map[usermem.Addr]uintptr + Emulate map[hostarch.Addr]uintptr // The function to call in case of a missing system call. Missing MissingFn @@ -316,7 +316,7 @@ func (s *SyscallTable) Init() { } if s.Emulate == nil { // Ensure non-nil emulate table. - s.Emulate = make(map[usermem.Addr]uintptr) + s.Emulate = make(map[hostarch.Addr]uintptr) } max := s.MaxSysno() // Checked during RegisterSyscallTable. @@ -359,7 +359,7 @@ func (s *SyscallTable) LookupNo(name string) (uintptr, error) { } // LookupEmulate looks up an emulation syscall number. -func (s *SyscallTable) LookupEmulate(addr usermem.Addr) (uintptr, bool) { +func (s *SyscallTable) LookupEmulate(addr hostarch.Addr) (uintptr, bool) { sysno, ok := s.Emulate[addr] return sysno, ok } diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go index 36141dd09..399985039 100644 --- a/pkg/sentry/kernel/task.go +++ b/pkg/sentry/kernel/task.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/bpf" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/inet" @@ -33,7 +34,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" "gvisor.dev/gvisor/pkg/waiter" ) @@ -470,7 +470,7 @@ type Task struct { // ThreadID to 0, and wake any futex waiters. // // cleartid is exclusive to the task goroutine. - cleartid usermem.Addr + cleartid hostarch.Addr // This is mostly a fake cpumask just for sched_set/getaffinity as we // don't really control the affinity. @@ -540,12 +540,12 @@ type Task struct { // oldRSeqCPUAddr is a pointer to the userspace old rseq CPU variable. // // oldRSeqCPUAddr is exclusive to the task goroutine. - oldRSeqCPUAddr usermem.Addr + oldRSeqCPUAddr hostarch.Addr // rseqAddr is a pointer to the userspace linux.RSeq structure. // // rseqAddr is exclusive to the task goroutine. - rseqAddr usermem.Addr + rseqAddr hostarch.Addr // rseqSignature is the signature that the rseq abort IP must be signed // with. @@ -575,7 +575,7 @@ type Task struct { // robustList is a pointer to the head of the tasks's robust futex // list. - robustList usermem.Addr + robustList hostarch.Addr // startTime is the real time at which the task started. It is set when // a Task is created or invokes execve(2). @@ -652,7 +652,7 @@ func (t *Task) Kernel() *Kernel { // SetClearTID sets t's cleartid. // // Preconditions: The caller must be running on the task goroutine. -func (t *Task) SetClearTID(addr usermem.Addr) { +func (t *Task) SetClearTID(addr hostarch.Addr) { t.cleartid = addr } diff --git a/pkg/sentry/kernel/task_clone.go b/pkg/sentry/kernel/task_clone.go index f305e69c0..405771f3f 100644 --- a/pkg/sentry/kernel/task_clone.go +++ b/pkg/sentry/kernel/task_clone.go @@ -20,6 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/bpf" "gvisor.dev/gvisor/pkg/cleanup" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" @@ -85,12 +86,12 @@ type CloneOptions struct { // Stack is the initial stack pointer of the new task. If Stack is 0, the // new task will start with the same stack pointer as its parent. 
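// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. SyscallTable.Emulate
// above maps instruction addresses to syscall numbers, which is how execution
// faults at well-known addresses (the vsyscall page, for example) are turned
// back into system calls. The snippet models the lookup; the example address
// and syscall number are illustrative values only.
// ---------------------------------------------------------------------------
package main

import "fmt"

type addr uint64

type syscallTable struct {
	// emulate maps faulting instruction addresses to syscall numbers, as the
	// Emulate field above does.
	emulate map[addr]uintptr
}

// lookupEmulate mirrors SyscallTable.LookupEmulate.
func (s *syscallTable) lookupEmulate(a addr) (uintptr, bool) {
	sysno, ok := s.emulate[a]
	return sysno, ok
}

func main() {
	table := &syscallTable{emulate: map[addr]uintptr{
		0xffffffffff600000: 96, // hypothetical example: vsyscall gettimeofday
	}}
	if sysno, ok := table.lookupEmulate(0xffffffffff600000); ok {
		fmt.Printf("emulate fault at vsyscall page as syscall %d\n", sysno)
	}
	if _, ok := table.lookupEmulate(0x400000); !ok {
		fmt.Println("ordinary code addresses are not emulated")
	}
}
// ------------------------------- end sketch --------------------------------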
- Stack usermem.Addr + Stack hostarch.Addr // If SetTLS is true, set the new task's TLS (thread-local storage) // descriptor to TLS. If SetTLS is false, TLS is ignored. SetTLS bool - TLS usermem.Addr + TLS hostarch.Addr // If ChildClearTID is true, when the child exits, 0 is written to the // address ChildTID in the child's memory, and if the write is successful a @@ -101,7 +102,7 @@ type CloneOptions struct { // Linux, failed writes are silently ignored.) ChildClearTID bool ChildSetTID bool - ChildTID usermem.Addr + ChildTID hostarch.Addr // If ParentSetTID is true, the child's thread ID (in the parent's PID // namespace) is written to address ParentTID in the parent's memory. (As @@ -112,7 +113,7 @@ type CloneOptions struct { // and child's memory, but this is a documentation error fixed by // 87ab04792ced ("clone.2: Fix description of CLONE_PARENT_SETTID"). ParentSetTID bool - ParentTID usermem.Addr + ParentTID hostarch.Addr // If Vfork is true, place the parent in vforkStop until the cloned task // releases its TaskImage. @@ -268,7 +269,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) { } tg := t.tg - rseqAddr := usermem.Addr(0) + rseqAddr := hostarch.Addr(0) rseqSignature := uint32(0) if opts.NewThreadGroup { if tg.mounts != nil { diff --git a/pkg/sentry/kernel/task_futex.go b/pkg/sentry/kernel/task_futex.go index 195c7da9b..4dc41b82b 100644 --- a/pkg/sentry/kernel/task_futex.go +++ b/pkg/sentry/kernel/task_futex.go @@ -16,6 +16,7 @@ package kernel import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/kernel/futex" "gvisor.dev/gvisor/pkg/usermem" @@ -30,33 +31,33 @@ func (t *Task) Futex() *futex.Manager { } // SwapUint32 implements futex.Target.SwapUint32. -func (t *Task) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) { +func (t *Task) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) { return t.MemoryManager().SwapUint32(t, addr, new, usermem.IOOpts{ AddressSpaceActive: true, }) } // CompareAndSwapUint32 implements futex.Target.CompareAndSwapUint32. -func (t *Task) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) { +func (t *Task) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) { return t.MemoryManager().CompareAndSwapUint32(t, addr, old, new, usermem.IOOpts{ AddressSpaceActive: true, }) } // LoadUint32 implements futex.Target.LoadUint32. -func (t *Task) LoadUint32(addr usermem.Addr) (uint32, error) { +func (t *Task) LoadUint32(addr hostarch.Addr) (uint32, error) { return t.MemoryManager().LoadUint32(t, addr, usermem.IOOpts{ AddressSpaceActive: true, }) } // GetSharedKey implements futex.Target.GetSharedKey. -func (t *Task) GetSharedKey(addr usermem.Addr) (futex.Key, error) { +func (t *Task) GetSharedKey(addr hostarch.Addr) (futex.Key, error) { return t.MemoryManager().GetSharedFutexKey(t, addr) } // GetRobustList sets the robust futex list for the task. -func (t *Task) GetRobustList() usermem.Addr { +func (t *Task) GetRobustList() hostarch.Addr { t.mu.Lock() addr := t.robustList t.mu.Unlock() @@ -64,7 +65,7 @@ func (t *Task) GetRobustList() usermem.Addr { } // SetRobustList sets the robust futex list for the task. 
-func (t *Task) SetRobustList(addr usermem.Addr) { +func (t *Task) SetRobustList(addr hostarch.Addr) { t.mu.Lock() t.robustList = addr t.mu.Unlock() @@ -84,28 +85,28 @@ func (t *Task) exitRobustList() { } var rl linux.RobustListHead - if _, err := rl.CopyIn(t, usermem.Addr(addr)); err != nil { + if _, err := rl.CopyIn(t, hostarch.Addr(addr)); err != nil { return } next := primitive.Uint64(rl.List) done := 0 - var pendingLockAddr usermem.Addr + var pendingLockAddr hostarch.Addr if rl.ListOpPending != 0 { - pendingLockAddr = usermem.Addr(rl.ListOpPending + rl.FutexOffset) + pendingLockAddr = hostarch.Addr(rl.ListOpPending + rl.FutexOffset) } // Wake up normal elements. - for usermem.Addr(next) != addr { + for hostarch.Addr(next) != addr { // We traverse to the next element of the list before we // actually wake anything. This prevents the race where waking // this futex causes a modification of the list. - thisLockAddr := usermem.Addr(uint64(next) + rl.FutexOffset) + thisLockAddr := hostarch.Addr(uint64(next) + rl.FutexOffset) // Try to decode the next element in the list before waking the // current futex. But don't check the error until after we've // woken the current futex. Linux does it in this order too - _, nextErr := next.CopyIn(t, usermem.Addr(next)) + _, nextErr := next.CopyIn(t, hostarch.Addr(next)) // Wakeup the current futex if it's not pending. if thisLockAddr != pendingLockAddr { @@ -133,7 +134,7 @@ func (t *Task) exitRobustList() { } // wakeRobustListOne wakes a single futex from the robust list. -func (t *Task) wakeRobustListOne(addr usermem.Addr) { +func (t *Task) wakeRobustListOne(addr hostarch.Addr) { // Bit 0 in address signals PI futex. pi := addr&1 == 1 addr = addr &^ 1 diff --git a/pkg/sentry/kernel/task_image.go b/pkg/sentry/kernel/task_image.go index ce5fbd299..bd5543d4e 100644 --- a/pkg/sentry/kernel/task_image.go +++ b/pkg/sentry/kernel/task_image.go @@ -19,12 +19,12 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel/futex" "gvisor.dev/gvisor/pkg/sentry/loader" "gvisor.dev/gvisor/pkg/sentry/mm" "gvisor.dev/gvisor/pkg/syserr" - "gvisor.dev/gvisor/pkg/usermem" ) var errNoSyscalls = syserr.New("no syscall table found", linux.ENOEXEC) @@ -129,7 +129,7 @@ func (t *Task) Stack() *arch.Stack { return &arch.Stack{ Arch: t.Arch(), IO: t.MemoryManager(), - Bottom: usermem.Addr(t.Arch().Stack()), + Bottom: hostarch.Addr(t.Arch().Stack()), } } diff --git a/pkg/sentry/kernel/task_log.go b/pkg/sentry/kernel/task_log.go index c70e5e6ce..72b9a0384 100644 --- a/pkg/sentry/kernel/task_log.go +++ b/pkg/sentry/kernel/task_log.go @@ -20,6 +20,7 @@ import ( "sort" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/usermem" ) @@ -108,9 +109,9 @@ func (t *Task) debugDumpStack() { return } t.Debugf("Stack:") - start := usermem.Addr(t.Arch().Stack()) + start := hostarch.Addr(t.Arch().Stack()) // Round addr down to a 16-byte boundary. - start &= ^usermem.Addr(15) + start &= ^hostarch.Addr(15) // Print 16 bytes per line, one byte at a time. 
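// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. exitRobustList above
// walks the circular robust-futex list at task exit: the next pointer is read
// before the current lock is woken (so a waiter reusing the entry cannot
// redirect the walk), and the entry named by list_op_pending is skipped during
// the loop. The program models the walk over a toy map of user memory; the
// iteration cap, the offsets, and the handling of the pending entry after the
// loop are assumptions based on Linux's exit_robust_list behaviour.
// ---------------------------------------------------------------------------
package main

import "fmt"

// walkRobustList traverses a robust list whose head lives at head. mem maps
// an entry address to the next-pointer stored at that address; futexOffset is
// added to an entry address to find its lock word.
func walkRobustList(mem map[uint64]uint64, head, futexOffset, pending uint64, wake func(lock uint64)) {
	const limit = 2048 // cap the walk so a corrupted, cyclic list terminates
	next, ok := mem[head]
	for done := 0; ok && next != head && done < limit; done++ {
		thisLock := next + futexOffset
		// Read the following entry before waking the current one.
		next, ok = mem[next]
		if thisLock != pending {
			wake(thisLock)
		}
	}
	if pending != 0 {
		wake(pending) // the pending lock is handled last
	}
}

func main() {
	mem := map[uint64]uint64{
		0x1000: 0x2000, // head -> first entry
		0x2000: 0x3000,
		0x3000: 0x1000, // last entry points back at the head
	}
	walkRobustList(mem, 0x1000, 16, 0, func(lock uint64) {
		fmt.Printf("wake futex at %#x\n", lock)
	})
}
// ------------------------------- end sketch --------------------------------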
for offset := uint64(0); offset < maxStackDebugBytes; offset += 16 { addr, ok := start.AddLength(offset) @@ -127,7 +128,7 @@ func (t *Task) debugDumpStack() { t.Debugf("%x: % x", addr, data[:n]) } if err != nil { - t.Debugf("Error reading stack at address %x: %v", addr+usermem.Addr(n), err) + t.Debugf("Error reading stack at address %x: %v", addr+hostarch.Addr(n), err) break } } @@ -147,9 +148,9 @@ func (t *Task) debugDumpCode() { } t.Debugf("Code:") // Print code on both sides of the instruction register. - start := usermem.Addr(t.Arch().IP()) - maxCodeDebugBytes/2 + start := hostarch.Addr(t.Arch().IP()) - maxCodeDebugBytes/2 // Round addr down to a 16-byte boundary. - start &= ^usermem.Addr(15) + start &= ^hostarch.Addr(15) // Print 16 bytes per line, one byte at a time. for offset := uint64(0); offset < maxCodeDebugBytes; offset += 16 { addr, ok := start.AddLength(offset) @@ -166,7 +167,7 @@ func (t *Task) debugDumpCode() { t.Debugf("%x: % x", addr, data[:n]) } if err != nil { - t.Debugf("Error reading stack at address %x: %v", addr+usermem.Addr(n), err) + t.Debugf("Error reading stack at address %x: %v", addr+hostarch.Addr(n), err) break } } diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go index 3ccecf4b6..068f25af1 100644 --- a/pkg/sentry/kernel/task_run.go +++ b/pkg/sentry/kernel/task_run.go @@ -23,13 +23,13 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/goid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/hostcpu" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // A taskRunState is a reified state in the task state machine. See README.md @@ -148,7 +148,7 @@ func (*runApp) handleCPUIDInstruction(t *Task) error { region := trace.StartRegion(t.traceContext, cpuidRegion) expected := arch.CPUIDInstruction[:] found := make([]byte, len(expected)) - _, err := t.CopyInBytes(usermem.Addr(t.Arch().IP()), found) + _, err := t.CopyInBytes(hostarch.Addr(t.Arch().IP()), found) if err == nil && bytes.Equal(expected, found) { // Skip the cpuid instruction. t.Arch().CPUIDEmulate(t) @@ -307,8 +307,8 @@ func (app *runApp) execute(t *Task) taskRunState { // normally. if at.Any() { region := trace.StartRegion(t.traceContext, faultRegion) - addr := usermem.Addr(info.Addr()) - err := t.MemoryManager().HandleUserFault(t, addr, at, usermem.Addr(t.Arch().Stack())) + addr := hostarch.Addr(info.Addr()) + err := t.MemoryManager().HandleUserFault(t, addr, at, hostarch.Addr(t.Arch().Stack())) region.End() if err == nil { // The fault was handled appropriately. diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go index 75af3af79..c2b9fc08f 100644 --- a/pkg/sentry/kernel/task_signals.go +++ b/pkg/sentry/kernel/task_signals.go @@ -23,11 +23,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/eventchannel" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" ucspb "gvisor.dev/gvisor/pkg/sentry/kernel/uncaught_signal_go_proto" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" "gvisor.dev/gvisor/pkg/waiter" ) @@ -243,7 +243,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct) // Are executing on the main stack, // or the provided alternate stack? 
- sp := usermem.Addr(t.Arch().Stack()) + sp := hostarch.Addr(t.Arch().Stack()) // N.B. This is a *copy* of the alternate stack that the user's signal // handler expects to see in its ucontext (even if it's not in use). @@ -251,7 +251,7 @@ func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct) if act.IsOnStack() && alt.IsEnabled() { alt.SetOnStack() if !alt.Contains(sp) { - sp = usermem.Addr(alt.Top()) + sp = hostarch.Addr(alt.Top()) } } @@ -652,7 +652,7 @@ func (t *Task) SignalStack() arch.SignalStack { // onSignalStack returns true if the task is executing on the given signal stack. func (t *Task) onSignalStack(alt arch.SignalStack) bool { - sp := usermem.Addr(t.Arch().Stack()) + sp := hostarch.Addr(t.Arch().Stack()) return alt.Contains(sp) } @@ -720,7 +720,7 @@ func (tg *ThreadGroup) SetSignalAct(sig linux.Signal, actptr *arch.SignalAct) (a // CopyOutSignalAct converts the given SignalAct into an architecture-specific // type and then copies it out to task memory. -func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error { +func (t *Task) CopyOutSignalAct(addr hostarch.Addr, s *arch.SignalAct) error { n := t.Arch().NewSignalAct() n.SerializeFrom(s) _, err := n.CopyOut(t, addr) @@ -729,7 +729,7 @@ func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error { // CopyInSignalAct copies an architecture-specific sigaction type from task // memory and then converts it into a SignalAct. -func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) { +func (t *Task) CopyInSignalAct(addr hostarch.Addr) (arch.SignalAct, error) { n := t.Arch().NewSignalAct() var s arch.SignalAct if _, err := n.CopyIn(t, addr); err != nil { @@ -741,7 +741,7 @@ func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) { // CopyOutSignalStack converts the given SignalStack into an // architecture-specific type and then copies it out to task memory. -func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error { +func (t *Task) CopyOutSignalStack(addr hostarch.Addr, s *arch.SignalStack) error { n := t.Arch().NewSignalStack() n.SerializeFrom(s) _, err := n.CopyOut(t, addr) @@ -750,7 +750,7 @@ func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error // CopyInSignalStack copies an architecture-specific stack_t from task memory // and then converts it into a SignalStack. -func (t *Task) CopyInSignalStack(addr usermem.Addr) (arch.SignalStack, error) { +func (t *Task) CopyInSignalStack(addr hostarch.Addr) (arch.SignalStack, error) { n := t.Arch().NewSignalStack() var s arch.SignalStack if _, err := n.CopyIn(t, addr); err != nil { diff --git a/pkg/sentry/kernel/task_start.go b/pkg/sentry/kernel/task_start.go index 36e1384f1..fc18b6253 100644 --- a/pkg/sentry/kernel/task_start.go +++ b/pkg/sentry/kernel/task_start.go @@ -17,6 +17,7 @@ package kernel import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" @@ -25,7 +26,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // TaskConfig defines the configuration of a new Task (see below). @@ -86,7 +86,7 @@ type TaskConfig struct { MountNamespaceVFS2 *vfs.MountNamespace // RSeqAddr is a pointer to the the userspace linux.RSeq structure. 
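// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. deliverSignalToHandler
// above picks the stack a signal handler runs on: if the action requested the
// alternate stack, the alternate stack is enabled, and the task is not already
// on it, the handler starts from the alternate stack's top; otherwise it keeps
// the current stack pointer. The helper below reproduces that decision with a
// simplified stack description.
// ---------------------------------------------------------------------------
package main

import "fmt"

// signalStack is a simplified stand-in for arch.SignalStack.
type signalStack struct {
	base, size uint64
	enabled    bool
}

func (s signalStack) contains(sp uint64) bool { return sp >= s.base && sp < s.base+s.size }
func (s signalStack) top() uint64             { return s.base + s.size }

// chooseSignalSP mirrors the stack-selection logic in deliverSignalToHandler.
func chooseSignalSP(sp uint64, wantsAltStack bool, alt signalStack) uint64 {
	if wantsAltStack && alt.enabled && !alt.contains(sp) {
		return alt.top()
	}
	return sp
}

func main() {
	alt := signalStack{base: 0x7f0000000000, size: 0x8000, enabled: true}
	fmt.Printf("%#x\n", chooseSignalSP(0x7ffdcafe0000, true, alt))  // moved to alt top
	fmt.Printf("%#x\n", chooseSignalSP(0x7f0000004000, true, alt))  // already on alt stack
	fmt.Printf("%#x\n", chooseSignalSP(0x7ffdcafe0000, false, alt)) // alt stack not requested
}
// ------------------------------- end sketch --------------------------------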
- RSeqAddr usermem.Addr + RSeqAddr hostarch.Addr // RSeqSignature is the signature that the rseq abort IP must be signed // with. diff --git a/pkg/sentry/kernel/task_syscall.go b/pkg/sentry/kernel/task_syscall.go index 2e84bd88a..2c658d001 100644 --- a/pkg/sentry/kernel/task_syscall.go +++ b/pkg/sentry/kernel/task_syscall.go @@ -22,12 +22,12 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/bits" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/metric" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) var vsyscallCount = metric.MustCreateNewUint64Metric("/kernel/vsyscall_count", false /* sync */, "Number of times vsyscalls were invoked by the application") @@ -153,7 +153,7 @@ func (t *Task) doSyscall() taskRunState { // Check seccomp filters. The nil check is for performance (as seccomp use // is rare), not needed for correctness. if t.syscallFilters.Load() != nil { - switch r := t.checkSeccompSyscall(int32(sysno), args, usermem.Addr(t.Arch().IP())); r { + switch r := t.checkSeccompSyscall(int32(sysno), args, hostarch.Addr(t.Arch().IP())); r { case linux.SECCOMP_RET_ERRNO, linux.SECCOMP_RET_TRAP: t.Debugf("Syscall %d: denied by seccomp", sysno) return (*runSyscallExit)(nil) @@ -283,12 +283,12 @@ func (*runSyscallExit) execute(t *Task) taskRunState { // doVsyscall is the entry point for a vsyscall invocation of syscall sysno, as // indicated by an execution fault at address addr. doVsyscall returns the // task's next run state. -func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState { +func (t *Task) doVsyscall(addr hostarch.Addr, sysno uintptr) taskRunState { vsyscallCount.Increment() // Grab the caller up front, to make sure there's a sensible stack. caller := t.Arch().Native(uintptr(0)) - if _, err := caller.CopyIn(t, usermem.Addr(t.Arch().Stack())); err != nil { + if _, err := caller.CopyIn(t, hostarch.Addr(t.Arch().Stack())); err != nil { t.Debugf("vsyscall %d: error reading return address from stack: %v", sysno, err) t.forceSignal(linux.SIGSEGV, false /* unconditional */) t.SendSignal(SignalInfoPriv(linux.SIGSEGV)) @@ -322,7 +322,7 @@ func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState { } type runVsyscallAfterPtraceEventSeccomp struct { - addr usermem.Addr + addr hostarch.Addr sysno uintptr caller marshal.Marshallable } @@ -337,7 +337,7 @@ func (r *runVsyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState { // currently emulated call. ... The tracer MUST NOT modify rip or rsp." - // Documentation/prctl/seccomp_filter.txt. On Linux, changing orig_ax or ip // causes do_exit(SIGSYS), and changing sp is ignored. 
- if (sysno != ^uintptr(0) && sysno != r.sysno) || usermem.Addr(t.Arch().IP()) != r.addr { + if (sysno != ^uintptr(0) && sysno != r.sysno) || hostarch.Addr(t.Arch().IP()) != r.addr { t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)}) return (*runExit)(nil) } diff --git a/pkg/sentry/kernel/task_usermem.go b/pkg/sentry/kernel/task_usermem.go index 94dabbcd8..fc6d9438a 100644 --- a/pkg/sentry/kernel/task_usermem.go +++ b/pkg/sentry/kernel/task_usermem.go @@ -19,6 +19,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/mm" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" @@ -27,7 +28,7 @@ import ( // MAX_RW_COUNT is the maximum size in bytes of a single read or write. // Reads and writes that exceed this size may be silently truncated. // (Linux: include/linux/fs.h:MAX_RW_COUNT) -var MAX_RW_COUNT = int(usermem.Addr(math.MaxInt32).RoundDown()) +var MAX_RW_COUNT = int(hostarch.Addr(math.MaxInt32).RoundDown()) // Activate ensures that the task has an active address space. func (t *Task) Activate() { @@ -49,7 +50,7 @@ func (t *Task) Deactivate() { // data without reflection and pass in a byte slice. // // This Task's AddressSpace must be active. -func (t *Task) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) { +func (t *Task) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) { return t.MemoryManager().CopyIn(t, addr, dst, usermem.IOOpts{ AddressSpaceActive: true, }) @@ -59,7 +60,7 @@ func (t *Task) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) { // data without reflection and pass in a byte slice. // // This Task's AddressSpace must be active. -func (t *Task) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) { +func (t *Task) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) { return t.MemoryManager().CopyOut(t, addr, src, usermem.IOOpts{ AddressSpaceActive: true, }) @@ -70,7 +71,7 @@ func (t *Task) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) { // user memory that is unmapped or not readable by the user. // // This Task's AddressSpace must be active. -func (t *Task) CopyInString(addr usermem.Addr, maxlen int) (string, error) { +func (t *Task) CopyInString(addr hostarch.Addr, maxlen int) (string, error) { return usermem.CopyStringIn(t, t.MemoryManager(), addr, maxlen, usermem.IOOpts{ AddressSpaceActive: true, }) @@ -90,7 +91,7 @@ func (t *Task) CopyInString(addr usermem.Addr, maxlen int) (string, error) { // { "abc" } => 4 (3 for length, 1 for elements) // // This Task's AddressSpace must be active. -func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([]string, error) { +func (t *Task) CopyInVector(addr hostarch.Addr, maxElemSize, maxTotalSize int) ([]string, error) { var v []string for { argAddr := t.Arch().Native(0) @@ -109,12 +110,12 @@ func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([ if maxTotalSize < thisMax { thisMax = maxTotalSize } - arg, err := t.CopyInString(usermem.Addr(t.Arch().Value(argAddr)), thisMax) + arg, err := t.CopyInString(hostarch.Addr(t.Arch().Value(argAddr)), thisMax) if err != nil { return v, err } v = append(v, arg) - addr += usermem.Addr(t.Arch().Width()) + addr += hostarch.Addr(t.Arch().Width()) maxTotalSize -= len(arg) + 1 } return v, nil @@ -126,7 +127,7 @@ func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([ // Preconditions: Same as usermem.IO.CopyOut, plus: // * The caller must be running on the task goroutine. 
// * t's AddressSpace must be active. -func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error { +func (t *Task) CopyOutIovecs(addr hostarch.Addr, src hostarch.AddrRangeSeq) error { switch t.Arch().Width() { case 8: const itemLen = 16 @@ -137,8 +138,8 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error b := t.CopyScratchBuffer(itemLen) for ; !src.IsEmpty(); src = src.Tail() { ar := src.Head() - usermem.ByteOrder.PutUint64(b[0:8], uint64(ar.Start)) - usermem.ByteOrder.PutUint64(b[8:16], uint64(ar.Length())) + hostarch.ByteOrder.PutUint64(b[0:8], uint64(ar.Start)) + hostarch.ByteOrder.PutUint64(b[8:16], uint64(ar.Length())) if _, err := t.CopyOutBytes(addr, b); err != nil { return err } @@ -153,8 +154,8 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error } // CopyInIovecs copies an array of numIovecs struct iovecs from the memory -// mapped at addr, converts them to usermem.AddrRanges, and returns them as a -// usermem.AddrRangeSeq. +// mapped at addr, converts them to hostarch.AddrRanges, and returns them as a +// hostarch.AddrRangeSeq. // // CopyInIovecs shares the following properties with Linux's // lib/iov_iter.c:import_iovec() => fs/read_write.c:rw_copy_check_uvector(): @@ -175,42 +176,42 @@ func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error // Preconditions: Same as usermem.IO.CopyIn, plus: // * The caller must be running on the task goroutine. // * t's AddressSpace must be active. -func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRangeSeq, error) { +func (t *Task) CopyInIovecs(addr hostarch.Addr, numIovecs int) (hostarch.AddrRangeSeq, error) { if numIovecs == 0 { - return usermem.AddrRangeSeq{}, nil + return hostarch.AddrRangeSeq{}, nil } - var dst []usermem.AddrRange + var dst []hostarch.AddrRange if numIovecs > 1 { - dst = make([]usermem.AddrRange, 0, numIovecs) + dst = make([]hostarch.AddrRange, 0, numIovecs) } switch t.Arch().Width() { case 8: const itemLen = 16 if _, ok := addr.AddLength(uint64(numIovecs) * itemLen); !ok { - return usermem.AddrRangeSeq{}, syserror.EFAULT + return hostarch.AddrRangeSeq{}, syserror.EFAULT } b := t.CopyScratchBuffer(itemLen) for i := 0; i < numIovecs; i++ { if _, err := t.CopyInBytes(addr, b); err != nil { - return usermem.AddrRangeSeq{}, err + return hostarch.AddrRangeSeq{}, err } - base := usermem.Addr(usermem.ByteOrder.Uint64(b[0:8])) - length := usermem.ByteOrder.Uint64(b[8:16]) + base := hostarch.Addr(hostarch.ByteOrder.Uint64(b[0:8])) + length := hostarch.ByteOrder.Uint64(b[8:16]) if length > math.MaxInt64 { - return usermem.AddrRangeSeq{}, syserror.EINVAL + return hostarch.AddrRangeSeq{}, syserror.EINVAL } ar, ok := t.MemoryManager().CheckIORange(base, int64(length)) if !ok { - return usermem.AddrRangeSeq{}, syserror.EFAULT + return hostarch.AddrRangeSeq{}, syserror.EFAULT } if numIovecs == 1 { // Special case to avoid allocating dst. - return usermem.AddrRangeSeqOf(ar).TakeFirst(MAX_RW_COUNT), nil + return hostarch.AddrRangeSeqOf(ar).TakeFirst(MAX_RW_COUNT), nil } dst = append(dst, ar) @@ -218,7 +219,7 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange } default: - return usermem.AddrRangeSeq{}, syserror.ENOSYS + return hostarch.AddrRangeSeq{}, syserror.ENOSYS } // Truncate to MAX_RW_COUNT. 
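// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. For a 64-bit task,
// CopyOutIovecs and CopyInIovecs above marshal each struct iovec as 16 bytes:
// an 8-byte base address followed by an 8-byte length, with lengths above
// MaxInt64 rejected as EINVAL. The program below shows that encoding;
// little-endian stands in for the native byte order of hostarch.ByteOrder.
// ---------------------------------------------------------------------------
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// encodeIovec64 packs one struct iovec for a task with an 8-byte pointer width.
func encodeIovec64(base, length uint64) []byte {
	b := make([]byte, 16)
	binary.LittleEndian.PutUint64(b[0:8], base)
	binary.LittleEndian.PutUint64(b[8:16], length)
	return b
}

// decodeIovec64 unpacks one struct iovec, applying the same iov_len sanity
// check as CopyInIovecs.
func decodeIovec64(b []byte) (base, length uint64, err error) {
	if len(b) != 16 {
		return 0, 0, fmt.Errorf("iovec must be 16 bytes, got %d", len(b))
	}
	base = binary.LittleEndian.Uint64(b[0:8])
	length = binary.LittleEndian.Uint64(b[8:16])
	if length > math.MaxInt64 {
		return 0, 0, errors.New("iov_len does not fit in int64 (EINVAL)")
	}
	return base, length, nil
}

func main() {
	b := encodeIovec64(0x7f0000002000, 512)
	fmt.Printf("raw iovec: % x\n", b)
	base, length, err := decodeIovec64(b)
	fmt.Printf("base=%#x len=%d err=%v\n", base, length, err)
}
// ------------------------------- end sketch --------------------------------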
@@ -226,13 +227,13 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange for i := range dst { dstlen := uint64(dst[i].Length()) if rem := uint64(MAX_RW_COUNT) - total; rem < dstlen { - dst[i].End -= usermem.Addr(dstlen - rem) + dst[i].End -= hostarch.Addr(dstlen - rem) dstlen = rem } total += dstlen } - return usermem.AddrRangeSeqFromSlice(dst), nil + return hostarch.AddrRangeSeqFromSlice(dst), nil } // SingleIOSequence returns a usermem.IOSequence representing [addr, @@ -245,7 +246,7 @@ func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRange // write syscalls in Linux do not use import_single_range(). However they check // access_ok() in fs/read_write.c:vfs_read/vfs_write, and overflowing address // ranges are truncated to MAX_RW_COUNT by fs/read_write.c:rw_verify_area().) -func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) { +func (t *Task) SingleIOSequence(addr hostarch.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) { if length > MAX_RW_COUNT { length = MAX_RW_COUNT } @@ -255,7 +256,7 @@ func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOp } return usermem.IOSequence{ IO: t.MemoryManager(), - Addrs: usermem.AddrRangeSeqOf(ar), + Addrs: hostarch.AddrRangeSeqOf(ar), Opts: opts, }, nil } @@ -267,7 +268,7 @@ func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOp // IovecsIOSequence is analogous to Linux's lib/iov_iter.c:import_iovec(). // // Preconditions: Same as Task.CopyInIovecs. -func (t *Task) IovecsIOSequence(addr usermem.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) { +func (t *Task) IovecsIOSequence(addr hostarch.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) { if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV { return usermem.IOSequence{}, syserror.EINVAL } @@ -317,7 +318,7 @@ func (cc *taskCopyContext) getMemoryManager() (*mm.MemoryManager, error) { } // CopyInBytes implements marshal.CopyContext.CopyInBytes. -func (cc *taskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) { +func (cc *taskCopyContext) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) { tmm, err := cc.getMemoryManager() if err != nil { return 0, err @@ -327,7 +328,7 @@ func (cc *taskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, erro } // CopyOutBytes implements marshal.CopyContext.CopyOutBytes. -func (cc *taskCopyContext) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) { +func (cc *taskCopyContext) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) { tmm, err := cc.getMemoryManager() if err != nil { return 0, err @@ -360,11 +361,11 @@ func (cc *ownTaskCopyContext) CopyScratchBuffer(size int) []byte { } // CopyInBytes implements marshal.CopyContext.CopyInBytes. -func (cc *ownTaskCopyContext) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) { +func (cc *ownTaskCopyContext) CopyInBytes(addr hostarch.Addr, dst []byte) (int, error) { return cc.t.MemoryManager().CopyIn(cc.t, addr, dst, cc.opts) } // CopyOutBytes implements marshal.CopyContext.CopyOutBytes. 
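// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. After decoding the
// iovecs, CopyInIovecs above truncates the collected address ranges so the
// combined length never exceeds MAX_RW_COUNT: once the budget is spent, later
// ranges are shrunk (possibly to zero length) from the end. The helper below
// applies the same in-place truncation to plain ranges.
// ---------------------------------------------------------------------------
package main

import "fmt"

type addrRange struct{ start, end uint64 }

func (r addrRange) length() uint64 { return r.end - r.start }

// truncateToBudget shrinks ranges in place so their total length is at most
// budget, mirroring the MAX_RW_COUNT loop in CopyInIovecs.
func truncateToBudget(ranges []addrRange, budget uint64) {
	var total uint64
	for i := range ranges {
		l := ranges[i].length()
		if rem := budget - total; rem < l {
			ranges[i].end -= l - rem
			l = rem
		}
		total += l
	}
}

func main() {
	ranges := []addrRange{
		{0x1000, 0x2000}, // 4 KiB
		{0x8000, 0xa000}, // 8 KiB
	}
	truncateToBudget(ranges, 6*1024)
	for _, r := range ranges {
		fmt.Printf("[%#x, %#x) length %d\n", r.start, r.end, r.length())
	}
}
// ------------------------------- end sketch --------------------------------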
-func (cc *ownTaskCopyContext) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) { +func (cc *ownTaskCopyContext) CopyOutBytes(addr hostarch.Addr, src []byte) (int, error) { return cc.t.MemoryManager().CopyOut(cc.t, addr, src, cc.opts) } diff --git a/pkg/sentry/kernel/timekeeper_test.go b/pkg/sentry/kernel/timekeeper_test.go index cf2f7ca72..dfc3c0719 100644 --- a/pkg/sentry/kernel/timekeeper_test.go +++ b/pkg/sentry/kernel/timekeeper_test.go @@ -17,12 +17,12 @@ package kernel import ( "testing" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/contexttest" "gvisor.dev/gvisor/pkg/sentry/pgalloc" sentrytime "gvisor.dev/gvisor/pkg/sentry/time" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // mockClocks is a sentrytime.Clocks that simply returns the times in the @@ -54,7 +54,7 @@ func (c *mockClocks) GetTime(id sentrytime.ClockID) (int64, error) { func stateTestClocklessTimekeeper(tb testing.TB) *Timekeeper { ctx := contexttest.Context(tb) mfp := pgalloc.MemoryFileProviderFromContext(ctx) - fr, err := mfp.MemoryFile().Allocate(usermem.PageSize, usage.Anonymous) + fr, err := mfp.MemoryFile().Allocate(hostarch.PageSize, usage.Anonymous) if err != nil { tb.Fatalf("failed to allocate memory: %v", err) } diff --git a/pkg/sentry/kernel/vdso.go b/pkg/sentry/kernel/vdso.go index 9e5c2d26f..cc0917504 100644 --- a/pkg/sentry/kernel/vdso.go +++ b/pkg/sentry/kernel/vdso.go @@ -17,10 +17,10 @@ package kernel import ( "fmt" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/pgalloc" - "gvisor.dev/gvisor/pkg/usermem" ) // vdsoParams are the parameters exposed to the VDSO. @@ -96,7 +96,7 @@ func NewVDSOParamPage(mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *VDSO // access returns a mapping of the param page. func (v *VDSOParamPage) access() (safemem.Block, error) { - bs, err := v.mfp.MemoryFile().MapInternal(v.fr, usermem.ReadWrite) + bs, err := v.mfp.MemoryFile().MapInternal(v.fr, hostarch.ReadWrite) if err != nil { return safemem.Block{}, err } diff --git a/pkg/sentry/loader/BUILD b/pkg/sentry/loader/BUILD index ab074b400..ecb6603a1 100644 --- a/pkg/sentry/loader/BUILD +++ b/pkg/sentry/loader/BUILD @@ -18,6 +18,7 @@ go_library( "//pkg/binary", "//pkg/context", "//pkg/cpuid", + "//pkg/hostarch", "//pkg/log", "//pkg/rand", "//pkg/safemem", diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go index cd9fa4031..e92d9fdc3 100644 --- a/pkg/sentry/loader/elf.go +++ b/pkg/sentry/loader/elf.go @@ -25,6 +25,7 @@ import ( "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsbridge" @@ -41,7 +42,7 @@ const ( // maxTotalPhdrSize is the maximum combined size of all program // headers. Linux limits this to one page. - maxTotalPhdrSize = usermem.PageSize + maxTotalPhdrSize = hostarch.PageSize ) var ( @@ -52,8 +53,8 @@ var ( prog64Size = int(binary.Size(elf.Prog64{})) ) -func progFlagsAsPerms(f elf.ProgFlag) usermem.AccessType { - var p usermem.AccessType +func progFlagsAsPerms(f elf.ProgFlag) hostarch.AccessType { + var p hostarch.AccessType if f&elf.PF_R == elf.PF_R { p.Read = true } @@ -75,7 +76,7 @@ type elfInfo struct { arch arch.Arch // entry is the program entry point. - entry usermem.Addr + entry hostarch.Addr // phdrs are the program headers. 
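// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. progFlagsAsPerms
// above translates ELF program-header flags into the access permissions the
// segment is mapped with: PF_R, PF_W and PF_X map to Read, Write and Execute.
// The snippet reproduces that mapping using the standard debug/elf flag
// constants and a local stand-in for hostarch.AccessType.
// ---------------------------------------------------------------------------
package main

import (
	"debug/elf"
	"fmt"
)

// accessType is a stand-in for hostarch.AccessType.
type accessType struct {
	Read, Write, Execute bool
}

// progFlagsAsPerms mirrors the loader's flag translation.
func progFlagsAsPerms(f elf.ProgFlag) accessType {
	return accessType{
		Read:    f&elf.PF_R != 0,
		Write:   f&elf.PF_W != 0,
		Execute: f&elf.PF_X != 0,
	}
}

func main() {
	text := progFlagsAsPerms(elf.PF_R | elf.PF_X)
	data := progFlagsAsPerms(elf.PF_R | elf.PF_W)
	fmt.Printf("text segment: %+v\n", text)
	fmt.Printf("data segment: %+v\n", data)
}
// ------------------------------- end sketch --------------------------------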
phdrs []elf.ProgHeader @@ -230,7 +231,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) { return elfInfo{ os: os, arch: a, - entry: usermem.Addr(hdr.Entry), + entry: hostarch.Addr(hdr.Entry), phdrs: phdrs, phdrOff: hdr.Phoff, phdrSize: prog64Size, @@ -240,9 +241,9 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) { // mapSegment maps a phdr into the Task. offset is the offset to apply to // phdr.Vaddr. -func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset usermem.Addr) error { +func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr *elf.ProgHeader, offset hostarch.Addr) error { // We must make a page-aligned mapping. - adjust := usermem.Addr(phdr.Vaddr).PageOffset() + adjust := hostarch.Addr(phdr.Vaddr).PageOffset() addr, ok := offset.AddLength(phdr.Vaddr) if !ok { @@ -250,14 +251,14 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr ctx.Warningf("Computed segment load address overflows: %#x + %#x", phdr.Vaddr, offset) return syserror.ENOEXEC } - addr -= usermem.Addr(adjust) + addr -= hostarch.Addr(adjust) fileSize := phdr.Filesz + adjust if fileSize < phdr.Filesz { ctx.Infof("Computed segment file size overflows: %#x + %#x", phdr.Filesz, adjust) return syserror.ENOEXEC } - ms, ok := usermem.Addr(fileSize).RoundUp() + ms, ok := hostarch.Addr(fileSize).RoundUp() if !ok { ctx.Infof("fileSize %#x too large", fileSize) return syserror.ENOEXEC @@ -281,7 +282,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr Unmap: true, Private: true, Perms: prot, - MaxPerms: usermem.AnyAccess, + MaxPerms: hostarch.AnyAccess, } defer func() { if mopts.MappingIdentity != nil { @@ -312,7 +313,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr panic(fmt.Sprintf("zeroSize too big? %#x", uint64(zeroSize))) } if _, err := m.ZeroOut(ctx, zeroAddr, zeroSize, usermem.IOOpts{IgnorePermissions: true}); err != nil { - ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+usermem.Addr(zeroSize), err) + ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+hostarch.Addr(zeroSize), err) return err } } @@ -330,7 +331,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr if !ok { panic(fmt.Sprintf("anonymous memory doesn't fit in pre-sized range? %#x + %#x", addr, mapSize)) } - anonSize, ok := usermem.Addr(memSize - mapSize).RoundUp() + anonSize, ok := hostarch.Addr(memSize - mapSize).RoundUp() if !ok { ctx.Infof("extra anon pages too large: %#x", memSize-mapSize) return syserror.ENOEXEC @@ -339,7 +340,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr // N.B. Linux uses vm_brk_flags to map these pages, which only // honors the X bit, always mapping at least RW. ignoring These // pages are not included in the final brk region. - prot := usermem.ReadWrite + prot := hostarch.ReadWrite if phdr.Flags&elf.PF_X == elf.PF_X { prot.Execute = true } @@ -352,7 +353,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr Fixed: true, Private: true, Perms: prot, - MaxPerms: usermem.AnyAccess, + MaxPerms: hostarch.AnyAccess, }); err != nil { ctx.Infof("Error mapping PT_LOAD segment %v anonymous memory: %v", phdr, err) return err @@ -371,19 +372,19 @@ type loadedELF struct { arch arch.Arch // entry is the entry point of the ELF. 
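// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. mapSegment above must
// produce a page-aligned mapping even when a PT_LOAD segment's p_vaddr is not
// page aligned: it maps from the segment's page start (load offset + vaddr,
// minus the in-page offset) and rounds the file size, grown by that same
// offset, up to a whole page. The helper below does the arithmetic with the
// same overflow checks; errno names in parentheses follow the hunk above.
// ---------------------------------------------------------------------------
package main

import (
	"errors"
	"fmt"
)

const pageSize = 4096

// segmentMapping returns the page-aligned address and size to map for a
// PT_LOAD segment placed at loadOffset.
func segmentMapping(loadOffset, vaddr, filesz uint64) (mapAddr, mapSize uint64, err error) {
	adjust := vaddr & (pageSize - 1) // segment's offset within its first page

	addr := loadOffset + vaddr
	if addr < loadOffset {
		return 0, 0, errors.New("segment load address overflows (ENOEXEC)")
	}
	mapAddr = addr - adjust

	fileSize := filesz + adjust
	if fileSize < filesz {
		return 0, 0, errors.New("segment file size overflows (ENOEXEC)")
	}
	mapSize = (fileSize + pageSize - 1) &^ (pageSize - 1)
	if mapSize < fileSize {
		return 0, 0, errors.New("rounded map size overflows (ENOEXEC)")
	}
	return mapAddr, mapSize, nil
}

func main() {
	// A data segment at vaddr 0x601060 with 0x2a0 bytes of file content.
	addr, size, err := segmentMapping(0, 0x601060, 0x2a0)
	fmt.Printf("map at %#x, size %#x, err=%v\n", addr, size, err)
}
// ------------------------------- end sketch --------------------------------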
- entry usermem.Addr + entry hostarch.Addr // start is the end of the ELF. - start usermem.Addr + start hostarch.Addr // end is the end of the ELF. - end usermem.Addr + end hostarch.Addr // interpter is the path to the ELF interpreter. interpreter string // phdrAddr is the address of the ELF program headers. - phdrAddr usermem.Addr + phdrAddr hostarch.Addr // phdrSize is the size of a single program header in the ELF. phdrSize int @@ -407,14 +408,14 @@ type loadedELF struct { // It does not load the ELF interpreter, or return any auxv entries. // // Preconditions: f is an ELF file. -func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset usermem.Addr) (loadedELF, error) { +func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, info elfInfo, sharedLoadOffset hostarch.Addr) (loadedELF, error) { first := true - var start, end usermem.Addr + var start, end hostarch.Addr var interpreter string for _, phdr := range info.phdrs { switch phdr.Type { case elf.PT_LOAD: - vaddr := usermem.Addr(phdr.Vaddr) + vaddr := hostarch.Addr(phdr.Vaddr) if first { first = false start = vaddr @@ -492,7 +493,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in // Note that the vaddr of the first PT_LOAD segment is ignored when // choosing the load address (even if it is non-zero). The vaddr does // become an offset from that load address. - var offset usermem.Addr + var offset hostarch.Addr if info.sharedObject { totalSize := end - start totalSize, ok := totalSize.RoundUp() @@ -688,8 +689,8 @@ func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error // ELF-specific auxv entries. bin.auxv = arch.Auxv{ arch.AuxEntry{linux.AT_PHDR, bin.phdrAddr}, - arch.AuxEntry{linux.AT_PHENT, usermem.Addr(bin.phdrSize)}, - arch.AuxEntry{linux.AT_PHNUM, usermem.Addr(bin.phdrNum)}, + arch.AuxEntry{linux.AT_PHENT, hostarch.Addr(bin.phdrSize)}, + arch.AuxEntry{linux.AT_PHNUM, hostarch.Addr(bin.phdrNum)}, arch.AuxEntry{linux.AT_ENTRY, bin.entry}, } if bin.interpreter != "" { diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go index c69b62db9..47e3775a3 100644 --- a/pkg/sentry/loader/loader.go +++ b/pkg/sentry/loader/loader.go @@ -25,6 +25,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/rand" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsbridge" @@ -266,17 +267,17 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V // Add generic auxv entries. auxv := append(loaded.auxv, arch.Auxv{ - arch.AuxEntry{linux.AT_UID, usermem.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())}, - arch.AuxEntry{linux.AT_EUID, usermem.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())}, - arch.AuxEntry{linux.AT_GID, usermem.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())}, - arch.AuxEntry{linux.AT_EGID, usermem.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())}, + arch.AuxEntry{linux.AT_UID, hostarch.Addr(c.RealKUID.In(c.UserNamespace).OrOverflow())}, + arch.AuxEntry{linux.AT_EUID, hostarch.Addr(c.EffectiveKUID.In(c.UserNamespace).OrOverflow())}, + arch.AuxEntry{linux.AT_GID, hostarch.Addr(c.RealKGID.In(c.UserNamespace).OrOverflow())}, + arch.AuxEntry{linux.AT_EGID, hostarch.Addr(c.EffectiveKGID.In(c.UserNamespace).OrOverflow())}, // The conditions that require AT_SECURE = 1 never arise. See // kernel.Task.updateCredsForExecLocked. 
arch.AuxEntry{linux.AT_SECURE, 0}, arch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC}, arch.AuxEntry{linux.AT_EXECFN, execfn}, arch.AuxEntry{linux.AT_RANDOM, random}, - arch.AuxEntry{linux.AT_PAGESZ, usermem.PageSize}, + arch.AuxEntry{linux.AT_PAGESZ, hostarch.PageSize}, arch.AuxEntry{linux.AT_SYSINFO_EHDR, vdsoAddr}, }...) auxv = append(auxv, extraAuxv...) diff --git a/pkg/sentry/loader/vdso.go b/pkg/sentry/loader/vdso.go index a32d37d62..fd54261fd 100644 --- a/pkg/sentry/loader/vdso.go +++ b/pkg/sentry/loader/vdso.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/arch" @@ -90,7 +91,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro var first *elf.ProgHeader var prev *elf.ProgHeader - var prevEnd usermem.Addr + var prevEnd hostarch.Addr for i, phdr := range info.phdrs { if phdr.Type != elf.PT_LOAD { continue @@ -119,7 +120,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro return elfInfo{}, syserror.ENOEXEC } - start := usermem.Addr(memoryOffset) + start := hostarch.Addr(memoryOffset) end, ok := start.AddLength(phdr.Memsz) if !ok { log.Warningf("PT_LOAD segment size overflows: %#x + %#x", start, end) @@ -210,7 +211,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) { } // Then copy it into a VDSO mapping. - size, ok := usermem.Addr(len(vdsodata.Binary)).RoundUp() + size, ok := hostarch.Addr(len(vdsodata.Binary)).RoundUp() if !ok { return nil, fmt.Errorf("VDSO size overflows? %#x", len(vdsodata.Binary)) } @@ -221,7 +222,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) { return nil, fmt.Errorf("unable to allocate VDSO memory: %v", err) } - ims, err := mf.MapInternal(vdso, usermem.ReadWrite) + ims, err := mf.MapInternal(vdso, hostarch.ReadWrite) if err != nil { mf.DecRef(vdso) return nil, fmt.Errorf("unable to map VDSO memory: %v", err) @@ -234,7 +235,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) { } // Finally, allocate a param page for this VDSO. - paramPage, err := mf.Allocate(usermem.PageSize, usage.System) + paramPage, err := mf.Allocate(hostarch.PageSize, usage.System) if err != nil { mf.DecRef(vdso) return nil, fmt.Errorf("unable to allocate VDSO param page: %v", err) @@ -266,7 +267,7 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) { // compatibility with such binaries, we load the VDSO much like Linux. // // loadVDSO takes a reference on the VDSO and parameter page FrameRegions. 
-func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (usermem.Addr, error) { +func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (hostarch.Addr, error) { if v.os != bin.os { ctx.Warningf("Binary ELF OS %v and VDSO ELF OS %v differ", bin.os, v.os) return 0, syserror.ENOEXEC @@ -297,8 +298,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) Fixed: true, Unmap: true, Private: true, - Perms: usermem.Read, - MaxPerms: usermem.Read, + Perms: hostarch.Read, + MaxPerms: hostarch.Read, }) if err != nil { ctx.Infof("Unable to map VDSO param page: %v", err) @@ -318,8 +319,8 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) Fixed: true, Unmap: true, Private: true, - Perms: usermem.Read, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.Read, + MaxPerms: hostarch.AnyAccess, }) if err != nil { ctx.Infof("Unable to map VDSO: %v", err) @@ -349,7 +350,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) return 0, syserror.ENOEXEC } segPage := segAddr.RoundDown() - segSize := usermem.Addr(phdr.Memsz) + segSize := hostarch.Addr(phdr.Memsz) segSize, ok = segSize.AddLength(segAddr.PageOffset()) if !ok { ctx.Warningf("PT_LOAD segment memsize %#x + offset %#x overflows", phdr.Memsz, segAddr.PageOffset()) @@ -371,7 +372,7 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) } perms := progFlagsAsPerms(phdr.Flags) - if perms != usermem.Read { + if perms != hostarch.Read { if err := m.MProtect(segPage, uint64(segSize), perms, false); err != nil { ctx.Warningf("Unable to set PT_LOAD segment protections %+v at [%#x, %#x): %v", perms, segAddr, segEnd, err) return 0, syserror.ENOEXEC diff --git a/pkg/sentry/memmap/BUILD b/pkg/sentry/memmap/BUILD index 2c95669cd..c30e88725 100644 --- a/pkg/sentry/memmap/BUILD +++ b/pkg/sentry/memmap/BUILD @@ -51,6 +51,7 @@ go_library( visibility = ["//pkg/sentry:internal"], deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/safemem", "//pkg/syserror", @@ -63,5 +64,5 @@ go_test( size = "small", srcs = ["mapping_set_test.go"], library = ":memmap", - deps = ["//pkg/usermem"], + deps = ["//pkg/hostarch"], ) diff --git a/pkg/sentry/memmap/mapping_set.go b/pkg/sentry/memmap/mapping_set.go index 457ed87f8..32863bb5e 100644 --- a/pkg/sentry/memmap/mapping_set.go +++ b/pkg/sentry/memmap/mapping_set.go @@ -18,7 +18,7 @@ import ( "fmt" "math" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // MappingSet maps offsets into a Mappable to mappings of those offsets. It is @@ -39,7 +39,7 @@ type MappingsOfRange map[MappingOfRange]struct{} // +stateify savable type MappingOfRange struct { MappingSpace MappingSpace - AddrRange usermem.AddrRange + AddrRange hostarch.AddrRange Writable bool } @@ -89,9 +89,9 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp // region with k1. k2 := MappingOfRange{ MappingSpace: k1.MappingSpace, - AddrRange: usermem.AddrRange{ + AddrRange: hostarch.AddrRange{ Start: k1.AddrRange.End, - End: k1.AddrRange.End + usermem.Addr(r2.Length()), + End: k1.AddrRange.End + hostarch.Addr(r2.Length()), }, Writable: k1.Writable, } @@ -102,7 +102,7 @@ func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 Mapp // OK. Add it to the merged map. 
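// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of this diff. The Merge logic above
// only coalesces two mapped MappableRanges when every mapping of the first is
// continued by a mapping of the second: same MappingSpace, same writability,
// and an address range that begins exactly where the first one ends and spans
// the second mappable range. The predicate below states that condition with
// simplified types.
// ---------------------------------------------------------------------------
package main

import "fmt"

type addrRange struct{ start, end uint64 }

// mappingOfRange is a simplified stand-in for memmap.MappingOfRange; the
// space field stands in for the MappingSpace identity.
type mappingOfRange struct {
	space    string
	ar       addrRange
	writable bool
}

// continues reports whether m2 is the continuation of m1 for a following
// mappable range of length r2Length, i.e. whether the two can merge.
func continues(m1, m2 mappingOfRange, r2Length uint64) bool {
	return m1.space == m2.space &&
		m1.writable == m2.writable &&
		m2.ar.start == m1.ar.end &&
		m2.ar.end-m2.ar.start == r2Length
}

func main() {
	m1 := mappingOfRange{"mm0", addrRange{0x10000, 0x11000}, true}
	m2 := mappingOfRange{"mm0", addrRange{0x11000, 0x12000}, true}
	m3 := mappingOfRange{"mm0", addrRange{0x20000, 0x21000}, true}
	fmt.Println(continues(m1, m2, 0x1000)) // true: contiguous, same space
	fmt.Println(continues(m1, m3, 0x1000)) // false: not adjacent in address space
}
// ------------------------------- end sketch --------------------------------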
merged[MappingOfRange{ MappingSpace: k1.MappingSpace, - AddrRange: usermem.AddrRange{ + AddrRange: hostarch.AddrRange{ Start: k1.AddrRange.Start, End: k2.AddrRange.End, }, @@ -124,11 +124,11 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin // split is a value in MappableRange, we need the offset into the // corresponding MappingsOfRange. - offset := usermem.Addr(split - r.Start) + offset := hostarch.Addr(split - r.Start) for k := range val { k1 := MappingOfRange{ MappingSpace: k.MappingSpace, - AddrRange: usermem.AddrRange{ + AddrRange: hostarch.AddrRange{ Start: k.AddrRange.Start, End: k.AddrRange.Start + offset, }, @@ -138,7 +138,7 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin k2 := MappingOfRange{ MappingSpace: k.MappingSpace, - AddrRange: usermem.AddrRange{ + AddrRange: hostarch.AddrRange{ Start: k.AddrRange.Start + offset, End: k.AddrRange.End, }, @@ -157,18 +157,18 @@ func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uin // indicating that ms maps addresses [0x4000, 0x6000) to MappableRange [0x0, // 0x2000). Then for subsetRange = [0x1000, 0x2000), subsetMapping returns a // MappingOfRange for which AddrRange = [0x5000, 0x6000). -func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr usermem.Addr, writable bool) MappingOfRange { +func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr hostarch.Addr, writable bool) MappingOfRange { if !wholeRange.IsSupersetOf(subsetRange) { panic(fmt.Sprintf("%v is not a superset of %v", wholeRange, subsetRange)) } offset := subsetRange.Start - wholeRange.Start - start := addr + usermem.Addr(offset) + start := addr + hostarch.Addr(offset) return MappingOfRange{ MappingSpace: ms, - AddrRange: usermem.AddrRange{ + AddrRange: hostarch.AddrRange{ Start: start, - End: start + usermem.Addr(subsetRange.Length()), + End: start + hostarch.Addr(subsetRange.Length()), }, Writable: writable, } @@ -178,7 +178,7 @@ func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr // previously had no mappings. // // Preconditions: Same as Mappable.AddMapping. -func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange { +func (s *MappingSet) AddMapping(ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) []MappableRange { mr := MappableRange{offset, offset + uint64(ar.Length())} var mapped []MappableRange seg, gap := s.Find(mr.Start) @@ -205,7 +205,7 @@ func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset ui // MappableRanges that now have no mappings. // // Preconditions: Same as Mappable.RemoveMapping. 
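subsetMapping, shown above, is pure offset arithmetic: given mappable offsets wholeRange mapped at virtual address addr, the addresses backing a sub-range are addr plus the sub-range's offset from wholeRange.Start. A simplified, self-contained sketch of that calculation; plain uint64s and the helper name subsetAddrs stand in for hostarch.Addr, memmap.MappableRange, and the real function:

package main

import "fmt"

// subsetAddrs mirrors subsetMapping's arithmetic: wholeStart..wholeEnd are
// mappable offsets mapped at virtual address addr; subStart..subEnd is a
// sub-range of those offsets. It returns the virtual addresses backing the
// sub-range.
func subsetAddrs(wholeStart, wholeEnd, subStart, subEnd, addr uint64) (uint64, uint64) {
	if subStart < wholeStart || subEnd > wholeEnd {
		panic("sub-range is not contained in the whole range")
	}
	start := addr + (subStart - wholeStart)
	return start, start + (subEnd - subStart)
}

func main() {
	// The doc comment's own example: [0x0, 0x2000) mapped at 0x4000; the
	// subset [0x1000, 0x2000) is therefore backed by [0x5000, 0x6000).
	lo, hi := subsetAddrs(0x0, 0x2000, 0x1000, 0x2000, 0x4000)
	fmt.Printf("[%#x, %#x)\n", lo, hi) // [0x5000, 0x6000)
}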
-func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) []MappableRange { +func (s *MappingSet) RemoveMapping(ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) []MappableRange { mr := MappableRange{offset, offset + uint64(ar.Length())} var unmapped []MappableRange diff --git a/pkg/sentry/memmap/mapping_set_test.go b/pkg/sentry/memmap/mapping_set_test.go index d39efe38f..5cb81fde7 100644 --- a/pkg/sentry/memmap/mapping_set_test.go +++ b/pkg/sentry/memmap/mapping_set_test.go @@ -15,24 +15,23 @@ package memmap import ( + "gvisor.dev/gvisor/pkg/hostarch" "reflect" "testing" - - "gvisor.dev/gvisor/pkg/usermem" ) type testMappingSpace struct { // Ideally we'd store the full ranges that were invalidated, rather // than individual calls to Invalidate, as they are an implementation // detail, but this is the simplest way for now. - inv []usermem.AddrRange + inv []hostarch.AddrRange } func (n *testMappingSpace) reset() { - n.inv = []usermem.AddrRange{} + n.inv = []hostarch.AddrRange{} } -func (n *testMappingSpace) Invalidate(ar usermem.AddrRange, opts InvalidateOpts) { +func (n *testMappingSpace) Invalidate(ar hostarch.AddrRange, opts InvalidateOpts) { n.inv = append(n.inv, ar) } @@ -40,16 +39,16 @@ func TestAddRemoveMapping(t *testing.T) { set := MappingSet{} ms := &testMappingSpace{} - mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, true) + mapped := set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true) if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) { t.Errorf("AddMapping: got %+v, wanted %+v", got, want) } - // Mappings (usermem.AddrRanges => memmap.MappableRange): + // Mappings (hostarch.AddrRanges => memmap.MappableRange): // [0x10000, 0x12000) => [0x1000, 0x3000) t.Log(&set) - mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true) + mapped = set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true) if len(mapped) != 0 { t.Errorf("AddMapping: got %+v, wanted []", mapped) } @@ -59,7 +58,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000) t.Log(&set) - mapped = set.AddMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000, true) + mapped = set.AddMapping(ms, hostarch.AddrRange{0x30000, 0x31000}, 0x4000, true) if got, want := mapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) { t.Errorf("AddMapping: got %+v, wanted %+v", got, want) } @@ -70,7 +69,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x30000, 0x31000) => [0x4000, 0x5000) t.Log(&set) - mapped = set.AddMapping(ms, usermem.AddrRange{0x12000, 0x15000}, 0x3000, true) + mapped = set.AddMapping(ms, hostarch.AddrRange{0x12000, 0x15000}, 0x3000, true) if got, want := mapped, []MappableRange{{0x3000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) { t.Errorf("AddMapping: got %+v, wanted %+v", got, want) } @@ -83,7 +82,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x14000, 0x15000) => [0x5000, 0x6000) t.Log(&set) - unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0x1000, true) + unmapped := set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0x1000, true) if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } @@ -95,7 +94,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x14000, 0x15000) => [0x5000, 0x6000) t.Log(&set) - unmapped = 
set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true) if len(unmapped) != 0 { t.Errorf("RemoveMapping: got %+v, wanted []", unmapped) } @@ -106,7 +105,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x14000, 0x15000) => [0x5000, 0x6000) t.Log(&set) - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x15000}, 0x2000, true) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x11000, 0x15000}, 0x2000, true) if got, want := unmapped, []MappableRange{{0x2000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } @@ -115,7 +114,7 @@ func TestAddRemoveMapping(t *testing.T) { // [0x30000, 0x31000) => [0x4000, 0x5000) t.Log(&set) - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000, true) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x30000, 0x31000}, 0x4000, true) if got, want := unmapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } @@ -125,12 +124,12 @@ func TestInvalidateWholeMapping(t *testing.T) { set := MappingSet{} ms := &testMappingSpace{} - set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0, true) + set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0, true) // Mappings: // [0x10000, 0x11000) => [0, 0x1000) t.Log(&set) set.Invalidate(MappableRange{0, 0x1000}, InvalidateOpts{}) - if got, want := ms.inv, []usermem.AddrRange{{0x10000, 0x11000}}; !reflect.DeepEqual(got, want) { + if got, want := ms.inv, []hostarch.AddrRange{{0x10000, 0x11000}}; !reflect.DeepEqual(got, want) { t.Errorf("Invalidate: got %+v, wanted %+v", got, want) } } @@ -139,12 +138,12 @@ func TestInvalidatePartialMapping(t *testing.T) { set := MappingSet{} ms := &testMappingSpace{} - set.AddMapping(ms, usermem.AddrRange{0x10000, 0x13000}, 0, true) + set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x13000}, 0, true) // Mappings: // [0x10000, 0x13000) => [0, 0x3000) t.Log(&set) set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{}) - if got, want := ms.inv, []usermem.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) { + if got, want := ms.inv, []hostarch.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) { t.Errorf("Invalidate: got %+v, wanted %+v", got, want) } } @@ -153,14 +152,14 @@ func TestInvalidateMultipleMappings(t *testing.T) { set := MappingSet{} ms := &testMappingSpace{} - set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0, true) - set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true) + set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x11000}, 0, true) + set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true) // Mappings: // [0x10000, 0x11000) => [0, 0x1000) // [0x12000, 0x13000) => [0x2000, 0x3000) t.Log(&set) set.Invalidate(MappableRange{0, 0x3000}, InvalidateOpts{}) - if got, want := ms.inv, []usermem.AddrRange{{0x10000, 0x11000}, {0x20000, 0x21000}}; !reflect.DeepEqual(got, want) { + if got, want := ms.inv, []hostarch.AddrRange{{0x10000, 0x11000}, {0x20000, 0x21000}}; !reflect.DeepEqual(got, want) { t.Errorf("Invalidate: got %+v, wanted %+v", got, want) } } @@ -170,17 +169,17 @@ func TestInvalidateOverlappingMappings(t *testing.T) { ms1 := &testMappingSpace{} ms2 := &testMappingSpace{} - set.AddMapping(ms1, usermem.AddrRange{0x10000, 0x12000}, 0, true) - set.AddMapping(ms2, usermem.AddrRange{0x20000, 0x22000}, 0x1000, true) + 
set.AddMapping(ms1, hostarch.AddrRange{0x10000, 0x12000}, 0, true) + set.AddMapping(ms2, hostarch.AddrRange{0x20000, 0x22000}, 0x1000, true) // Mappings: // ms1:[0x10000, 0x12000) => [0, 0x2000) // ms2:[0x11000, 0x13000) => [0x1000, 0x3000) t.Log(&set) set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{}) - if got, want := ms1.inv, []usermem.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) { + if got, want := ms1.inv, []hostarch.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) { t.Errorf("Invalidate: ms1: got %+v, wanted %+v", got, want) } - if got, want := ms2.inv, []usermem.AddrRange{{0x20000, 0x21000}}; !reflect.DeepEqual(got, want) { + if got, want := ms2.inv, []hostarch.AddrRange{{0x20000, 0x21000}}; !reflect.DeepEqual(got, want) { t.Errorf("Invalidate: ms1: got %+v, wanted %+v", got, want) } } @@ -189,7 +188,7 @@ func TestMixedWritableMappings(t *testing.T) { set := MappingSet{} ms := &testMappingSpace{} - mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, true) + mapped := set.AddMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true) if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) { t.Errorf("AddMapping: got %+v, wanted %+v", got, want) } @@ -198,7 +197,7 @@ func TestMixedWritableMappings(t *testing.T) { // [0x10000, 0x12000) writable => [0x1000, 0x3000) t.Log(&set) - mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x22000}, 0x2000, false) + mapped = set.AddMapping(ms, hostarch.AddrRange{0x20000, 0x22000}, 0x2000, false) if got, want := mapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) { t.Errorf("AddMapping: got %+v, wanted %+v", got, want) } @@ -211,14 +210,14 @@ func TestMixedWritableMappings(t *testing.T) { // Unmap should fail because we specified the readonly map address range, but // asked to unmap a writable segment. - unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, true) + unmapped := set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, true) if len(unmapped) != 0 { t.Errorf("RemoveMapping: got %+v, wanted []", unmapped) } // Readonly mapping removed, but writable mapping still exists in the range, // so no mappable range fully unmapped. - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000, false) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x20000, 0x21000}, 0x2000, false) if len(unmapped) != 0 { t.Errorf("RemoveMapping: got %+v, wanted []", unmapped) } @@ -228,7 +227,7 @@ func TestMixedWritableMappings(t *testing.T) { // [0x21000, 0x22000) readonly => [0x3000, 0x4000) t.Log(&set) - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x12000}, 0x2000, true) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x11000, 0x12000}, 0x2000, true) if got, want := unmapped, []MappableRange{{0x2000, 0x3000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } @@ -239,12 +238,12 @@ func TestMixedWritableMappings(t *testing.T) { t.Log(&set) // Unmap should fail since writable bit doesn't match. 
- unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, false) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, false) if len(unmapped) != 0 { t.Errorf("RemoveMapping: got %+v, wanted []", unmapped) } - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000, true) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x10000, 0x12000}, 0x1000, true) if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } @@ -253,7 +252,7 @@ func TestMixedWritableMappings(t *testing.T) { // [0x21000, 0x22000) readonly => [0x3000, 0x4000) t.Log(&set) - unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x21000, 0x22000}, 0x3000, false) + unmapped = set.RemoveMapping(ms, hostarch.AddrRange{0x21000, 0x22000}, 0x3000, false) if got, want := unmapped, []MappableRange{{0x3000, 0x4000}}; !reflect.DeepEqual(got, want) { t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) } diff --git a/pkg/sentry/memmap/memmap.go b/pkg/sentry/memmap/memmap.go index 49e21026e..72868646a 100644 --- a/pkg/sentry/memmap/memmap.go +++ b/pkg/sentry/memmap/memmap.go @@ -19,8 +19,8 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" - "gvisor.dev/gvisor/pkg/usermem" ) // Mappable represents a memory-mappable object, a mutable mapping from uint64 @@ -29,8 +29,8 @@ import ( // See mm/mm.go for Mappable's place in the lock order. // // All Mappable methods have the following preconditions: -// * usermem.AddrRanges and MappableRanges must be non-empty (Length() != 0). -// * usermem.Addrs and Mappable offsets must be page-aligned. +// * hostarch.AddrRanges and MappableRanges must be non-empty (Length() != 0). +// * hostarch.Addrs and Mappable offsets must be page-aligned. type Mappable interface { // AddMapping notifies the Mappable of a mapping from addresses ar in ms to // offsets [offset, offset+ar.Length()) in this Mappable. @@ -42,7 +42,7 @@ type Mappable interface { // lifetime of the mapping. // // Preconditions: offset+ar.Length() does not overflow. - AddMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error + AddMapping(ctx context.Context, ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error // RemoveMapping notifies the Mappable of the removal of a mapping from // addresses ar in ms to offsets [offset, offset+ar.Length()) in this @@ -52,7 +52,7 @@ type Mappable interface { // * offset+ar.Length() does not overflow. // * The removed mapping must exist. writable must match the // corresponding call to AddMapping. - RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) + RemoveMapping(ctx context.Context, ms MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) // CopyMapping notifies the Mappable of an attempt to copy a mapping in ms // from srcAR to dstAR. For most Mappables, this is equivalent to @@ -66,7 +66,7 @@ type Mappable interface { // * offset+srcAR.Length() and offset+dstAR.Length() do not overflow. // * The mapping at srcAR must exist. writable must match the // corresponding call to AddMapping. 
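Both the interface contract above and the MappingSet tests earlier in this diff turn on the same detail: mappings are keyed on the writable bit, so a RemoveMapping whose writable flag does not match the original AddMapping finds nothing to remove. A toy, self-contained illustration of that keying; mappingKey is an invented stand-in for memmap.MappingOfRange and ignores the per-segment range tracking the real set does:

package main

import "fmt"

// mappingKey is a stand-in for MappingOfRange: the writable bit is part of
// the key, not an attribute looked up afterwards.
type mappingKey struct {
	start, end uint64
	writable   bool
}

func main() {
	set := map[mappingKey]struct{}{}

	added := mappingKey{0x20000, 0x21000, false} // read-only mapping
	set[added] = struct{}{}

	// Removing with writable=true misses: it is a different key.
	wrong := mappingKey{0x20000, 0x21000, true}
	if _, ok := set[wrong]; !ok {
		fmt.Println("mismatched writable bit: nothing removed")
	}

	// Removing with the matching flag succeeds.
	delete(set, added)
	fmt.Println("remaining mappings:", len(set)) // 0
}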
- CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error + CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error // Translate returns the Mappable's current mappings for at least the range // of offsets specified by required, and at most the range of offsets @@ -90,7 +90,7 @@ type Mappable interface { // synchronize with invalidation. // // Postconditions: See CheckTranslateResult. - Translate(ctx context.Context, required, optional MappableRange, at usermem.AccessType) ([]Translation, error) + Translate(ctx context.Context, required, optional MappableRange, at hostarch.AccessType) ([]Translation, error) // InvalidateUnsavable requests that the Mappable invalidate Translations // that cannot be preserved across save/restore. @@ -113,7 +113,7 @@ type Translation struct { // Perms is the set of permissions for which platform.AddressSpace.MapFile // and platform.AddressSpace.MapInternal on this Translation is permitted. - Perms usermem.AccessType + Perms hostarch.AccessType } // FileRange returns the FileRange represented by t. @@ -125,18 +125,18 @@ func (t Translation) FileRange() FileRange { // postconditions for Mappable.Translate(required, optional, at). // // Preconditions: Same as Mappable.Translate. -func CheckTranslateResult(required, optional MappableRange, at usermem.AccessType, ts []Translation, terr error) error { +func CheckTranslateResult(required, optional MappableRange, at hostarch.AccessType, ts []Translation, terr error) error { // Verify that the inputs to Mappable.Translate were valid. if !required.WellFormed() || required.Length() == 0 { panic(fmt.Sprintf("invalid required range: %v", required)) } - if !usermem.Addr(required.Start).IsPageAligned() || !usermem.Addr(required.End).IsPageAligned() { + if !hostarch.Addr(required.Start).IsPageAligned() || !hostarch.Addr(required.End).IsPageAligned() { panic(fmt.Sprintf("unaligned required range: %v", required)) } if !optional.IsSupersetOf(required) { panic(fmt.Sprintf("optional range %v is not a superset of required range %v", optional, required)) } - if !usermem.Addr(optional.Start).IsPageAligned() || !usermem.Addr(optional.End).IsPageAligned() { + if !hostarch.Addr(optional.Start).IsPageAligned() || !hostarch.Addr(optional.End).IsPageAligned() { panic(fmt.Sprintf("unaligned optional range: %v", optional)) } @@ -148,13 +148,13 @@ func CheckTranslateResult(required, optional MappableRange, at usermem.AccessTyp if !t.Source.WellFormed() || t.Source.Length() == 0 { return fmt.Errorf("Translation %+v has invalid Source", t) } - if !usermem.Addr(t.Source.Start).IsPageAligned() || !usermem.Addr(t.Source.End).IsPageAligned() { + if !hostarch.Addr(t.Source.Start).IsPageAligned() || !hostarch.Addr(t.Source.End).IsPageAligned() { return fmt.Errorf("Translation %+v has unaligned Source", t) } if t.File == nil { return fmt.Errorf("Translation %+v has nil File", t) } - if !usermem.Addr(t.Offset).IsPageAligned() { + if !hostarch.Addr(t.Offset).IsPageAligned() { return fmt.Errorf("Translation %+v has unaligned Offset", t) } // Translations must be contiguous and in increasing order of @@ -210,7 +210,7 @@ func (mr MappableRange) String() string { return fmt.Sprintf("[%#x, %#x)", mr.Start, mr.End) } -// MappingSpace represents a mutable mapping from usermem.Addrs to (Mappable, +// MappingSpace represents a mutable mapping from hostarch.Addrs to (Mappable, // uint64 offset) pairs. 
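CheckTranslateResult, updated above, largely enforces two postconditions: each Translation's source range must be page-aligned, and the Translations together must cover the required range without holes. A stripped-down sketch of that validation over plain offsets, assuming a 4 KiB page; span and checkContiguous are invented names, and the real checker additionally validates File, Offset alignment, and permissions:

package main

import (
	"errors"
	"fmt"
)

const pageSize = 0x1000

// span is a stand-in for a Translation's source range of mappable offsets.
type span struct{ start, end uint64 }

// checkContiguous verifies that spans are page-aligned, leave no hole, and
// cover [reqStart, reqEnd).
func checkContiguous(reqStart, reqEnd uint64, spans []span) error {
	covered := reqStart
	for _, s := range spans {
		if s.start%pageSize != 0 || s.end%pageSize != 0 {
			return fmt.Errorf("unaligned translation [%#x, %#x)", s.start, s.end)
		}
		if s.start > covered {
			return fmt.Errorf("hole before translation at %#x", s.start)
		}
		if s.end > covered {
			covered = s.end
		}
	}
	if covered < reqEnd {
		return errors.New("translations do not cover the required range")
	}
	return nil
}

func main() {
	spans := []span{{0x0, 0x2000}, {0x2000, 0x3000}}
	fmt.Println(checkContiguous(0x0, 0x3000, spans)) // <nil>
	fmt.Println(checkContiguous(0x0, 0x4000, spans)) // coverage error
}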
type MappingSpace interface { // Invalidate is called to notify the MappingSpace that values returned by @@ -223,7 +223,7 @@ type MappingSpace interface { // Preconditions: // * ar.Length() != 0. // * ar must be page-aligned. - Invalidate(ar usermem.AddrRange, opts InvalidateOpts) + Invalidate(ar hostarch.AddrRange, opts InvalidateOpts) } // InvalidateOpts holds options to MappingSpace.Invalidate. @@ -321,7 +321,7 @@ type MMapOpts struct { Offset uint64 // Addr is the suggested address for the mapping. - Addr usermem.Addr + Addr hostarch.Addr // Fixed specifies whether this is a fixed mapping (it must be located at // Addr). @@ -338,7 +338,7 @@ type MMapOpts struct { Map32Bit bool // Perms is the set of permissions to the applied to this mapping. - Perms usermem.AccessType + Perms hostarch.AccessType // MaxPerms limits the set of permissions that may ever apply to this // mapping. If Mappable is not nil, all memmap.Translations returned by @@ -346,7 +346,7 @@ type MMapOpts struct { // // Preconditions: MaxAccessType should be an effective AccessType, as // access cannot be limited beyond effective AccessTypes. - MaxPerms usermem.AccessType + MaxPerms hostarch.AccessType // Private is true if writes to the mapping should be propagated to a copy // that is exclusive to the MemoryManager. @@ -410,7 +410,7 @@ type File interface { // // Postconditions: The returned mapping is valid as long as at least one // reference is held on the mapped pages. - MapInternal(fr FileRange, at usermem.AccessType) (safemem.BlockSeq, error) + MapInternal(fr FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) // FD returns the file descriptor represented by the File. // diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD index 6dbeccfe2..b417c2da7 100644 --- a/pkg/sentry/mm/BUILD +++ b/pkg/sentry/mm/BUILD @@ -28,14 +28,14 @@ go_template_instance( "trackGaps": "1", }, imports = { - "usermem": "gvisor.dev/gvisor/pkg/usermem", + "hostarch": "gvisor.dev/gvisor/pkg/hostarch", }, package = "mm", prefix = "vma", template = "//pkg/segment:generic_set", types = { - "Key": "usermem.Addr", - "Range": "usermem.AddrRange", + "Key": "hostarch.Addr", + "Range": "hostarch.AddrRange", "Value": "vma", "Functions": "vmaSetFunctions", }, @@ -48,14 +48,14 @@ go_template_instance( "minDegree": "8", }, imports = { - "usermem": "gvisor.dev/gvisor/pkg/usermem", + "hostarch": "gvisor.dev/gvisor/pkg/hostarch", }, package = "mm", prefix = "pma", template = "//pkg/segment:generic_set", types = { - "Key": "usermem.Addr", - "Range": "usermem.AddrRange", + "Key": "hostarch.Addr", + "Range": "hostarch.AddrRange", "Value": "pma", "Functions": "pmaSetFunctions", }, @@ -125,6 +125,7 @@ go_library( "//pkg/abi/linux", "//pkg/atomicbitops", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", @@ -155,6 +156,7 @@ go_test( library = ":mm", deps = [ "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/arch", "//pkg/sentry/contexttest", "//pkg/sentry/limits", diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go index a93e76c75..534e0e957 100644 --- a/pkg/sentry/mm/address_space.go +++ b/pkg/sentry/mm/address_space.go @@ -19,8 +19,8 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/platform" - "gvisor.dev/gvisor/pkg/usermem" ) // AddressSpace returns the platform.AddressSpace bound to mm. @@ -172,17 +172,17 @@ func (mm *MemoryManager) Deactivate() { // * ar.Length() != 0. // * ar must be page-aligned. 
// * pseg == mm.pmas.LowerBoundSegment(ar.Start). -func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error { +func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar hostarch.AddrRange, precommit bool) error { // By default, map entire pmas at a time, under the assumption that there // is no cost to mapping more of a pma than necessary. - mapAR := usermem.AddrRange{0, ^usermem.Addr(usermem.PageSize - 1)} + mapAR := hostarch.AddrRange{0, ^hostarch.Addr(hostarch.PageSize - 1)} if precommit { // When explicitly precommitting, only map ar, since overmapping may // incur unexpected resource usage. mapAR = ar } else if mapUnit := mm.p.MapUnit(); mapUnit != 0 { // Limit the range we map to ar, aligned to mapUnit. - mapMask := usermem.Addr(mapUnit - 1) + mapMask := hostarch.Addr(mapUnit - 1) mapAR.Start = ar.Start &^ mapMask // If rounding ar.End up overflows, just keep the existing mapAR.End. if end := (ar.End + mapMask) &^ mapMask; end >= ar.End { @@ -218,7 +218,7 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, pre // unmapASLocked removes all AddressSpace mappings for addresses in ar. // // Preconditions: mm.activeMu must be locked. -func (mm *MemoryManager) unmapASLocked(ar usermem.AddrRange) { +func (mm *MemoryManager) unmapASLocked(ar hostarch.AddrRange) { if mm.as == nil { // No AddressSpace? Force all mappings to be unmapped on the next // Activate. diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go index 5ab2ef79f..346866d3c 100644 --- a/pkg/sentry/mm/aio_context.go +++ b/pkg/sentry/mm/aio_context.go @@ -17,6 +17,7 @@ package mm import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/usage" @@ -83,7 +84,7 @@ func (mm *MemoryManager) destroyAIOContextLocked(ctx context.Context, id uint64) // the same address. Then it would be unmapping memory that it doesn't own. // This is, however, the way Linux implements AIO. Keeps the same [weird] // semantics in case anyone relies on it. - mm.MUnmap(ctx, usermem.Addr(id), aioRingBufferSize) + mm.MUnmap(ctx, hostarch.Addr(id), aioRingBufferSize) delete(mm.aioManager.contexts, id) aioCtx.destroy() @@ -259,7 +260,7 @@ type aioMappable struct { fr memmap.FileRange } -var aioRingBufferSize = uint64(usermem.Addr(linux.AIORingSize).MustRoundUp()) +var aioRingBufferSize = uint64(hostarch.Addr(linux.AIORingSize).MustRoundUp()) func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) { fr, err := mfp.MemoryFile().Allocate(aioRingBufferSize, usage.Anonymous) @@ -300,7 +301,7 @@ func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error } // AddMapping implements memmap.Mappable.AddMapping. -func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar usermem.AddrRange, offset uint64, _ bool) error { +func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, _ bool) error { // Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap() // sets VM_DONTEXPAND). if offset != 0 || uint64(ar.Length()) != aioRingBufferSize { @@ -310,11 +311,11 @@ func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar us } // RemoveMapping implements memmap.Mappable.RemoveMapping. 
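The mapASLocked hunk above is mostly about how much to map around a fault: by default whole pmas are mapped, but when the platform reports a MapUnit the range is clamped to the faulting range expanded to MapUnit alignment, keeping the original end if rounding up would overflow. A self-contained sketch of that range computation; expandToMapUnit is an invented name and mapUnit is assumed to be a power of two, as the real code requires:

package main

import "fmt"

// expandToMapUnit widens [start, end) to mapUnit-aligned boundaries without
// letting the end wrap around, mirroring mapASLocked's mapAR computation.
func expandToMapUnit(start, end, mapUnit uint64) (uint64, uint64) {
	mask := mapUnit - 1
	alignedStart := start &^ mask
	alignedEnd := end
	// Only take the rounded-up end if it did not overflow.
	if e := (end + mask) &^ mask; e >= end {
		alignedEnd = e
	}
	return alignedStart, alignedEnd
}

func main() {
	lo, hi := expandToMapUnit(0x12345, 0x13456, 0x10000)
	fmt.Printf("[%#x, %#x)\n", lo, hi) // [0x10000, 0x20000)
}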
-func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) { +func (m *aioMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) { } // CopyMapping implements memmap.Mappable.CopyMapping. -func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, _ bool) error { +func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, _ bool) error { // Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap() // sets VM_DONTEXPAND). if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize { @@ -346,7 +347,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s } // Translate implements memmap.Mappable.Translate. -func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { var err error if required.End > m.fr.Length() { err = &memmap.BusError{syserror.EFAULT} @@ -357,7 +358,7 @@ func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.M Source: source, File: m.mfp.MemoryFile(), Offset: m.fr.Start + source.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, err } @@ -389,8 +390,8 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint // Linux uses "do_mmap_pgoff(..., PROT_READ | PROT_WRITE, ...)" in // fs/aio.c:aio_setup_ring(). Since we don't implement AIO_RING_MAGIC, // user mode should not write to this page. - Perms: usermem.Read, - MaxPerms: usermem.Read, + Perms: hostarch.Read, + MaxPerms: hostarch.Read, }) if err != nil { return 0, err @@ -435,6 +436,6 @@ func (mm *MemoryManager) LookupAIOContext(ctx context.Context, id uint64) (*AIOC // bytes from id). func (mm *MemoryManager) isValidAddr(ctx context.Context, id uint64) bool { var buf [4]byte - _, err := mm.CopyIn(ctx, usermem.Addr(id), buf[:], usermem.IOOpts{}) + _, err := mm.CopyIn(ctx, hostarch.Addr(id), buf[:], usermem.IOOpts{}) return err == nil } diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go index a8ac48080..16f318ab3 100644 --- a/pkg/sentry/mm/io.go +++ b/pkg/sentry/mm/io.go @@ -16,6 +16,7 @@ package mm import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/syserror" @@ -60,11 +61,11 @@ const ( rwMapMinBytes = 512 ) -// CheckIORange is similar to usermem.Addr.ToRange, but applies bounds checks +// CheckIORange is similar to hostarch.Addr.ToRange, but applies bounds checks // consistent with Linux's arch/x86/include/asm/uaccess.h:access_ok(). // // Preconditions: length >= 0. -func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem.AddrRange, bool) { +func (mm *MemoryManager) CheckIORange(addr hostarch.Addr, length int64) (hostarch.AddrRange, bool) { // Note that access_ok() constrains end even if length == 0. ar, ok := addr.ToRange(uint64(length)) return ar, (ok && ar.End <= mm.layout.MaxAddr) @@ -72,7 +73,7 @@ func (mm *MemoryManager) CheckIORange(addr usermem.Addr, length int64) (usermem. // checkIOVec applies bound checks consistent with Linux's // arch/x86/include/asm/uaccess.h:access_ok() to ars. 
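CheckIORange, shown above, is the sentry's analogue of Linux's access_ok(): build the range for the requested I/O, fail on wraparound, and require the end to stay at or below the layout's maximum mappable address. A simplified stand-alone version over raw integers; checkIORange here and the maxAddr value are illustrative stand-ins for the real method and mm.layout.MaxAddr:

package main

import "fmt"

// checkIORange returns [addr, addr+length) and whether it is a valid user
// I/O range: non-negative length, no overflow, and end within maxAddr.
func checkIORange(addr uint64, length int64, maxAddr uint64) (start, end uint64, ok bool) {
	if length < 0 {
		return 0, 0, false
	}
	end = addr + uint64(length)
	if end < addr { // wrapped around
		return 0, 0, false
	}
	return addr, end, end <= maxAddr
}

func main() {
	const maxAddr = 0x7ffffffff000 // hypothetical maximum user address
	_, _, ok := checkIORange(0x7000, 0x2000, maxAddr)
	fmt.Println(ok) // true
	_, _, ok = checkIORange(maxAddr, 0x2000, maxAddr)
	fmt.Println(ok) // false: runs past the maximum user address
}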
-func (mm *MemoryManager) checkIOVec(ars usermem.AddrRangeSeq) bool { +func (mm *MemoryManager) checkIOVec(ars hostarch.AddrRangeSeq) bool { for !ars.IsEmpty() { ar := ars.Head() if _, ok := mm.CheckIORange(ar.Start, int64(ar.Length())); !ok { @@ -100,7 +101,7 @@ func translateIOError(ctx context.Context, err error) error { } // CopyOut implements usermem.IO.CopyOut. -func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) { +func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) { ar, ok := mm.CheckIORange(addr, int64(len(src))) if !ok { return 0, syserror.EFAULT @@ -116,24 +117,24 @@ func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []b } // Go through internal mappings. - n64, err := mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + n64, err := mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src))) return n, translateIOError(ctx, err) }) return int(n64), err } -func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src []byte) (int, error) { +func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src []byte) (int, error) { var done int for { - n, err := mm.as.CopyOut(addr+usermem.Addr(done), src[done:]) + n, err := mm.as.CopyOut(addr+hostarch.Addr(done), src[done:]) done += n if err == nil { return done, nil } if f, ok := err.(platform.SegmentationFault); ok { ar, _ := addr.ToRange(uint64(len(src))) - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil { return done, err } continue @@ -143,7 +144,7 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src [ } // CopyIn implements usermem.IO.CopyIn. -func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) { +func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) { ar, ok := mm.CheckIORange(addr, int64(len(dst))) if !ok { return 0, syserror.EFAULT @@ -159,24 +160,24 @@ func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []by } // Go through internal mappings. 
- n64, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + n64, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims) return n, translateIOError(ctx, err) }) return int(n64), err } -func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []byte) (int, error) { +func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte) (int, error) { var done int for { - n, err := mm.as.CopyIn(addr+usermem.Addr(done), dst[done:]) + n, err := mm.as.CopyIn(addr+hostarch.Addr(done), dst[done:]) done += n if err == nil { return done, nil } if f, ok := err.(platform.SegmentationFault); ok { ar, _ := addr.ToRange(uint64(len(dst))) - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil { return done, err } continue @@ -186,7 +187,7 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst [] } // ZeroOut implements usermem.IO.ZeroOut. -func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) { +func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) { ar, ok := mm.CheckIORange(addr, toZero) if !ok { return 0, syserror.EFAULT @@ -202,23 +203,23 @@ func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero } // Go through internal mappings. - return mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) { + return mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) { n, err := safemem.ZeroSeq(dsts) return n, translateIOError(ctx, err) }) } -func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZero int64) (int64, error) { +func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64) (int64, error) { var done int64 for { - n, err := mm.as.ZeroOut(addr+usermem.Addr(done), uintptr(toZero-done)) + n, err := mm.as.ZeroOut(addr+hostarch.Addr(done), uintptr(toZero-done)) done += int64(n) if err == nil { return done, nil } if f, ok := err.(platform.SegmentationFault); ok { ar, _ := addr.ToRange(uint64(toZero)) - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil { return done, err } continue @@ -228,7 +229,7 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZer } // CopyOutFrom implements usermem.IO.CopyOutFrom. -func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) { +func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) { if !mm.checkIOVec(ars) { return 0, syserror.EFAULT } @@ -269,11 +270,11 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeS } // Go through internal mappings. 
- return mm.withVecInternalMappings(ctx, ars, usermem.Write, opts.IgnorePermissions, src.ReadToBlocks) + return mm.withVecInternalMappings(ctx, ars, hostarch.Write, opts.IgnorePermissions, src.ReadToBlocks) } // CopyInTo implements usermem.IO.CopyInTo. -func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) { +func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) { if !mm.checkIOVec(ars) { return 0, syserror.EFAULT } @@ -306,11 +307,11 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, } // Go through internal mappings. - return mm.withVecInternalMappings(ctx, ars, usermem.Read, opts.IgnorePermissions, dst.WriteFromBlocks) + return mm.withVecInternalMappings(ctx, ars, hostarch.Read, opts.IgnorePermissions, dst.WriteFromBlocks) } // SwapUint32 implements usermem.IO.SwapUint32. -func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) { +func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) { ar, ok := mm.CheckIORange(addr, 4) if !ok { return 0, syserror.EFAULT @@ -324,7 +325,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new return old, nil } if f, ok := err.(platform.SegmentationFault); ok { - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil { return 0, err } continue @@ -335,7 +336,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new // Go through internal mappings. var old uint32 - _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { if ims.NumBlocks() != 1 || ims.NumBytes() != 4 { // Atomicity is unachievable across mappings. return 0, syserror.EFAULT @@ -353,7 +354,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new } // CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32. -func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) { +func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) { ar, ok := mm.CheckIORange(addr, 4) if !ok { return 0, syserror.EFAULT @@ -367,7 +368,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem. return prev, nil } if f, ok := err.(platform.SegmentationFault); ok { - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.ReadWrite); err != nil { return 0, err } continue @@ -378,7 +379,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem. // Go through internal mappings. 
var prev uint32 - _, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + _, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { if ims.NumBlocks() != 1 || ims.NumBytes() != 4 { // Atomicity is unachievable across mappings. return 0, syserror.EFAULT @@ -396,7 +397,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem. } // LoadUint32 implements usermem.IO.LoadUint32. -func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts usermem.IOOpts) (uint32, error) { +func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) { ar, ok := mm.CheckIORange(addr, 4) if !ok { return 0, syserror.EFAULT @@ -410,7 +411,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts return val, nil } if f, ok := err.(platform.SegmentationFault); ok { - if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil { + if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil { return 0, err } continue @@ -421,7 +422,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts // Go through internal mappings. var val uint32 - _, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + _, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { if ims.NumBlocks() != 1 || ims.NumBytes() != 4 { // Atomicity is unachievable across mappings. return 0, syserror.EFAULT @@ -445,11 +446,11 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr usermem.Addr, opts // * mm.as != nil. // * ioar.Length() != 0. // * ioar.Contains(addr). -func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error { +func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr hostarch.Addr, ioar hostarch.AddrRange, at hostarch.AccessType) error { // Try to map all remaining pages in the I/O operation. This RoundUp can't // overflow because otherwise it would have been caught by CheckIORange. end, _ := ioar.End.RoundUp() - ar := usermem.AddrRange{addr.RoundDown(), end} + ar := hostarch.AddrRange{addr.RoundDown(), end} // Don't bother trying existingPMAsLocked; in most cases, if we did have // existing pmas, we wouldn't have faulted. @@ -498,7 +499,7 @@ func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, // more useful for usermem.IO methods. // // Preconditions: 0 < ar.Length() <= math.MaxInt64. -func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) { +func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) { // If pmas are already available, we can do IO without touching mm.vmas or // mm.mappingMu. mm.activeMu.RLock() @@ -567,7 +568,7 @@ func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.Ad // internal mappings for the subset of ars for which this property holds. // // Preconditions: !ars.IsEmpty(). 
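The asCopyOut/asCopyIn/asZeroOut helpers and handleASIOFault above all follow the same shape: attempt the AddressSpace operation, and when it stops with a SegmentationFault, fault the missing pages in and retry from where the copy left off. A self-contained sketch of that retry loop against a toy address space; toySpace, copyOut, and faultIn are invented stand-ins, not the real platform API:

package main

import (
	"errors"
	"fmt"
)

// segFault reports the address at which a toy copy stopped.
type segFault struct{ addr uint64 }

func (f segFault) Error() string { return fmt.Sprintf("fault at %#x", f.addr) }

// toySpace only allows access to pages recorded in mapped.
type toySpace struct{ mapped map[uint64]bool }

func (s *toySpace) copyOut(addr uint64, src []byte) (int, error) {
	for i := range src {
		a := addr + uint64(i)
		if !s.mapped[a&^0xFFF] {
			return i, segFault{a}
		}
	}
	return len(src), nil
}

func (s *toySpace) faultIn(addr uint64) { s.mapped[addr&^0xFFF] = true }

func main() {
	s := &toySpace{mapped: map[uint64]bool{0x10000: true}}
	src := make([]byte, 0x1800) // 6 KiB spanning two pages at 0x10000
	done := 0
	for {
		n, err := s.copyOut(0x10000+uint64(done), src[done:])
		done += n
		if err == nil {
			break
		}
		var f segFault
		if errors.As(err, &f) {
			s.faultIn(f.addr) // map the missing page, then retry
			continue
		}
		panic(err)
	}
	fmt.Println("copied", done, "bytes") // copied 6144 bytes
}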
-func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) { +func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) { // withInternalMappings is faster than withVecInternalMappings because of // iterator plumbing (this isn't generally practical in the vector case due // to iterator invalidation between AddrRanges). Use it if possible. @@ -630,12 +631,12 @@ func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars userme // truncatedAddrRangeSeq returns a copy of ars, but with the end truncated to // at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to -// truncate usermem.AddrRangeSeq when errors occur. +// truncate hostarch.AddrRangeSeq when errors occur. // // Preconditions: // * !arsit.IsEmpty(). // * end <= arsit.Head().End. -func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq { +func truncatedAddrRangeSeq(ars, arsit hostarch.AddrRangeSeq, end hostarch.Addr) hostarch.AddrRangeSeq { ar := arsit.Head() if end <= ar.Start { return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes()) diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go index 120707429..a79ef9223 100644 --- a/pkg/sentry/mm/lifecycle.go +++ b/pkg/sentry/mm/lifecycle.go @@ -19,12 +19,12 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/platform" - "gvisor.dev/gvisor/pkg/usermem" ) // NewMemoryManager returns a new MemoryManager with no mappings and 1 user. @@ -139,7 +139,7 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) { } srcvseg := mm.vmas.FirstSegment() dstpgap := mm2.pmas.FirstGap() - var unmapAR usermem.AddrRange + var unmapAR hostarch.AddrRange for srcpseg := mm.pmas.FirstSegment(); srcpseg.Ok(); srcpseg = srcpseg.NextSegment() { pma := srcpseg.ValuePtr() if !pma.private { diff --git a/pkg/sentry/mm/metadata.go b/pkg/sentry/mm/metadata.go index 0cfd60f6c..28c5fead9 100644 --- a/pkg/sentry/mm/metadata.go +++ b/pkg/sentry/mm/metadata.go @@ -16,9 +16,9 @@ package mm import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsbridge" - "gvisor.dev/gvisor/pkg/usermem" ) // Dumpability describes if and how core dumps should be created. @@ -54,14 +54,14 @@ func (mm *MemoryManager) SetDumpability(d Dumpability) { // ArgvStart returns the start of the application argument vector. // // There is no guarantee that this value is sensible w.r.t. ArgvEnd. -func (mm *MemoryManager) ArgvStart() usermem.Addr { +func (mm *MemoryManager) ArgvStart() hostarch.Addr { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() return mm.argv.Start } // SetArgvStart sets the start of the application argument vector. -func (mm *MemoryManager) SetArgvStart(a usermem.Addr) { +func (mm *MemoryManager) SetArgvStart(a hostarch.Addr) { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() mm.argv.Start = a @@ -70,14 +70,14 @@ func (mm *MemoryManager) SetArgvStart(a usermem.Addr) { // ArgvEnd returns the end of the application argument vector. 
// // There is no guarantee that this value is sensible w.r.t. ArgvStart. -func (mm *MemoryManager) ArgvEnd() usermem.Addr { +func (mm *MemoryManager) ArgvEnd() hostarch.Addr { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() return mm.argv.End } // SetArgvEnd sets the end of the application argument vector. -func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) { +func (mm *MemoryManager) SetArgvEnd(a hostarch.Addr) { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() mm.argv.End = a @@ -86,14 +86,14 @@ func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) { // EnvvStart returns the start of the application environment vector. // // There is no guarantee that this value is sensible w.r.t. EnvvEnd. -func (mm *MemoryManager) EnvvStart() usermem.Addr { +func (mm *MemoryManager) EnvvStart() hostarch.Addr { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() return mm.envv.Start } // SetEnvvStart sets the start of the application environment vector. -func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) { +func (mm *MemoryManager) SetEnvvStart(a hostarch.Addr) { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() mm.envv.Start = a @@ -102,14 +102,14 @@ func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) { // EnvvEnd returns the end of the application environment vector. // // There is no guarantee that this value is sensible w.r.t. EnvvStart. -func (mm *MemoryManager) EnvvEnd() usermem.Addr { +func (mm *MemoryManager) EnvvEnd() hostarch.Addr { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() return mm.envv.End } // SetEnvvEnd sets the end of the application environment vector. -func (mm *MemoryManager) SetEnvvEnd(a usermem.Addr) { +func (mm *MemoryManager) SetEnvvEnd(a hostarch.Addr) { mm.metadataMu.Lock() defer mm.metadataMu.Unlock() mm.envv.End = a diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go index 92cc87d84..57969b26c 100644 --- a/pkg/sentry/mm/mm.go +++ b/pkg/sentry/mm/mm.go @@ -36,6 +36,7 @@ package mm import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsbridge" @@ -43,7 +44,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // MemoryManager implements a virtual address space. @@ -97,7 +97,7 @@ type MemoryManager struct { // binary into the mm. // // brk is protected by mappingMu. - brk usermem.AddrRange + brk hostarch.AddrRange // usageAS is vmas.Span(), cached to accelerate RLIMIT_AS checks. // @@ -198,14 +198,14 @@ type MemoryManager struct { // requirements apply to argv; we do not require that argv.WellFormed(). // // argv is protected by metadataMu. - argv usermem.AddrRange + argv hostarch.AddrRange // envv is the application envv. This is set up by the loader and may be // modified by prctl(PR_SET_MM_ENV_START/PR_SET_MM_ENV_END). No // requirements apply to envv; we do not require that envv.WellFormed(). // // envv is protected by metadataMu. - envv usermem.AddrRange + envv hostarch.AddrRange // auxv is the ELF's auxiliary vector. // @@ -268,20 +268,20 @@ type vma struct { // realPerms are the memory permissions on this vma, as defined by the // application. - realPerms usermem.AccessType `state:".(int)"` + realPerms hostarch.AccessType `state:".(int)"` // effectivePerms are the memory permissions on this vma which are // actually used to control access. // // Invariant: effectivePerms == realPerms.Effective(). 
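The vma fields above maintain the invariant that effectivePerms is realPerms passed through Effective(), that is, normalized so that write or execute access also implies read, which is how the hardware behaves anyway. A tiny self-contained model of that normalization; accessType and effective are local stand-ins for hostarch.AccessType and its Effective method, assuming the read-implied-by-write/execute rule:

package main

import "fmt"

// accessType is a local stand-in for hostarch.AccessType.
type accessType struct {
	Read, Write, Execute bool
}

// effective models the invariant above: a page that is writable or
// executable is also readable.
func (a accessType) effective() accessType {
	if a.Write || a.Execute {
		a.Read = true
	}
	return a
}

func main() {
	requested := accessType{Write: true} // application asked for PROT_WRITE only
	fmt.Printf("%+v\n", requested.effective()) // {Read:true Write:true Execute:false}
}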
- effectivePerms usermem.AccessType `state:"manual"` + effectivePerms hostarch.AccessType `state:"manual"` // maxPerms limits the set of permissions that may ever apply to this // memory, as well as accesses for which usermem.IOOpts.IgnorePermissions // is true (e.g. ptrace(PTRACE_POKEDATA)). // // Invariant: maxPerms == maxPerms.Effective(). - maxPerms usermem.AccessType `state:"manual"` + maxPerms hostarch.AccessType `state:"manual"` // private is true if this is a MAP_PRIVATE mapping, such that writes to // the mapping are propagated to a copy. @@ -421,8 +421,8 @@ type pma struct { off uint64 // translatePerms is the permissions returned by memmap.Mappable.Translate. - // If private is true, translatePerms is usermem.AnyAccess. - translatePerms usermem.AccessType + // If private is true, translatePerms is hostarch.AnyAccess. + translatePerms hostarch.AccessType // effectivePerms is the permissions allowed for non-ignorePermissions // accesses. maxPerms is the permissions allowed for ignorePermissions @@ -432,8 +432,8 @@ type pma struct { // // These are stored in the pma so that the IO implementation can avoid // iterating mm.vmas when pmas already exist. - effectivePerms usermem.AccessType - maxPerms usermem.AccessType + effectivePerms hostarch.AccessType + maxPerms hostarch.AccessType // needCOW is true if writes to the mapping must be propagated to a copy. needCOW bool @@ -465,7 +465,7 @@ type privateRefs struct { } type invalidateArgs struct { - ar usermem.AddrRange + ar hostarch.AddrRange opts memmap.InvalidateOpts } diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go index bc53bd41e..1304b0a2f 100644 --- a/pkg/sentry/mm/mm_test.go +++ b/pkg/sentry/mm/mm_test.go @@ -18,6 +18,7 @@ import ( "testing" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/contexttest" "gvisor.dev/gvisor/pkg/sentry/limits" @@ -51,7 +52,7 @@ func TestUsageASUpdates(t *testing.T) { defer mm.DecUsers(ctx) addr, err := mm.MMap(ctx, memmap.MMapOpts{ - Length: 2 * usermem.PageSize, + Length: 2 * hostarch.PageSize, Private: true, }) if err != nil { @@ -62,7 +63,7 @@ func TestUsageASUpdates(t *testing.T) { t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage) } - mm.MUnmap(ctx, addr, usermem.PageSize) + mm.MUnmap(ctx, addr, hostarch.PageSize) realUsage = mm.realUsageAS() if mm.usageAS != realUsage { t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage) @@ -86,10 +87,10 @@ func TestDataASUpdates(t *testing.T) { defer mm.DecUsers(ctx) addr, err := mm.MMap(ctx, memmap.MMapOpts{ - Length: 3 * usermem.PageSize, + Length: 3 * hostarch.PageSize, Private: true, - Perms: usermem.Write, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.Write, + MaxPerms: hostarch.AnyAccess, }) if err != nil { t.Fatalf("MMap got err %v want nil", err) @@ -102,19 +103,19 @@ func TestDataASUpdates(t *testing.T) { t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS) } - mm.MUnmap(ctx, addr, usermem.PageSize) + mm.MUnmap(ctx, addr, hostarch.PageSize) realDataAS = mm.realDataAS() if mm.dataAS != realDataAS { t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS) } - mm.MProtect(addr+usermem.PageSize, usermem.PageSize, usermem.Read, false) + mm.MProtect(addr+hostarch.PageSize, hostarch.PageSize, hostarch.Read, false) realDataAS = mm.realDataAS() if mm.dataAS != 
realDataAS { t.Fatalf("dataAS believes %v bytes are mapped; %v bytes are actually mapped", mm.dataAS, realDataAS) } - mm.MRemap(ctx, addr+2*usermem.PageSize, usermem.PageSize, 2*usermem.PageSize, MRemapOpts{ + mm.MRemap(ctx, addr+2*hostarch.PageSize, hostarch.PageSize, 2*hostarch.PageSize, MRemapOpts{ Move: MRemapMayMove, }) realDataAS = mm.realDataAS() @@ -133,7 +134,7 @@ func TestBrkDataLimitUpdates(t *testing.T) { // Try to extend the brk by one page and expect doing so to fail. oldBrk, _ := mm.Brk(ctx, 0) - if newBrk, _ := mm.Brk(ctx, oldBrk+usermem.PageSize); newBrk != oldBrk { + if newBrk, _ := mm.Brk(ctx, oldBrk+hostarch.PageSize); newBrk != oldBrk { t.Errorf("brk() increased data segment above RLIMIT_DATA (old brk = %#x, new brk = %#x", oldBrk, newBrk) } } @@ -145,10 +146,10 @@ func TestIOAfterUnmap(t *testing.T) { defer mm.DecUsers(ctx) addr, err := mm.MMap(ctx, memmap.MMapOpts{ - Length: usermem.PageSize, + Length: hostarch.PageSize, Private: true, - Perms: usermem.Read, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.Read, + MaxPerms: hostarch.AnyAccess, }) if err != nil { t.Fatalf("MMap got err %v want nil", err) @@ -164,7 +165,7 @@ func TestIOAfterUnmap(t *testing.T) { t.Errorf("CopyIn got %d want 1", n) } - err = mm.MUnmap(ctx, addr, usermem.PageSize) + err = mm.MUnmap(ctx, addr, hostarch.PageSize) if err != nil { t.Fatalf("MUnmap got err %v want nil", err) } @@ -185,10 +186,10 @@ func TestIOAfterMProtect(t *testing.T) { defer mm.DecUsers(ctx) addr, err := mm.MMap(ctx, memmap.MMapOpts{ - Length: usermem.PageSize, + Length: hostarch.PageSize, Private: true, - Perms: usermem.ReadWrite, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.ReadWrite, + MaxPerms: hostarch.AnyAccess, }) if err != nil { t.Fatalf("MMap got err %v want nil", err) @@ -204,7 +205,7 @@ func TestIOAfterMProtect(t *testing.T) { t.Errorf("CopyOut got %d want 1", n) } - err = mm.MProtect(addr, usermem.PageSize, usermem.Read, false) + err = mm.MProtect(addr, hostarch.PageSize, hostarch.Read, false) if err != nil { t.Errorf("MProtect got err %v want nil", err) } diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go index 7e5f7de64..5583f62b2 100644 --- a/pkg/sentry/mm/pma.go +++ b/pkg/sentry/mm/pma.go @@ -18,12 +18,12 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safecopy" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // existingPMAsLocked checks that pmas exist for all addresses in ar, and @@ -34,7 +34,7 @@ import ( // Preconditions: // * mm.activeMu must be locked. // * ar.Length() != 0. -func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator { +func (mm *MemoryManager) existingPMAsLocked(ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -70,7 +70,7 @@ func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.Acc // and support access of type (at, ignorePermissions). // // Preconditions: mm.activeMu must be locked. 
-func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, needInternalMappings bool) bool { +func (mm *MemoryManager) existingVecPMAsLocked(ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) bool { for ; !ars.IsEmpty(); ars = ars.Tail() { if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, needInternalMappings).Ok() { return false @@ -98,7 +98,7 @@ func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at user // * vseg.Range().Contains(ar.Start). // * vmas must exist for all addresses in ar, and support accesses of type at // (i.e. permission checks must have been performed against vmas). -func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) { +func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -118,7 +118,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar end = ar.End.RoundDown() alignerr = syserror.EFAULT } - ar = usermem.AddrRange{ar.Start.RoundDown(), end} + ar = hostarch.AddrRange{ar.Start.RoundDown(), end} pstart, pend, perr := mm.getPMAsInternalLocked(ctx, vseg, ar, at) if pend.Start() <= ar.Start { @@ -145,7 +145,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar // * mm.activeMu must be locked for writing. // * vmas must exist for all addresses in ars, and support accesses of type at // (i.e. permission checks must have been performed against vmas). -func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType) (usermem.AddrRangeSeq, error) { +func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType) (hostarch.AddrRangeSeq, error) { for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() { ar := arsit.Head() if ar.Length() == 0 { @@ -164,7 +164,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR end = ar.End.RoundDown() alignerr = syserror.EFAULT } - ar = usermem.AddrRange{ar.Start.RoundDown(), end} + ar = hostarch.AddrRange{ar.Start.RoundDown(), end} _, pend, perr := mm.getPMAsInternalLocked(ctx, mm.vmas.FindSegment(ar.Start), ar, at) if perr != nil { @@ -191,7 +191,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrR // // getPMAsInternalLocked is an implementation helper for getPMAsLocked and // getVecPMAsLocked; other clients should call one of those instead. 
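getPMAsLocked and getVecPMAsLocked, shown above, align the requested range before building pmas: the start is rounded down to a page boundary and the end rounded up, unless rounding up would overflow, in which case the end is rounded down and the lost tail is reported as EFAULT only if the caller actually needed it. A simplified sketch of just that alignment step; alignForPMAs, pageSize, and errFault are illustrative:

package main

import (
	"errors"
	"fmt"
	"math"
)

const pageSize = 0x1000

var errFault = errors.New("EFAULT: range end overflows")

// alignForPMAs widens [start, end) to page boundaries. If rounding the end up
// would overflow, the end is rounded down instead and the overflow error is
// returned alongside the truncated range, to be surfaced only if the tail is
// actually needed.
func alignForPMAs(start, end uint64) (uint64, uint64, error) {
	alignedStart := start &^ (pageSize - 1)
	if up := (end + pageSize - 1) &^ (pageSize - 1); up >= end {
		return alignedStart, up, nil
	}
	return alignedStart, end &^ (pageSize - 1), errFault
}

func main() {
	s, e, err := alignForPMAs(0x12345, 0x12800)
	fmt.Printf("[%#x, %#x) err=%v\n", s, e, err) // [0x12000, 0x13000) err=<nil>

	s, e, err = alignForPMAs(math.MaxUint64-0x1800, math.MaxUint64-0x10)
	fmt.Printf("[%#x, %#x) err=%v\n", s, e, err) // truncated end, err=EFAULT
}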
-func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, at usermem.AccessType) (pmaIterator, pmaGapIterator, error) { +func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -245,7 +245,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter pseg, pgap = mm.pmas.Insert(pgap, allocAR, pma{ file: mf, off: fr.Start, - translatePerms: usermem.AnyAccess, + translatePerms: hostarch.AnyAccess, effectivePerms: vma.effectivePerms, maxPerms: vma.maxPerms, // Since we just allocated this memory and have the @@ -335,7 +335,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter // Neither of these cases has enough spatial locality to // benefit from copying nearby pages, so if the vma is // executable, only copy the pages required. - var copyAR usermem.AddrRange + var copyAR hostarch.AddrRange if vseg.ValuePtr().effectivePerms.Execute { copyAR = pseg.Range().Intersect(ar) } else { @@ -366,7 +366,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter // Replace the pma with a copy in the part of the address // range where copying was successful. This doesn't change // RSS. - copyAR.End = copyAR.Start + usermem.Addr(fr.Length()) + copyAR.End = copyAR.Start + hostarch.Addr(fr.Length()) if copyAR != pseg.Range() { pseg = mm.pmas.Isolate(pseg, copyAR) pstart = pmaIterator{} // iterators invalidated @@ -380,7 +380,7 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter mf.IncRef(fr) oldpma.file = mf oldpma.off = fr.Start - oldpma.translatePerms = usermem.AnyAccess + oldpma.translatePerms = hostarch.AnyAccess oldpma.effectivePerms = vma.effectivePerms oldpma.maxPerms = vma.maxPerms oldpma.needCOW = false @@ -499,14 +499,14 @@ const ( // privateAllocUnit may reduce page faults by allowing fewer, larger pmas // to be mapped, but may result in larger amounts of wasted memory in the // presence of fragmentation. privateAllocUnit must be a power-of-2 - // multiple of usermem.PageSize. - privateAllocUnit = usermem.HugePageSize + // multiple of hostarch.PageSize. + privateAllocUnit = hostarch.HugePageSize privateAllocMask = privateAllocUnit - 1 ) -func privateAligned(ar usermem.AddrRange) usermem.AddrRange { - aligned := usermem.AddrRange{ar.Start &^ privateAllocMask, ar.End} +func privateAligned(ar hostarch.AddrRange) hostarch.AddrRange { + aligned := hostarch.AddrRange{ar.Start &^ privateAllocMask, ar.End} if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End { aligned.End = end } @@ -548,7 +548,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat rseg := mm.privateRefs.refs.FindSegment(fr.Start) if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() { pma.needCOW = false - // pma.private => pma.translatePerms == usermem.AnyAccess + // pma.private => pma.translatePerms == hostarch.AnyAccess vma := vseg.ValuePtr() pma.effectivePerms = vma.effectivePerms pma.maxPerms = vma.maxPerms @@ -558,7 +558,7 @@ func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterat } // Invalidate implements memmap.MappingSpace.Invalidate. 
-func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) { +func (mm *MemoryManager) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -581,7 +581,7 @@ func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.Invalidate // * mm.activeMu must be locked for writing. // * ar.Length() != 0. // * ar must be page-aligned. -func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) { +func (mm *MemoryManager) invalidateLocked(ar hostarch.AddrRange, invalidatePrivate, invalidateShared bool) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -627,7 +627,7 @@ func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivat // Preconditions: // * ar.Length() != 0. // * ar must be page-aligned. -func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) ([]PinnedRange, error) { +func (mm *MemoryManager) Pin(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) ([]PinnedRange, error) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -683,7 +683,7 @@ func (mm *MemoryManager) Pin(ctx context.Context, ar usermem.AddrRange, at userm // PinnedRanges are returned by MemoryManager.Pin. type PinnedRange struct { // Source is the corresponding range of addresses. - Source usermem.AddrRange + Source hostarch.AddrRange // File is the mapped file. File memmap.File @@ -713,7 +713,7 @@ func Unpin(prs []PinnedRange) { // * !oldAR.Overlaps(newAR). // * mm.pmas.IsEmptyRange(newAR). // * oldAR and newAR must be page-aligned. -func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) { +func (mm *MemoryManager) movePMAsLocked(oldAR, newAR hostarch.AddrRange) { if checkInvariants { if !oldAR.WellFormed() || oldAR.Length() == 0 || !oldAR.IsPageAligned() { panic(fmt.Sprintf("invalid oldAR: %v", oldAR)) @@ -731,7 +731,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) { } type movedPMA struct { - oldAR usermem.AddrRange + oldAR hostarch.AddrRange pma pma } var movedPMAs []movedPMA @@ -751,7 +751,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) { pgap := mm.pmas.FindGap(newAR.Start) for i := range movedPMAs { mpma := &movedPMAs[i] - pmaNewAR := usermem.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off} + pmaNewAR := hostarch.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off} pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap() } @@ -776,7 +776,7 @@ func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) { // // Postconditions: getPMAInternalMappingsLocked does not invalidate iterators // into mm.pmas. 
-func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, error) { +func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) (pmaGapIterator, error) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -808,7 +808,7 @@ func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar userm // // Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators // into mm.pmas. -func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSeq) (usermem.AddrRangeSeq, error) { +func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars hostarch.AddrRangeSeq) (hostarch.AddrRangeSeq, error) { for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() { ar := arsit.Head() if ar.Length() == 0 { @@ -829,7 +829,7 @@ func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSe // in ar. // * ar.Length() != 0. // * pseg.Range().Contains(ar.Start). -func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq { +func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) safemem.BlockSeq { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -866,7 +866,7 @@ func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.Add // * mm.activeMu must be locked. // * Internal mappings must have been previously established for all addresses // in ars. -func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq { +func (mm *MemoryManager) vecInternalMappingsLocked(ars hostarch.AddrRangeSeq) safemem.BlockSeq { var ims []safemem.Block for ; !ars.IsEmpty(); ars = ars.Tail() { ar := ars.Head() @@ -931,7 +931,7 @@ func (mm *MemoryManager) decPrivateRef(fr memmap.FileRange) { // MemoryManager to reflect the insertion of a pma at ar. // // Preconditions: mm.activeMu must be locked for writing. -func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) { +func (mm *MemoryManager) addRSSLocked(ar hostarch.AddrRange) { mm.curRSS += uint64(ar.Length()) if mm.curRSS > mm.maxRSS { mm.maxRSS = mm.curRSS @@ -942,19 +942,19 @@ func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) { // reflect the removal of a pma at ar. // // Preconditions: mm.activeMu must be locked for writing. -func (mm *MemoryManager) removeRSSLocked(ar usermem.AddrRange) { +func (mm *MemoryManager) removeRSSLocked(ar hostarch.AddrRange) { mm.curRSS -= uint64(ar.Length()) } // pmaSetFunctions implements segment.Functions for pmaSet. 
type pmaSetFunctions struct{} -func (pmaSetFunctions) MinKey() usermem.Addr { +func (pmaSetFunctions) MinKey() hostarch.Addr { return 0 } -func (pmaSetFunctions) MaxKey() usermem.Addr { - return ^usermem.Addr(0) +func (pmaSetFunctions) MaxKey() hostarch.Addr { + return ^hostarch.Addr(0) } func (pmaSetFunctions) ClearValue(pma *pma) { @@ -962,7 +962,7 @@ func (pmaSetFunctions) ClearValue(pma *pma) { pma.internalMappings = safemem.BlockSeq{} } -func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRange, pma2 pma) (pma, bool) { +func (pmaSetFunctions) Merge(ar1 hostarch.AddrRange, pma1 pma, ar2 hostarch.AddrRange, pma2 pma) (pma, bool) { if pma1.file != pma2.file || pma1.off+uint64(ar1.Length()) != pma2.off || pma1.translatePerms != pma2.translatePerms || @@ -980,7 +980,7 @@ func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRa return pma1, true } -func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (pma, pma) { +func (pmaSetFunctions) Split(ar hostarch.AddrRange, p pma, split hostarch.Addr) (pma, pma) { newlen1 := uint64(split - ar.Start) p2 := p p2.off += newlen1 @@ -997,7 +997,7 @@ func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (p // Preconditions: // * mm.activeMu must be locked. // * addr <= pgap.Start(). -func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator { +func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr hostarch.Addr, pgap pmaGapIterator) pmaIterator { if checkInvariants { if !pgap.Ok() { panic("terminal pma iterator") @@ -1045,7 +1045,7 @@ func (pseg pmaIterator) fileRange() memmap.FileRange { // Preconditions: // * pseg.Range().IsSupersetOf(ar). // * ar.Length != 0. -func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) memmap.FileRange { +func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange { if checkInvariants { if !pseg.Ok() { panic("terminal pma iterator") diff --git a/pkg/sentry/mm/procfs.go b/pkg/sentry/mm/procfs.go index 73bfbea49..f1440e884 100644 --- a/pkg/sentry/mm/procfs.go +++ b/pkg/sentry/mm/procfs.go @@ -19,9 +19,9 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" "gvisor.dev/gvisor/pkg/sentry/memmap" - "gvisor.dev/gvisor/pkg/usermem" ) const ( @@ -29,7 +29,7 @@ const ( // include/linux/kdev_t.h:MINORBITS devMinorBits = 20 - vsyscallEnd = usermem.Addr(0xffffffffff601000) + vsyscallEnd = hostarch.Addr(0xffffffffff601000) vsyscallMapsEntry = "ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n" vsyscallSmapsEntry = vsyscallMapsEntry + "Size: 4 kB\n" + @@ -62,7 +62,7 @@ func (mm *MemoryManager) NeedsUpdate(generation int64) bool { func (mm *MemoryManager) ReadMapsDataInto(ctx context.Context, buf *bytes.Buffer) { mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() - var start usermem.Addr + var start hostarch.Addr for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() { mm.appendVMAMapsEntryLocked(ctx, vseg, buf) @@ -88,9 +88,9 @@ func (mm *MemoryManager) ReadMapsSeqFileData(ctx context.Context, handle seqfile mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() var data []seqfile.SeqData - var start usermem.Addr + var start hostarch.Addr if handle != nil { - start = *handle.(*usermem.Addr) + start = *handle.(*hostarch.Addr) } for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() { vmaAddr := vseg.End() @@ -177,7 +177,7 @@ func 
(mm *MemoryManager) appendVMAMapsEntryLocked(ctx context.Context, vseg vmaI func (mm *MemoryManager) ReadSmapsDataInto(ctx context.Context, buf *bytes.Buffer) { mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() - var start usermem.Addr + var start hostarch.Addr for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() { mm.vmaSmapsEntryIntoLocked(ctx, vseg, buf) @@ -196,9 +196,9 @@ func (mm *MemoryManager) ReadSmapsSeqFileData(ctx context.Context, handle seqfil mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() var data []seqfile.SeqData - var start usermem.Addr + var start hostarch.Addr if handle != nil { - start = *handle.(*usermem.Addr) + start = *handle.(*hostarch.Addr) } for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() { vmaAddr := vseg.End() @@ -279,8 +279,8 @@ func (mm *MemoryManager) vmaSmapsEntryIntoLocked(ctx context.Context, vseg vmaIt // Swap is not implemented. fmt.Fprintf(b, "Swap: %8d kB\n", 0) fmt.Fprintf(b, "SwapPss: %8d kB\n", 0) - fmt.Fprintf(b, "KernelPageSize: %8d kB\n", usermem.PageSize/1024) - fmt.Fprintf(b, "MMUPageSize: %8d kB\n", usermem.PageSize/1024) + fmt.Fprintf(b, "KernelPageSize: %8d kB\n", hostarch.PageSize/1024) + fmt.Fprintf(b, "MMUPageSize: %8d kB\n", hostarch.PageSize/1024) locked := rss if vma.mlockMode == memmap.MLockNone { locked = 0 diff --git a/pkg/sentry/mm/shm.go b/pkg/sentry/mm/shm.go index 6432731d4..3130be80c 100644 --- a/pkg/sentry/mm/shm.go +++ b/pkg/sentry/mm/shm.go @@ -16,13 +16,13 @@ package mm import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/shm" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // DetachShm unmaps a sysv shared memory segment. -func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error { +func (mm *MemoryManager) DetachShm(ctx context.Context, addr hostarch.Addr) error { if addr != addr.RoundDown() { // "... shmaddr is not aligned on a page boundary." - man shmdt(2) return syserror.EINVAL @@ -52,7 +52,7 @@ func (mm *MemoryManager) DetachShm(ctx context.Context, addr usermem.Addr) error } // Remove all vmas that could have been created by the same attach. - end := addr + usermem.Addr(detached.EffectiveSize()) + end := addr + hostarch.Addr(detached.EffectiveSize()) for vseg.Ok() && vseg.End() <= end { vma := vseg.ValuePtr() if vma.mappable == detached && uint64(vseg.Start()-addr) == vma.off { diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go index 48d8b6a2b..e748b7ff8 100644 --- a/pkg/sentry/mm/special_mappable.go +++ b/pkg/sentry/mm/special_mappable.go @@ -16,11 +16,11 @@ package mm import ( "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/pgalloc" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with @@ -77,21 +77,21 @@ func (m *SpecialMappable) Msync(ctx context.Context, mr memmap.MappableRange) er } // AddMapping implements memmap.Mappable.AddMapping. -func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) error { +func (*SpecialMappable) AddMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) error { return nil } // RemoveMapping implements memmap.Mappable.RemoveMapping. 
-func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, uint64, bool) { +func (*SpecialMappable) RemoveMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, uint64, bool) { } // CopyMapping implements memmap.Mappable.CopyMapping. -func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, usermem.AddrRange, usermem.AddrRange, uint64, bool) error { +func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange, hostarch.AddrRange, uint64, bool) error { return nil } // Translate implements memmap.Mappable.Translate. -func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { +func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) { var err error if required.End > m.fr.Length() { err = &memmap.BusError{syserror.EFAULT} @@ -102,7 +102,7 @@ func (m *SpecialMappable) Translate(ctx context.Context, required, optional memm Source: source, File: m.mfp.MemoryFile(), Offset: m.fr.Start + source.Start, - Perms: usermem.AnyAccess, + Perms: hostarch.AnyAccess, }, }, err } @@ -146,7 +146,7 @@ func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*Spec if length == 0 { return nil, syserror.EINVAL } - alignedLen, ok := usermem.Addr(length).RoundUp() + alignedLen, ok := hostarch.Addr(length).RoundUp() if !ok { return nil, syserror.EINVAL } diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go index 69e37330b..7ad6b7c21 100644 --- a/pkg/sentry/mm/syscalls.go +++ b/pkg/sentry/mm/syscalls.go @@ -21,20 +21,20 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/kernel/futex" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // HandleUserFault handles an application page fault. sp is the faulting // application thread's stack pointer. // // Preconditions: mm.as != nil. -func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr, at usermem.AccessType, sp usermem.Addr) error { - ar, ok := addr.RoundDown().ToRange(usermem.PageSize) +func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error { + ar, ok := addr.RoundDown().ToRange(hostarch.PageSize) if !ok { return syserror.EFAULT } @@ -72,11 +72,11 @@ func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr, } // MMap establishes a memory mapping. -func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error) { +func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) { if opts.Length == 0 { return 0, syserror.EINVAL } - length, ok := usermem.Addr(opts.Length).RoundUp() + length, ok := hostarch.Addr(opts.Length).RoundUp() if !ok { return 0, syserror.ENOMEM } @@ -84,7 +84,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme if opts.Mappable != nil { // Offset must be aligned. - if usermem.Addr(opts.Offset).RoundDown() != usermem.Addr(opts.Offset) { + if hostarch.Addr(opts.Offset).RoundDown() != hostarch.Addr(opts.Offset) { return 0, syserror.EINVAL } // Offset + length must not overflow. 
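The MMap hunk above leans on hostarch.Addr's rounding helpers for argument validation: the requested length must round up to a page multiple without overflowing, and a file offset must already be page-aligned. A minimal standalone sketch of that pattern, assuming the gvisor.dev/gvisor module is importable; mmapArgsOK and its error messages are illustrative, not gVisor API:

package main

import (
	"errors"
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// mmapArgsOK mirrors the length/offset checks in MemoryManager.MMap above.
func mmapArgsOK(length, offset uint64) (hostarch.Addr, error) {
	if length == 0 {
		return 0, errors.New("zero length")
	}
	rounded, ok := hostarch.Addr(length).RoundUp()
	if !ok {
		return 0, errors.New("length overflows when rounded up to a page boundary")
	}
	if hostarch.Addr(offset).RoundDown() != hostarch.Addr(offset) {
		return 0, errors.New("offset is not page-aligned")
	}
	return rounded, nil
}

func main() {
	fmt.Println(mmapArgsOK(1, 0))             // one byte rounds up to a full page
	fmt.Println(mmapArgsOK(1, 123))           // rejected: unaligned offset
	fmt.Println(mmapArgsOK(^uint64(0)-10, 0)) // rejected: rounding overflows
}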
@@ -157,7 +157,7 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (userme // Preconditions: // * mm.mappingMu must be locked. // * vseg.Range().IsSupersetOf(ar). -func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) { +func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) { if !vseg.ValuePtr().effectivePerms.Any() { // Linux doesn't populate inaccessible pages. See // mm/gup.c:populate_vma_page_range. @@ -175,7 +175,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u } // Ensure that we have usable pmas. - pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess) + pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess) if err != nil { // mm/util.c:vm_mmap_pgoff() ignores the error, if any, from // mm/gup.c:mm_populate(). If it matters, we'll get it again when @@ -203,7 +203,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar u // * vseg.Range().IsSupersetOf(ar). // // Postconditions: mm.mappingMu will be unlocked. -func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) { +func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) { // See populateVMA above for commentary. if !vseg.ValuePtr().effectivePerms.Any() { mm.mappingMu.Unlock() @@ -221,7 +221,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera // mm.mappingMu doesn't need to be write-locked for getPMAsLocked, and it // isn't needed at all for mapASLocked. mm.mappingMu.DowngradeLock() - pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, usermem.NoAccess) + pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, hostarch.NoAccess) mm.mappingMu.RUnlock() if err != nil { mm.activeMu.Unlock() @@ -234,7 +234,7 @@ func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaItera } // MapStack allocates the initial process stack. -func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error) { +func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, error) { // maxStackSize is the maximum supported process stack size in bytes. // // This limit exists because stack growing isn't implemented, so the entire @@ -242,7 +242,7 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error const maxStackSize = 128 << 20 stackSize := limits.FromContext(ctx).Get(limits.Stack) - r, ok := usermem.Addr(stackSize.Cur).RoundUp() + r, ok := hostarch.Addr(stackSize.Cur).RoundUp() sz := uint64(r) if !ok { // RLIM_INFINITY rounds up to 0. @@ -251,16 +251,16 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error ctx.Warningf("Capping stack size from RLIMIT_STACK of %v down to %v.", sz, maxStackSize) sz = maxStackSize } else if sz == 0 { - return usermem.AddrRange{}, syserror.ENOMEM + return hostarch.AddrRange{}, syserror.ENOMEM } - szaddr := usermem.Addr(sz) + szaddr := hostarch.Addr(sz) ctx.Debugf("Allocating stack with size of %v bytes", sz) // Determine the stack's desired location. Unlike Linux, address // randomization can't be disabled. 
- stackEnd := mm.layout.MaxAddr - usermem.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown() + stackEnd := mm.layout.MaxAddr - hostarch.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown() if stackEnd < szaddr { - return usermem.AddrRange{}, syserror.ENOMEM + return hostarch.AddrRange{}, syserror.ENOMEM } stackStart := stackEnd - szaddr mm.mappingMu.Lock() @@ -268,8 +268,8 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error _, ar, err := mm.createVMALocked(ctx, memmap.MMapOpts{ Length: sz, Addr: stackStart, - Perms: usermem.ReadWrite, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.ReadWrite, + MaxPerms: hostarch.AnyAccess, Private: true, GrowsDown: true, MLockMode: mm.defMLockMode, @@ -279,14 +279,14 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error } // MUnmap implements the semantics of Linux's munmap(2). -func (mm *MemoryManager) MUnmap(ctx context.Context, addr usermem.Addr, length uint64) error { +func (mm *MemoryManager) MUnmap(ctx context.Context, addr hostarch.Addr, length uint64) error { if addr != addr.RoundDown() { return syserror.EINVAL } if length == 0 { return syserror.EINVAL } - la, ok := usermem.Addr(length).RoundUp() + la, ok := hostarch.Addr(length).RoundUp() if !ok { return syserror.EINVAL } @@ -308,7 +308,7 @@ type MRemapOpts struct { // NewAddr is the new address for the remapping. NewAddr is ignored unless // Move is MMRemapMustMove. - NewAddr usermem.Addr + NewAddr hostarch.Addr } // MRemapMoveMode controls MRemap's moving behavior. @@ -328,7 +328,7 @@ const ( ) // MRemap implements the semantics of Linux's mremap(2). -func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (usermem.Addr, error) { +func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (hostarch.Addr, error) { // "Note that old_address has to be page aligned." - mremap(2) if oldAddr.RoundDown() != oldAddr { return 0, syserror.EINVAL @@ -336,9 +336,9 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi // Linux treats an old_size that rounds up to 0 as 0, which is otherwise a // valid size. However, new_size can't be 0 after rounding. - oldSizeAddr, _ := usermem.Addr(oldSize).RoundUp() + oldSizeAddr, _ := hostarch.Addr(oldSize).RoundUp() oldSize = uint64(oldSizeAddr) - newSizeAddr, ok := usermem.Addr(newSize).RoundUp() + newSizeAddr, ok := hostarch.Addr(newSize).RoundUp() if !ok || newSizeAddr == 0 { return 0, syserror.EINVAL } @@ -392,8 +392,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi if newSize < oldSize { // If oldAddr+oldSize didn't overflow, oldAddr+newSize can't // either. - newEnd := oldAddr + usermem.Addr(newSize) - mm.unmapLocked(ctx, usermem.AddrRange{newEnd, oldEnd}) + newEnd := oldAddr + hostarch.Addr(newSize) + mm.unmapLocked(ctx, hostarch.AddrRange{newEnd, oldEnd}) } return oldAddr, nil } @@ -438,7 +438,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi } // Find a location for the new mapping. 
- var newAR usermem.AddrRange + var newAR hostarch.AddrRange switch opts.Move { case MRemapMayMove: newAddr, err := mm.findAvailableLocked(newSize, findAvailableOpts{}) @@ -457,7 +457,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi if !ok { return 0, syserror.EINVAL } - if (usermem.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) { + if (hostarch.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) { return 0, syserror.EINVAL } @@ -479,8 +479,8 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi // correct: compare Linux's mm/mremap.c:mremap_to() => do_munmap(), // vma_to_resize(). if newSize < oldSize { - oldNewEnd := oldAddr + usermem.Addr(newSize) - mm.unmapLocked(ctx, usermem.AddrRange{oldNewEnd, oldEnd}) + oldNewEnd := oldAddr + hostarch.Addr(newSize) + mm.unmapLocked(ctx, hostarch.AddrRange{oldNewEnd, oldEnd}) oldEnd = oldNewEnd } @@ -488,7 +488,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi vseg = mm.vmas.FindSegment(oldAddr) } - oldAR := usermem.AddrRange{oldAddr, oldEnd} + oldAR := hostarch.AddrRange{oldAddr, oldEnd} // Check that oldEnd maps to the same vma as oldAddr. if vseg.End() < oldEnd { @@ -588,14 +588,14 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSi } // MProtect implements the semantics of Linux's mprotect(2). -func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms usermem.AccessType, growsDown bool) error { +func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms hostarch.AccessType, growsDown bool) error { if addr.RoundDown() != addr { return syserror.EINVAL } if length == 0 { return nil } - rlength, ok := usermem.Addr(length).RoundUp() + rlength, ok := hostarch.Addr(length).RoundUp() if !ok { return syserror.ENOMEM } @@ -692,19 +692,19 @@ func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms us } // BrkSetup sets mm's brk address to addr and its brk size to 0. -func (mm *MemoryManager) BrkSetup(ctx context.Context, addr usermem.Addr) { +func (mm *MemoryManager) BrkSetup(ctx context.Context, addr hostarch.Addr) { mm.mappingMu.Lock() defer mm.mappingMu.Unlock() // Unmap the existing brk. if mm.brk.Length() != 0 { mm.unmapLocked(ctx, mm.brk) } - mm.brk = usermem.AddrRange{addr, addr} + mm.brk = hostarch.AddrRange{addr, addr} } // Brk implements the semantics of Linux's brk(2), except that it returns an // error on failure. -func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Addr, error) { +func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.Addr, error) { mm.mappingMu.Lock() // Can't defer mm.mappingMu.Unlock(); see below. @@ -741,8 +741,8 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad Fixed: true, // Compare Linux's // arch/x86/include/asm/page_types.h:VM_DATA_DEFAULT_FLAGS. - Perms: usermem.ReadWrite, - MaxPerms: usermem.AnyAccess, + Perms: hostarch.ReadWrite, + MaxPerms: hostarch.AnyAccess, Private: true, // Linux: mm/mmap.c:sys_brk() => do_brk_flags() includes // mm->def_flags. 
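Several hunks above construct hostarch.AddrRange values positionally ({oldAddr, oldEnd}, {newbrkpg, oldbrkpg}) and test them with Overlaps before moving or unmapping. A self-contained sketch of that pattern with arbitrary example addresses (none of these values come from the change itself):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// An existing two-page mapping.
	oldAR := hostarch.AddrRange{0x10000, 0x10000 + 2*hostarch.PageSize}

	// A fixed remap destination, as with MRemapMustMove: it must not
	// overlap the source range.
	newAR := hostarch.AddrRange{0x11000, 0x11000 + 2*hostarch.PageSize}
	fmt.Println(oldAR.Overlaps(newAR)) // true: the second page collides

	// Shrinking in place unmaps only the tail, mirroring
	// unmapLocked(ctx, hostarch.AddrRange{newEnd, oldEnd}) above.
	newEnd := oldAR.Start + hostarch.PageSize
	tail := hostarch.AddrRange{newEnd, oldAR.End}
	fmt.Println(tail.WellFormed(), uint64(tail.Length())) // true 4096
}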
@@ -762,7 +762,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad } case newbrkpg < oldbrkpg: - mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg}) + mm.unmapLocked(ctx, hostarch.AddrRange{newbrkpg, oldbrkpg}) fallthrough default: @@ -775,9 +775,9 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Ad // MLock implements the semantics of Linux's mlock()/mlock2()/munlock(), // depending on mode. -func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length uint64, mode memmap.MLockMode) error { +func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length uint64, mode memmap.MLockMode) error { // Linux allows this to overflow. - la, _ := usermem.Addr(length + addr.PageOffset()).RoundUp() + la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp() ar, ok := addr.RoundDown().ToRange(uint64(la)) if !ok { return syserror.EINVAL @@ -850,7 +850,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr usermem.Addr, length ui mm.mappingMu.RUnlock() return syserror.ENOMEM } - _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), usermem.NoAccess) + _, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), hostarch.NoAccess) if err != nil { mm.activeMu.Unlock() mm.mappingMu.RUnlock() @@ -945,7 +945,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error mm.mappingMu.DowngradeLock() for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() { if vseg.ValuePtr().effectivePerms.Any() { - mm.getPMAsLocked(ctx, vseg, vseg.Range(), usermem.NoAccess) + mm.getPMAsLocked(ctx, vseg, vseg.Range(), hostarch.NoAccess) } } @@ -965,7 +965,7 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error } // NumaPolicy implements the semantics of Linux's get_mempolicy(MPOL_F_ADDR). -func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64, error) { +func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint64, error) { mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() vseg := mm.vmas.FindSegment(addr) @@ -977,12 +977,12 @@ func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (linux.NumaPolicy, uint64 } // SetNumaPolicy implements the semantics of Linux's mbind(). -func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error { +func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error { if !addr.IsPageAligned() { return syserror.EINVAL } // Linux allows this to overflow. - la, _ := usermem.Addr(length).RoundUp() + la, _ := hostarch.Addr(length).RoundUp() ar, ok := addr.ToRange(uint64(la)) if !ok { return syserror.EINVAL @@ -1018,7 +1018,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy } // SetDontFork implements the semantics of madvise MADV_DONTFORK. -func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork bool) error { +func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork bool) error { ar, ok := addr.ToRange(length) if !ok { return syserror.EINVAL @@ -1044,7 +1044,7 @@ func (mm *MemoryManager) SetDontFork(addr usermem.Addr, length uint64, dontfork } // Decommit implements the semantics of Linux's madvise(MADV_DONTNEED). 
-func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error { +func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error { ar, ok := addr.ToRange(length) if !ok { return syserror.EINVAL @@ -1112,14 +1112,14 @@ type MSyncOpts struct { } // MSync implements the semantics of Linux's msync(). -func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length uint64, opts MSyncOpts) error { +func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length uint64, opts MSyncOpts) error { if addr != addr.RoundDown() { return syserror.EINVAL } if length == 0 { return nil } - la, ok := usermem.Addr(length).RoundUp() + la, ok := hostarch.Addr(length).RoundUp() if !ok { return syserror.ENOMEM } @@ -1188,7 +1188,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr usermem.Addr, length ui } // GetSharedFutexKey is used by kernel.Task.GetSharedKey. -func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Addr) (futex.Key, error) { +func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) { ar, ok := addr.ToRange(4) // sizeof(int32). if !ok { return futex.Key{}, syserror.EFAULT @@ -1196,7 +1196,7 @@ func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr usermem.Add mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() - vseg, _, err := mm.getVMAsLocked(ctx, ar, usermem.Read, false) + vseg, _, err := mm.getVMAsLocked(ctx, ar, hostarch.Read, false) if err != nil { return futex.Key{}, err } @@ -1230,7 +1230,7 @@ func (mm *MemoryManager) VirtualMemorySize() uint64 { // VirtualMemorySizeRange returns the combined length in bytes of all mappings // in ar in mm. -func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 { +func (mm *MemoryManager) VirtualMemorySizeRange(ar hostarch.AddrRange) uint64 { mm.mappingMu.RLock() defer mm.mappingMu.RUnlock() return uint64(mm.vmas.SpanRange(ar)) diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go index b8df72813..0d019e41d 100644 --- a/pkg/sentry/mm/vma.go +++ b/pkg/sentry/mm/vma.go @@ -19,18 +19,18 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // Preconditions: // * mm.mappingMu must be locked for writing. // * opts must be valid as defined by the checks in MMap. 
-func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) { +func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, hostarch.AddrRange, error) { if opts.MaxPerms != opts.MaxPerms.Effective() { panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms)) } @@ -47,7 +47,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp if opts.Force && opts.Unmap && opts.Fixed { addr = opts.Addr } else { - return vmaIterator{}, usermem.AddrRange{}, err + return vmaIterator{}, hostarch.AddrRange{}, err } } ar, _ := addr.ToRange(opts.Length) @@ -58,7 +58,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp newUsageAS -= uint64(mm.vmas.SpanRange(ar)) } if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS { - return vmaIterator{}, usermem.AddrRange{}, syserror.ENOMEM + return vmaIterator{}, hostarch.AddrRange{}, syserror.ENOMEM } if opts.MLockMode != memmap.MLockNone { @@ -66,14 +66,14 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) { mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur if mlockLimit == 0 { - return vmaIterator{}, usermem.AddrRange{}, syserror.EPERM + return vmaIterator{}, hostarch.AddrRange{}, syserror.EPERM } newLockedAS := mm.lockedAS + opts.Length if opts.Unmap { newLockedAS -= mm.mlockedBytesRangeLocked(ar) } if newLockedAS > mlockLimit { - return vmaIterator{}, usermem.AddrRange{}, syserror.EAGAIN + return vmaIterator{}, hostarch.AddrRange{}, syserror.EAGAIN } } } @@ -93,7 +93,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp // The expression for writable is vma.canWriteMappableLocked(), but we // don't yet have a vma. if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset, !opts.Private && opts.MaxPerms.Write); err != nil { - return vmaIterator{}, usermem.AddrRange{}, err + return vmaIterator{}, hostarch.AddrRange{}, err } } @@ -137,7 +137,7 @@ type findAvailableOpts struct { // // - Unmap allows existing guard pages in the returned range. - Addr usermem.Addr + Addr hostarch.Addr Fixed bool Unmap bool Map32Bit bool @@ -153,13 +153,13 @@ const ( // findAvailableLocked finds an allocatable range. // // Preconditions: mm.mappingMu must be locked. -func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (usermem.Addr, error) { +func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (hostarch.Addr, error) { if opts.Fixed { opts.Map32Bit = false } allowedAR := mm.applicationAddrRange() if opts.Map32Bit { - allowedAR = allowedAR.Intersect(usermem.AddrRange{map32Start, map32End}) + allowedAR = allowedAR.Intersect(hostarch.AddrRange{map32Start, map32End}) } // Does the provided suggestion work? @@ -181,33 +181,33 @@ func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOp } // Prefer hugepage alignment if a hugepage or more is requested. 
- alignment := uint64(usermem.PageSize) - if length >= usermem.HugePageSize { - alignment = usermem.HugePageSize + alignment := uint64(hostarch.PageSize) + if length >= hostarch.HugePageSize { + alignment = hostarch.HugePageSize } if opts.Map32Bit { return mm.findLowestAvailableLocked(length, alignment, allowedAR) } if mm.layout.DefaultDirection == arch.MmapBottomUp { - return mm.findLowestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr}) + return mm.findLowestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr}) } - return mm.findHighestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase}) + return mm.findHighestAvailableLocked(length, alignment, hostarch.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase}) } -func (mm *MemoryManager) applicationAddrRange() usermem.AddrRange { - return usermem.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr} +func (mm *MemoryManager) applicationAddrRange() hostarch.AddrRange { + return hostarch.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr} } // Preconditions: mm.mappingMu must be locked. -func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) { - for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(usermem.Addr(length)) { +func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) { + for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(hostarch.Addr(length)) { if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length { // Can we shift up to match the alignment? if offset := uint64(gr.Start) % alignment; offset != 0 { if uint64(gr.Length()) >= length+alignment-offset { // Yes, we're aligned. - return gr.Start + usermem.Addr(alignment-offset), nil + return gr.Start + hostarch.Addr(alignment-offset), nil } } @@ -219,15 +219,15 @@ func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bou } // Preconditions: mm.mappingMu must be locked. -func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) { - for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(usermem.Addr(length)) { +func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds hostarch.AddrRange) (hostarch.Addr, error) { + for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(hostarch.Addr(length)) { if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length { // Can we shift down to match the alignment? - start := gr.End - usermem.Addr(length) + start := gr.End - hostarch.Addr(length) if offset := uint64(start) % alignment; offset != 0 { - if gr.Start <= start-usermem.Addr(offset) { + if gr.Start <= start-hostarch.Addr(offset) { // Yes, we're aligned. - return start - usermem.Addr(offset), nil + return start - hostarch.Addr(offset), nil } } @@ -239,7 +239,7 @@ func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bo } // Preconditions: mm.mappingMu must be locked. 
-func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 { +func (mm *MemoryManager) mlockedBytesRangeLocked(ar hostarch.AddrRange) uint64 { var total uint64 for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() { if vseg.ValuePtr().mlockMode != memmap.MLockNone { @@ -264,7 +264,7 @@ func (mm *MemoryManager) mlockedBytesRangeLocked(ar usermem.AddrRange) uint64 { // Preconditions: // * mm.mappingMu must be locked for reading; it may be temporarily unlocked. // * ar.Length() != 0. -func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) { +func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -320,7 +320,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange // temporarily unlocked. // // Postconditions: ars is not mutated. -func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool) (usermem.AddrRangeSeq, error) { +func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool) (hostarch.AddrRangeSeq, error) { for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() { ar := arsit.Head() if ar.Length() == 0 { @@ -339,7 +339,7 @@ func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrR // // guardBytes is equivalent to Linux's stack_guard_gap after upstream // 1be7107fbe18 "mm: larger stack guard gap, between vmas". -const guardBytes = 256 * usermem.PageSize +const guardBytes = 256 * hostarch.PageSize // unmapLocked unmaps all addresses in ar and returns the resulting gap in // mm.vmas. @@ -348,7 +348,7 @@ const guardBytes = 256 * usermem.PageSize // * mm.mappingMu must be locked for writing. // * ar.Length() != 0. // * ar must be page-aligned. -func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator { +func (mm *MemoryManager) unmapLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -369,7 +369,7 @@ func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) // * mm.mappingMu must be locked for writing. // * ar.Length() != 0. // * ar must be page-aligned. -func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator { +func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar hostarch.AddrRange) vmaGapIterator { if checkInvariants { if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() { panic(fmt.Sprintf("invalid ar: %v", ar)) @@ -426,12 +426,12 @@ func (vma *vma) isPrivateDataLocked() bool { // vmaSetFunctions implements segment.Functions for vmaSet. 
type vmaSetFunctions struct{} -func (vmaSetFunctions) MinKey() usermem.Addr { +func (vmaSetFunctions) MinKey() hostarch.Addr { return 0 } -func (vmaSetFunctions) MaxKey() usermem.Addr { - return ^usermem.Addr(0) +func (vmaSetFunctions) MaxKey() hostarch.Addr { + return ^hostarch.Addr(0) } func (vmaSetFunctions) ClearValue(vma *vma) { @@ -440,7 +440,7 @@ func (vmaSetFunctions) ClearValue(vma *vma) { vma.hint = "" } -func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRange, vma2 vma) (vma, bool) { +func (vmaSetFunctions) Merge(ar1 hostarch.AddrRange, vma1 vma, ar2 hostarch.AddrRange, vma2 vma) (vma, bool) { if vma1.mappable != vma2.mappable || (vma1.mappable != nil && vma1.off+uint64(ar1.Length()) != vma2.off) || vma1.realPerms != vma2.realPerms || @@ -462,7 +462,7 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa return vma1, true } -func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (vma, vma) { +func (vmaSetFunctions) Split(ar hostarch.AddrRange, v vma, split hostarch.Addr) (vma, vma) { v2 := v if v2.mappable != nil { v2.off += uint64(split - ar.Start) @@ -476,7 +476,7 @@ func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (v // Preconditions: // * vseg.ValuePtr().mappable != nil. // * vseg.Range().Contains(addr). -func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 { +func (vseg vmaIterator) mappableOffsetAt(addr hostarch.Addr) uint64 { if checkInvariants { if !vseg.Ok() { panic("terminal vma iterator") @@ -503,7 +503,7 @@ func (vseg vmaIterator) mappableRange() memmap.MappableRange { // * vseg.ValuePtr().mappable != nil. // * vseg.Range().IsSupersetOf(ar). // * ar.Length() != 0. -func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange { +func (vseg vmaIterator) mappableRangeOf(ar hostarch.AddrRange) memmap.MappableRange { if checkInvariants { if !vseg.Ok() { panic("terminal vma iterator") @@ -528,7 +528,7 @@ func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRan // * vseg.ValuePtr().mappable != nil. // * vseg.mappableRange().IsSupersetOf(mr). // * mr.Length() != 0. -func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange { +func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) hostarch.AddrRange { if checkInvariants { if !vseg.Ok() { panic("terminal vma iterator") @@ -546,7 +546,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange { vma := vseg.ValuePtr() vstart := vseg.Start() - return usermem.AddrRange{vstart + usermem.Addr(mr.Start-vma.off), vstart + usermem.Addr(mr.End-vma.off)} + return hostarch.AddrRange{vstart + hostarch.Addr(mr.Start-vma.off), vstart + hostarch.Addr(mr.End-vma.off)} } // seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by @@ -555,7 +555,7 @@ func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange { // Preconditions: // * mm.mappingMu must be locked. // * addr >= vseg.Start(). -func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator { +func (vseg vmaIterator) seekNextLowerBound(addr hostarch.Addr) vmaIterator { if checkInvariants { if !vseg.Ok() { panic("terminal vma iterator") @@ -572,7 +572,7 @@ func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator { // availableRange returns the subset of vgap.Range() in which new vmas may be // created without MMapOpts.Unmap == true. 
-func (vgap vmaGapIterator) availableRange() usermem.AddrRange { +func (vgap vmaGapIterator) availableRange() hostarch.AddrRange { ar := vgap.Range() next := vgap.NextSegment() if !next.Ok() || !next.ValuePtr().growsDown { @@ -580,7 +580,7 @@ func (vgap vmaGapIterator) availableRange() usermem.AddrRange { } // Exclude guard pages. if ar.Length() < guardBytes { - return usermem.AddrRange{ar.Start, ar.Start} + return hostarch.AddrRange{ar.Start, ar.Start} } ar.End -= guardBytes return ar diff --git a/pkg/sentry/pgalloc/BUILD b/pkg/sentry/pgalloc/BUILD index e5bf13c40..57d73d770 100644 --- a/pkg/sentry/pgalloc/BUILD +++ b/pkg/sentry/pgalloc/BUILD @@ -85,6 +85,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/memutil", "//pkg/safemem", @@ -106,5 +107,5 @@ go_test( size = "small", srcs = ["pgalloc_test.go"], library = ":pgalloc", - deps = ["//pkg/usermem"], + deps = ["//pkg/hostarch"], ) diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go index a4af3e21b..b81292c46 100644 --- a/pkg/sentry/pgalloc/pgalloc.go +++ b/pkg/sentry/pgalloc/pgalloc.go @@ -31,6 +31,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/safemem" "gvisor.dev/gvisor/pkg/sentry/hostmm" @@ -38,7 +39,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // MemoryFile is a memmap.File whose pages may be allocated to arbitrary @@ -283,7 +283,7 @@ const ( chunkMask = chunkSize - 1 // maxPage is the highest 64-bit page. - maxPage = math.MaxUint64 &^ (usermem.PageSize - 1) + maxPage = math.MaxUint64 &^ (hostarch.PageSize - 1) ) // NewMemoryFile creates a MemoryFile backed by the given file. If @@ -344,7 +344,7 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) { m, _, errno := unix.Syscall6( unix.SYS_MMAP, 0, - usermem.PageSize, + hostarch.PageSize, unix.PROT_EXEC, unix.MAP_SHARED, file.Fd(), @@ -357,7 +357,7 @@ func NewMemoryFile(file *os.File, opts MemoryFileOpts) (*MemoryFile, error) { if _, _, errno := unix.Syscall( unix.SYS_MUNMAP, m, - usermem.PageSize, + hostarch.PageSize, 0); errno != 0 { panic(fmt.Sprintf("failed to unmap PROT_EXEC MemoryFile mapping: %v", errno)) } @@ -386,7 +386,7 @@ func (f *MemoryFile) Destroy() { // // Preconditions: length must be page-aligned and non-zero. func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (memmap.FileRange, error) { - if length == 0 || length%usermem.PageSize != 0 { + if length == 0 || length%hostarch.PageSize != 0 { panic(fmt.Sprintf("invalid allocation length: %#x", length)) } @@ -395,9 +395,9 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (memmap.File // Align hugepage-and-larger allocations on hugepage boundaries to try // to take advantage of hugetmpfs. - alignment := uint64(usermem.PageSize) - if length >= usermem.HugePageSize { - alignment = usermem.HugePageSize + alignment := uint64(hostarch.PageSize) + if length >= hostarch.HugePageSize { + alignment = hostarch.HugePageSize } // Find a range in the underlying file. 
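Both findAvailableLocked (mm/vma.go) and MemoryFile.Allocate (pgalloc) in the hunks above switch to hugepage alignment once a request reaches hostarch.HugePageSize. A compile-alone sketch of that heuristic; preferredAlignment and alignUp are illustrative helpers, not gVisor functions:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// preferredAlignment: page alignment by default, hugepage alignment for
// hugepage-sized (or larger) requests, as in the hunks above.
func preferredAlignment(length uint64) uint64 {
	alignment := uint64(hostarch.PageSize)
	if length >= hostarch.HugePageSize {
		alignment = hostarch.HugePageSize
	}
	return alignment
}

// alignUp rounds addr up to a multiple of alignment (a power of two),
// the same shift findLowestAvailableLocked applies to a candidate gap.
func alignUp(addr hostarch.Addr, alignment uint64) hostarch.Addr {
	if off := uint64(addr) % alignment; off != 0 {
		return addr + hostarch.Addr(alignment-off)
	}
	return addr
}

func main() {
	fmt.Println(preferredAlignment(hostarch.PageSize))         // 4096
	fmt.Println(preferredAlignment(4 * hostarch.HugePageSize)) // hugepage-aligned
	fmt.Println(alignUp(0x123456, preferredAlignment(hostarch.HugePageSize)))
}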
@@ -524,13 +524,13 @@ func (f *MemoryFile) AllocateAndFill(length uint64, kind usage.MemoryKind, r saf if err != nil { return memmap.FileRange{}, err } - dsts, err := f.MapInternal(fr, usermem.Write) + dsts, err := f.MapInternal(fr, hostarch.Write) if err != nil { f.DecRef(fr) return memmap.FileRange{}, err } n, err := safemem.ReadFullToBlocks(r, dsts) - un := uint64(usermem.Addr(n).RoundDown()) + un := uint64(hostarch.Addr(n).RoundDown()) if un < length { // Free unused memory and update fr to contain only the memory that is // still allocated. @@ -552,7 +552,7 @@ const ( // // Preconditions: fr.Length() > 0. func (f *MemoryFile) Decommit(fr memmap.FileRange) error { - if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 { + if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 { panic(fmt.Sprintf("invalid range: %v", fr)) } @@ -614,7 +614,7 @@ func (f *MemoryFile) markDecommitted(fr memmap.FileRange) { // IncRef implements memmap.File.IncRef. func (f *MemoryFile) IncRef(fr memmap.FileRange) { - if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 { + if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 { panic(fmt.Sprintf("invalid range: %v", fr)) } @@ -633,7 +633,7 @@ func (f *MemoryFile) IncRef(fr memmap.FileRange) { // DecRef implements memmap.File.DecRef. func (f *MemoryFile) DecRef(fr memmap.FileRange) { - if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 { + if !fr.WellFormed() || fr.Length() == 0 || fr.Start%hostarch.PageSize != 0 || fr.End%hostarch.PageSize != 0 { panic(fmt.Sprintf("invalid range: %v", fr)) } @@ -669,7 +669,7 @@ func (f *MemoryFile) DecRef(fr memmap.FileRange) { } // MapInternal implements memmap.File.MapInternal. -func (f *MemoryFile) MapInternal(fr memmap.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { +func (f *MemoryFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) { if !fr.WellFormed() || fr.Length() == 0 { panic(fmt.Sprintf("invalid range: %v", fr)) } @@ -935,7 +935,7 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func( // Ensure that we have sufficient buffer for the call // (one byte per page). The length of each slice must // be page-aligned. - bufLen := len(s) / usermem.PageSize + bufLen := len(s) / hostarch.PageSize if len(buf) < bufLen { buf = make([]byte, bufLen) } @@ -967,8 +967,8 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func( } } committedFR := memmap.FileRange{ - Start: r.Start + uint64(i*usermem.PageSize), - End: r.Start + uint64(j*usermem.PageSize), + Start: r.Start + uint64(i*hostarch.PageSize), + End: r.Start + uint64(j*hostarch.PageSize), } // Advance seg to committedFR.Start. 
for seg.Ok() && seg.End() < committedFR.Start { diff --git a/pkg/sentry/pgalloc/pgalloc_test.go b/pkg/sentry/pgalloc/pgalloc_test.go index 405db141f..8d2b7eb5e 100644 --- a/pkg/sentry/pgalloc/pgalloc_test.go +++ b/pkg/sentry/pgalloc/pgalloc_test.go @@ -17,12 +17,12 @@ package pgalloc import ( "testing" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) const ( - page = usermem.PageSize - hugepage = usermem.HugePageSize + page = hostarch.PageSize + hugepage = hostarch.HugePageSize topPage = (1 << 63) - page ) diff --git a/pkg/sentry/pgalloc/save_restore.go b/pkg/sentry/pgalloc/save_restore.go index e05c8d074..345cdde55 100644 --- a/pkg/sentry/pgalloc/save_restore.go +++ b/pkg/sentry/pgalloc/save_restore.go @@ -23,11 +23,11 @@ import ( "sync/atomic" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/usage" "gvisor.dev/gvisor/pkg/state" "gvisor.dev/gvisor/pkg/state/wire" - "gvisor.dev/gvisor/pkg/usermem" ) // SaveTo writes f's state to the given stream. @@ -49,11 +49,11 @@ func (f *MemoryFile) SaveTo(ctx context.Context, w wire.Writer) error { // Ensure that all pages that contain data have knownCommitted set, since // we only store knownCommitted pages below. - zeroPage := make([]byte, usermem.PageSize) + zeroPage := make([]byte, hostarch.PageSize) err := f.updateUsageLocked(0, func(bs []byte, committed []byte) error { - for pgoff := 0; pgoff < len(bs); pgoff += usermem.PageSize { - i := pgoff / usermem.PageSize - pg := bs[pgoff : pgoff+usermem.PageSize] + for pgoff := 0; pgoff < len(bs); pgoff += hostarch.PageSize { + i := pgoff / hostarch.PageSize + pg := bs[pgoff : pgoff+hostarch.PageSize] if !bytes.Equal(pg, zeroPage) { committed[i] = 1 continue diff --git a/pkg/sentry/platform/BUILD b/pkg/sentry/platform/BUILD index db7d55ef2..7125657b3 100644 --- a/pkg/sentry/platform/BUILD +++ b/pkg/sentry/platform/BUILD @@ -13,6 +13,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/seccomp", "//pkg/sentry/arch", "//pkg/sentry/hostmm", diff --git a/pkg/sentry/platform/kvm/BUILD b/pkg/sentry/platform/kvm/BUILD index 03a76eb9b..f04898dc1 100644 --- a/pkg/sentry/platform/kvm/BUILD +++ b/pkg/sentry/platform/kvm/BUILD @@ -43,6 +43,7 @@ go_library( "//pkg/atomicbitops", "//pkg/context", "//pkg/cpuid", + "//pkg/hostarch", "//pkg/log", "//pkg/procid", "//pkg/ring0", @@ -56,7 +57,6 @@ go_library( "//pkg/sentry/platform/interrupt", "//pkg/sentry/time", "//pkg/sync", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) @@ -76,6 +76,7 @@ go_test( "requires-kvm", ], deps = [ + "//pkg/hostarch", "//pkg/ring0", "//pkg/ring0/pagetables", "//pkg/sentry/arch", @@ -83,7 +84,6 @@ go_test( "//pkg/sentry/platform", "//pkg/sentry/platform/kvm/testutil", "//pkg/sentry/time", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/platform/kvm/address_space.go b/pkg/sentry/platform/kvm/address_space.go index 25c21e843..5524e8727 100644 --- a/pkg/sentry/platform/kvm/address_space.go +++ b/pkg/sentry/platform/kvm/address_space.go @@ -18,11 +18,11 @@ import ( "sync/atomic" "gvisor.dev/gvisor/pkg/atomicbitops" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // dirtySet tracks vCPUs for invalidation. 
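MemoryFile.SaveTo in the pgalloc hunk above walks memory one hostarch.PageSize page at a time and records every page that differs from a zero page as committed. The same scan over a plain byte slice, with markCommitted as an illustrative stand-in for the closure passed to updateUsageLocked:

package main

import (
	"bytes"
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// markCommitted sets committed[i] = 1 for every page of bs that is not
// all zeroes, following the loop in MemoryFile.SaveTo above.
func markCommitted(bs, committed []byte) {
	zeroPage := make([]byte, hostarch.PageSize)
	for pgoff := 0; pgoff < len(bs); pgoff += hostarch.PageSize {
		i := pgoff / hostarch.PageSize
		pg := bs[pgoff : pgoff+hostarch.PageSize]
		if !bytes.Equal(pg, zeroPage) {
			committed[i] = 1
		}
	}
}

func main() {
	bs := make([]byte, 3*hostarch.PageSize)
	bs[hostarch.PageSize] = 0xff // dirty only the second page
	committed := make([]byte, 3)
	markCommitted(bs, committed)
	fmt.Println(committed) // [0 1 0]
}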
@@ -118,7 +118,7 @@ type hostMapEntry struct { // +checkescape:hard,stack // //go:nosplit -func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) { +func (as *addressSpace) mapLocked(addr hostarch.Addr, m hostMapEntry, at hostarch.AccessType) (inv bool) { for m.length > 0 { physical, length, ok := translateToPhysical(m.addr) if !ok { @@ -144,14 +144,14 @@ func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem. }, physical) || inv m.addr += length m.length -= length - addr += usermem.Addr(length) + addr += hostarch.Addr(length) } return inv } // MapFile implements platform.AddressSpace.MapFile. -func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error { +func (as *addressSpace) MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error { as.mu.Lock() defer as.mu.Unlock() @@ -165,7 +165,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File // We don't execute from application file-mapped memory, and guest page // tables don't care if we have execute permission (but they do need pages // to be readable). - bs, err := f.MapInternal(fr, usermem.AccessType{ + bs, err := f.MapInternal(fr, hostarch.AccessType{ Read: at.Read || at.Execute || precommit, Write: at.Write, }) @@ -187,7 +187,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File // lookup in our host page tables for this translation. if precommit { s := b.ToSlice() - for i := 0; i < len(s); i += usermem.PageSize { + for i := 0; i < len(s); i += hostarch.PageSize { _ = s[i] // Touch to commit. } } @@ -201,7 +201,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File length: uintptr(b.Len()), }, at) inv = inv || prev - addr += usermem.Addr(b.Len()) + addr += hostarch.Addr(b.Len()) } if inv { as.invalidate() @@ -215,12 +215,12 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File // +checkescape:hard,stack // //go:nosplit -func (as *addressSpace) unmapLocked(addr usermem.Addr, length uint64) bool { +func (as *addressSpace) unmapLocked(addr hostarch.Addr, length uint64) bool { return as.pageTables.Unmap(addr, uintptr(length)) } // Unmap unmaps the given range by calling pagetables.PageTables.Unmap. 
-func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) { +func (as *addressSpace) Unmap(addr hostarch.Addr, length uint64) { as.mu.Lock() defer as.mu.Unlock() diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go index 37c53fa02..28a613a54 100644 --- a/pkg/sentry/platform/kvm/bluepill_fault.go +++ b/pkg/sentry/platform/kvm/bluepill_fault.go @@ -18,7 +18,7 @@ import ( "sync/atomic" "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) const ( @@ -47,7 +47,7 @@ func yield() { // //go:nosplit func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virtualStart, physicalStart, length uintptr, ok bool) { - alignedPhysical := physical &^ uintptr(usermem.PageSize-1) + alignedPhysical := physical &^ uintptr(hostarch.PageSize-1) for _, pr := range phyRegions { end := pr.physical + pr.length if physical < pr.physical || physical >= end { diff --git a/pkg/sentry/platform/kvm/context.go b/pkg/sentry/platform/kvm/context.go index 706fa53dc..f4d4473a8 100644 --- a/pkg/sentry/platform/kvm/context.go +++ b/pkg/sentry/platform/kvm/context.go @@ -18,11 +18,11 @@ import ( "sync/atomic" pkgcontext "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sentry/platform/interrupt" - "gvisor.dev/gvisor/pkg/usermem" ) // context is an implementation of the platform context. @@ -40,7 +40,7 @@ type context struct { } // Switch runs the provided context in the given address space. -func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, usermem.AccessType, error) { +func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, hostarch.AccessType, error) { as := mm.AddressSpace() localAS := as.(*addressSpace) @@ -50,7 +50,7 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a // Enable interrupts (i.e. calls to vCPU.Notify). if !c.interrupt.Enable(cpu) { c.machine.Put(cpu) // Already preempted. - return nil, usermem.NoAccess, platform.ErrContextInterrupt + return nil, hostarch.NoAccess, platform.ErrContextInterrupt } // Set the active address space. diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go index 92c05a9ad..aac0fdffe 100644 --- a/pkg/sentry/platform/kvm/kvm.go +++ b/pkg/sentry/platform/kvm/kvm.go @@ -20,11 +20,11 @@ import ( "os" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // userMemoryRegion is a region of physical memory. @@ -146,13 +146,13 @@ func (*KVM) MapUnit() uint64 { } // MinUserAddress returns the lowest available address. -func (*KVM) MinUserAddress() usermem.Addr { - return usermem.PageSize +func (*KVM) MinUserAddress() hostarch.Addr { + return hostarch.PageSize } // MaxUserAddress returns the first address that may not be used. -func (*KVM) MaxUserAddress() usermem.Addr { - return usermem.Addr(ring0.MaximumUserAddress) +func (*KVM) MaxUserAddress() hostarch.Addr { + return hostarch.Addr(ring0.MaximumUserAddress) } // NewAddressSpace returns a new pagetable root. 
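As background for the KVM hunks above and below, the following standalone sketch (not part of this change) exercises the hostarch.Addr and hostarch.AccessType helpers that replace their usermem counterparts; it assumes only the methods already visible in these hunks (RoundDown, RoundUp, ToRange, Any).

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

func main() {
	// Addr is a virtual address (uintptr-sized); RoundDown/RoundUp align it
	// to the host page size.
	addr := hostarch.Addr(0x7f001234)
	fmt.Printf("%#x\n", addr.RoundDown()) // 0x7f001000 with 4 KiB pages
	if up, ok := addr.RoundUp(); ok {     // RoundUp reports overflow via ok
		fmt.Printf("%#x\n", up) // 0x7f002000
	}

	// ToRange mirrors the overflow-checked construction used by Unmap above.
	if ar, ok := addr.ToRange(2 * hostarch.PageSize); ok {
		fmt.Printf("%#x-%#x\n", ar.Start, ar.End)
	}

	// AccessType is the permission triple that MapFile and Switch accept or
	// return; NoAccess, Read, Write, Execute and AnyAccess are predefined.
	at := hostarch.AccessType{Read: true}
	fmt.Println(at.Any(), at == hostarch.Read) // true true
}

Semantically this is the same API that previously lived in usermem; only the import path changes, which is consistent with the purely mechanical renames in these hunks.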
diff --git a/pkg/sentry/platform/kvm/kvm_test.go b/pkg/sentry/platform/kvm/kvm_test.go index 5bce16dde..ceff09a60 100644 --- a/pkg/sentry/platform/kvm/kvm_test.go +++ b/pkg/sentry/platform/kvm/kvm_test.go @@ -22,6 +22,7 @@ import ( "time" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/arch" @@ -29,7 +30,6 @@ import ( "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sentry/platform/kvm/testutil" ktime "gvisor.dev/gvisor/pkg/sentry/time" - "gvisor.dev/gvisor/pkg/usermem" ) var dummyFPState = fpu.NewState() @@ -142,8 +142,8 @@ func applicationTest(t testHarness, useHostMappings bool, target func(), fn func // done for regular user code, but is fine for test // purposes.) applyPhysicalRegions(func(pr physicalRegion) bool { - pt.Map(usermem.Addr(pr.virtual), pr.length, pagetables.MapOpts{ - AccessType: usermem.AnyAccess, + pt.Map(hostarch.Addr(pr.virtual), pr.length, pagetables.MapOpts{ + AccessType: hostarch.AnyAccess, User: true, }, pr.physical) return true // Keep iterating. @@ -351,7 +351,7 @@ func TestInvalidate(t *testing.T) { break // Done. } // Unmap the page containing data & invalidate. - pt.Unmap(usermem.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(usermem.PageSize-1)), usermem.PageSize) + pt.Unmap(hostarch.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(hostarch.PageSize-1)), hostarch.PageSize) for { var si arch.SignalInfo if _, err := c.SwitchToUser(ring0.SwitchOpts{ diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go index 5d586f257..b3d4188a3 100644 --- a/pkg/sentry/platform/kvm/machine.go +++ b/pkg/sentry/platform/kvm/machine.go @@ -21,13 +21,13 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/atomicbitops" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/procid" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" ktime "gvisor.dev/gvisor/pkg/sentry/time" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // machine contains state associated with the VM as a whole. @@ -227,9 +227,9 @@ func newMachine(vm int) (*machine, error) { applyPhysicalRegions(func(pr physicalRegion) bool { // Map everything in the lower half. m.kernel.PageTables.Map( - usermem.Addr(pr.virtual), + hostarch.Addr(pr.virtual), pr.length, - pagetables.MapOpts{AccessType: usermem.AnyAccess}, + pagetables.MapOpts{AccessType: hostarch.AnyAccess}, pr.physical) return true // Keep iterating. diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go index 3af96c7e5..e8e209249 100644 --- a/pkg/sentry/platform/kvm/machine_amd64.go +++ b/pkg/sentry/platform/kvm/machine_amd64.go @@ -24,13 +24,13 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/cpuid" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/platform" ktime "gvisor.dev/gvisor/pkg/sentry/time" - "gvisor.dev/gvisor/pkg/usermem" ) // initArchState initializes architecture-specific state. 
@@ -41,7 +41,7 @@ func (m *machine) initArchState() error { unix.SYS_IOCTL, uintptr(m.fd), _KVM_SET_TSS_ADDR, - uintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 { + uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 { return errno } @@ -256,19 +256,19 @@ func (c *vCPU) setSystemTime() error { // nonCanonical generates a canonical address return. // //go:nosplit -func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) { +func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) { *info = arch.SignalInfo{ Signo: signal, Code: arch.SignalInfoKernel, } info.SetAddr(addr) // Include address. - return usermem.NoAccess, platform.ErrContextSignal + return hostarch.NoAccess, platform.ErrContextSignal } // fault generates an appropriate fault return. // //go:nosplit -func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) { +func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := ring0.ReadCR2() code, user := c.ErrorCode() @@ -276,12 +276,12 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. - return usermem.NoAccess, platform.ErrContextInterrupt + return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. *info = arch.SignalInfo{Signo: signal} info.SetAddr(uint64(faultAddr)) - accessType := usermem.AccessType{ + accessType := hostarch.AccessType{ Read: code&(1<<1) == 0, Write: code&(1<<1) != 0, Execute: code&(1<<4) != 0, @@ -310,14 +310,14 @@ func loadByte(ptr *byte) byte { //go:nosplit func prefaultFloatingPointState(data *fpu.State) { size := len(*data) - for i := 0; i < size; i += usermem.PageSize { + for i := 0; i < size; i += hostarch.PageSize { loadByte(&(*data)[i]) } loadByte(&(*data)[size-1]) } // SwitchToUser unpacks architectural-details. -func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) { +func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) { // Check for canonical addresses. if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) { return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info) @@ -353,7 +353,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) switch vector { case ring0.Syscall, ring0.SyscallInt80: // Fast path: system call executed. - return usermem.NoAccess, nil + return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) @@ -364,7 +364,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Rip) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.GeneralProtectionFault, ring0.SegmentNotPresent, @@ -380,9 +380,9 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) // When CPUID faulting is enabled, we will generate a #GP(0) when // userspace executes a CPUID instruction. This is handled above, // because we need to be able to map and read user memory. 
- return usermem.AccessType{}, platform.ErrContextSignalCPUID + return hostarch.AccessType{}, platform.ErrContextSignalCPUID } - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.InvalidOpcode: *info = arch.SignalInfo{ @@ -390,7 +390,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 1, // ILL_ILLOPC (illegal opcode). } info.SetAddr(switchOpts.Registers.Rip) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.DivideByZero: *info = arch.SignalInfo{ @@ -398,7 +398,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 1, // FPE_INTDIV (divide by zero). } info.SetAddr(switchOpts.Registers.Rip) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Overflow: *info = arch.SignalInfo{ @@ -406,7 +406,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 2, // FPE_INTOVF (integer overflow). } info.SetAddr(switchOpts.Registers.Rip) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.X87FloatingPointException, ring0.SIMDFloatingPointException: @@ -415,17 +415,17 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 7, // FPE_FLTINV (invalid operation). } info.SetAddr(switchOpts.Registers.Rip) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.Vector(bounce): // ring0.VirtualizationException - return usermem.NoAccess, platform.ErrContextInterrupt + return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.AlignmentCheck: *info = arch.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). 
} - return usermem.NoAccess, platform.ErrContextSignal + return hostarch.NoAccess, platform.ErrContextSignal case ring0.NMI: // An NMI is generated only when a fault is not servicable by @@ -471,9 +471,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { panic("impossible translation") } pageTable.Map( - usermem.Addr(ring0.KernelStartAddress|r.virtual), + hostarch.Addr(ring0.KernelStartAddress|r.virtual), r.length, - pagetables.MapOpts{AccessType: usermem.Execute}, + pagetables.MapOpts{AccessType: hostarch.Execute}, physical) } }) @@ -484,9 +484,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { panic("impossible translation") } pageTable.Map( - usermem.Addr(ring0.KernelStartAddress|start), + hostarch.Addr(ring0.KernelStartAddress|start), regionLen, - pagetables.MapOpts{AccessType: usermem.ReadWrite}, + pagetables.MapOpts{AccessType: hostarch.ReadWrite}, physical) } } diff --git a/pkg/sentry/platform/kvm/machine_arm64.go b/pkg/sentry/platform/kvm/machine_arm64.go index 2edc9d1b2..03e84d804 100644 --- a/pkg/sentry/platform/kvm/machine_arm64.go +++ b/pkg/sentry/platform/kvm/machine_arm64.go @@ -17,12 +17,12 @@ package kvm import ( + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/platform" - "gvisor.dev/gvisor/pkg/usermem" ) type vCPUArchState struct { @@ -53,9 +53,9 @@ const ( func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) { applyPhysicalRegions(func(pr physicalRegion) bool { pageTable.Map( - usermem.Addr(ring0.KernelStartAddress|pr.virtual), + hostarch.Addr(ring0.KernelStartAddress|pr.virtual), pr.length, - pagetables.MapOpts{AccessType: usermem.AnyAccess, Global: true}, + pagetables.MapOpts{AccessType: hostarch.AnyAccess, Global: true}, pr.physical) return true // Keep iterating. @@ -117,13 +117,13 @@ func availableRegionsForSetMem() (phyRegions []physicalRegion) { // nonCanonical generates a canonical address return. // //go:nosplit -func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) { +func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) { *info = arch.SignalInfo{ Signo: signal, Code: arch.SignalInfoKernel, } info.SetAddr(addr) // Include address. - return usermem.NoAccess, platform.ErrContextSignal + return hostarch.NoAccess, platform.ErrContextSignal } // isInstructionAbort returns true if it is an instruction abort. @@ -148,7 +148,7 @@ func isWriteFault(code uint64) bool { // fault generates an appropriate fault return. // //go:nosplit -func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) { +func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) { bluepill(c) // Probably no-op, but may not be. faultAddr := c.GetFaultAddr() code, user := c.ErrorCode() @@ -157,7 +157,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e // The last fault serviced by this CPU was not a user // fault, so we can't reliably trust the faultAddr or // the code provided here. We need to re-execute. - return usermem.NoAccess, platform.ErrContextInterrupt + return hostarch.NoAccess, platform.ErrContextInterrupt } // Reset the pointed SignalInfo. 
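For context on the fault paths above, here is a small self-contained sketch of how the amd64 hunk maps the x86 page-fault error code onto hostarch.AccessType; the helper name is ours, but the bit tests are the ones shown in SwitchToUser's fault handler.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// faultAccessType decodes an x86 page-fault error code: bit 1 distinguishes
// a write from a read, and bit 4 marks an instruction fetch.
func faultAccessType(code uint64) hostarch.AccessType {
	return hostarch.AccessType{
		Read:    code&(1<<1) == 0,
		Write:   code&(1<<1) != 0,
		Execute: code&(1<<4) != 0,
	}
}

func main() {
	fmt.Printf("%+v\n", faultAccessType(0x2))  // write fault
	fmt.Printf("%+v\n", faultAccessType(0x10)) // instruction fetch
}

The arm64 path below derives the same triple from isWriteFault and isInstructionAbort instead of raw error-code bits.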
@@ -174,7 +174,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e info.Code = 2 } - accessType := usermem.AccessType{ + accessType := hostarch.AccessType{ Read: !isWriteFault(uint64(code)), Write: isWriteFault(uint64(code)), Execute: isInstructionAbort(uint64(code)), diff --git a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go index e7d5f3193..634e55ec0 100644 --- a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go +++ b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go @@ -23,12 +23,12 @@ import ( "unsafe" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" "gvisor.dev/gvisor/pkg/ring0/pagetables" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" "gvisor.dev/gvisor/pkg/sentry/platform" - "gvisor.dev/gvisor/pkg/usermem" ) type kvmVcpuInit struct { @@ -209,7 +209,7 @@ func (c *vCPU) getOneRegister(reg *kvmOneReg) error { } // SwitchToUser unpacks architectural-details. -func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) { +func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) { // Check for canonical addresses. if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Pc) { return nonCanonical(regs.Pc, int32(unix.SIGSEGV), info) @@ -246,13 +246,13 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) switch vector { case ring0.Syscall: // Fast path: system call executed. - return usermem.NoAccess, nil + return hostarch.NoAccess, nil case ring0.PageFault: return c.fault(int32(unix.SIGSEGV), info) case ring0.El0ErrNMI: return c.fault(int32(unix.SIGBUS), info) case ring0.Vector(bounce): // ring0.VirtualizationException. - return usermem.NoAccess, platform.ErrContextInterrupt + return hostarch.NoAccess, platform.ErrContextInterrupt case ring0.El0SyncUndef: return c.fault(int32(unix.SIGILL), info) case ring0.El0SyncDbg: @@ -261,16 +261,16 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) Code: 1, // TRAP_BRKPT (breakpoint). } info.SetAddr(switchOpts.Registers.Pc) // Include address. - return usermem.AccessType{}, platform.ErrContextSignal + return hostarch.AccessType{}, platform.ErrContextSignal case ring0.El0SyncSpPc: *info = arch.SignalInfo{ Signo: int32(unix.SIGBUS), Code: 2, // BUS_ADRERR (physical address does not exist). } - return usermem.NoAccess, platform.ErrContextSignal + return hostarch.NoAccess, platform.ErrContextSignal case ring0.El0SyncSys, ring0.El0SyncWfx: - return usermem.NoAccess, nil // skip for now. + return hostarch.NoAccess, nil // skip for now. default: panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) } diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go index 7376d8b8d..d812e6c26 100644 --- a/pkg/sentry/platform/kvm/physical_map.go +++ b/pkg/sentry/platform/kvm/physical_map.go @@ -19,9 +19,9 @@ import ( "sort" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/ring0" - "gvisor.dev/gvisor/pkg/usermem" ) type region struct { @@ -81,7 +81,7 @@ func fillAddressSpace() (excludedRegions []region) { // faultBlockSize, potentially causing up to faultBlockSize bytes in // internal fragmentation for each physical region. So we need to // account for this properly during allocation. 
- requiredAddr, ok := usermem.Addr(vSize - pSize + faultBlockSize).RoundUp() + requiredAddr, ok := hostarch.Addr(vSize - pSize + faultBlockSize).RoundUp() if !ok { panic(fmt.Sprintf( "overflow for vSize (%x) - pSize (%x) + faultBlockSize (%x)", @@ -99,7 +99,7 @@ func fillAddressSpace() (excludedRegions []region) { 0, 0) if errno != 0 { // Attempt half the size; overflow not possible. - currentAddr, _ := usermem.Addr(current >> 1).RoundUp() + currentAddr, _ := hostarch.Addr(current >> 1).RoundUp() current = uintptr(currentAddr) continue } @@ -134,8 +134,8 @@ func computePhysicalRegions(excludedRegions []region) (physicalRegions []physica return } if virtual == 0 { - virtual += usermem.PageSize - length -= usermem.PageSize + virtual += hostarch.PageSize + length -= hostarch.PageSize } if end := virtual + length; end > ring0.MaximumUserAddress { length -= (end - ring0.MaximumUserAddress) diff --git a/pkg/sentry/platform/kvm/virtual_map.go b/pkg/sentry/platform/kvm/virtual_map.go index 4dcdbf8a7..01d9eb39d 100644 --- a/pkg/sentry/platform/kvm/virtual_map.go +++ b/pkg/sentry/platform/kvm/virtual_map.go @@ -22,12 +22,12 @@ import ( "regexp" "strconv" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) type virtualRegion struct { region - accessType usermem.AccessType + accessType hostarch.AccessType shared bool offset uintptr filename string @@ -92,7 +92,7 @@ func applyVirtualRegions(fn func(vr virtualRegion)) error { virtual: uintptr(start), length: uintptr(end - start), }, - accessType: usermem.AccessType{ + accessType: hostarch.AccessType{ Read: read, Write: write, Execute: execute, diff --git a/pkg/sentry/platform/kvm/virtual_map_test.go b/pkg/sentry/platform/kvm/virtual_map_test.go index 9b4545fdd..1f4a774f3 100644 --- a/pkg/sentry/platform/kvm/virtual_map_test.go +++ b/pkg/sentry/platform/kvm/virtual_map_test.go @@ -18,12 +18,12 @@ import ( "testing" "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) type checker struct { ok bool - accessType usermem.AccessType + accessType hostarch.AccessType } func (c *checker) Containing(addr uintptr) func(virtualRegion) { @@ -46,7 +46,7 @@ func TestParseMaps(t *testing.T) { // MMap a new page. addr, _, errno := unix.RawSyscall6( - unix.SYS_MMAP, 0, usermem.PageSize, + unix.SYS_MMAP, 0, hostarch.PageSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANONYMOUS|unix.MAP_PRIVATE, 0, 0) if errno != 0 { @@ -55,19 +55,19 @@ func TestParseMaps(t *testing.T) { // Re-parse maps. if err := applyVirtualRegions(c.Containing(addr)); err != nil { - unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0) + unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0) t.Fatalf("unexpected error: %v", err) } // Assert that it now does contain the region. if !c.ok { - unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0) + unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0) t.Fatalf("updated map does not contain 0x%08x, expected true", addr) } // Map the region as PROT_NONE. newAddr, _, errno := unix.RawSyscall6( - unix.SYS_MMAP, addr, usermem.PageSize, + unix.SYS_MMAP, addr, hostarch.PageSize, unix.PROT_NONE, unix.MAP_ANONYMOUS|unix.MAP_FIXED|unix.MAP_PRIVATE, 0, 0) if errno != 0 { @@ -89,5 +89,5 @@ func TestParseMaps(t *testing.T) { } // Unmap the region. 
- unix.RawSyscall(unix.SYS_MUNMAP, addr, usermem.PageSize, 0) + unix.RawSyscall(unix.SYS_MUNMAP, addr, hostarch.PageSize, 0) } diff --git a/pkg/sentry/platform/mmap_min_addr.go b/pkg/sentry/platform/mmap_min_addr.go index 091c2e365..7335bd802 100644 --- a/pkg/sentry/platform/mmap_min_addr.go +++ b/pkg/sentry/platform/mmap_min_addr.go @@ -20,7 +20,7 @@ import ( "strconv" "strings" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // systemMMapMinAddrSource is the source file. @@ -30,8 +30,8 @@ const systemMMapMinAddrSource = "/proc/sys/vm/mmap_min_addr" var systemMMapMinAddr uint64 // SystemMMapMinAddr returns the minimum system address. -func SystemMMapMinAddr() usermem.Addr { - return usermem.Addr(systemMMapMinAddr) +func SystemMMapMinAddr() hostarch.Addr { + return hostarch.Addr(systemMMapMinAddr) } // MMapMinAddr is a size zero struct that implements MinUserAddress based on @@ -41,7 +41,7 @@ type MMapMinAddr struct { } // MinUserAddress implements platform.MinUserAddresss. -func (*MMapMinAddr) MinUserAddress() usermem.Addr { +func (*MMapMinAddr) MinUserAddress() hostarch.Addr { return SystemMMapMinAddr() } diff --git a/pkg/sentry/platform/platform.go b/pkg/sentry/platform/platform.go index dcfe839a7..ef7814a6f 100644 --- a/pkg/sentry/platform/platform.go +++ b/pkg/sentry/platform/platform.go @@ -23,6 +23,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/seccomp" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/hostmm" @@ -62,16 +63,16 @@ type Platform interface { // for AddressSpace.MapFile. As a special case, a MapUnit of 0 indicates // that the cost of AddressSpace.MapFile is effectively independent of the // number of pages mapped. If MapUnit is non-zero, it must be a power-of-2 - // multiple of usermem.PageSize. + // multiple of hostarch.PageSize. MapUnit() uint64 // MinUserAddress returns the minimum mappable address on this // platform. - MinUserAddress() usermem.Addr + MinUserAddress() hostarch.Addr // MaxUserAddress returns the maximum mappable address on this // platform. - MaxUserAddress() usermem.Addr + MaxUserAddress() hostarch.Addr // NewAddressSpace returns a new memory context for this platform. // @@ -172,7 +173,7 @@ type MemoryManager interface { //usermem.IO provides access to the contents of a virtual memory space. usermem.IO // MMap establishes a memory mapping. - MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error) + MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) // AddressSpace returns the AddressSpace bound to mm. AddressSpace() AddressSpace } @@ -195,7 +196,7 @@ type Context interface { // // - ErrContextSignal: The Context was interrupted by a signal. The // returned *arch.SignalInfo contains information about the signal. If - // arch.SignalInfo.Signo == SIGSEGV, the returned usermem.AccessType + // arch.SignalInfo.Signo == SIGSEGV, the returned hostarch.AccessType // contains the access type of the triggering fault. The caller owns // the returned SignalInfo. // @@ -206,7 +207,7 @@ type Context interface { // concurrent call to Switch(). // // - ErrContextCPUPreempted: See the definition of that error for details. 
- Switch(ctx context.Context, mm MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error) + Switch(ctx context.Context, mm MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, hostarch.AccessType, error) // PullFullState() pulls a full state of the application thread. // @@ -302,14 +303,14 @@ type AddressSpace interface { // * at.Any() == true. // * At least one reference must be held on all pages in fr, and must // continue to be held as long as pages are mapped. - MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error + MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error // Unmap unmaps the given range. // // Preconditions: // * addr is page-aligned. // * length > 0. - Unmap(addr usermem.Addr, length uint64) + Unmap(addr hostarch.Addr, length uint64) // Release releases this address space. After releasing, a new AddressSpace // must be acquired via platform.NewAddressSpace(). @@ -337,67 +338,67 @@ type AddressSpaceIO interface { // CopyOut copies len(src) bytes from src to the memory mapped at addr. It // returns the number of bytes copied. If the number of bytes copied is < // len(src), it returns a non-nil error explaining why. - CopyOut(addr usermem.Addr, src []byte) (int, error) + CopyOut(addr hostarch.Addr, src []byte) (int, error) // CopyIn copies len(dst) bytes from the memory mapped at addr to dst. // It returns the number of bytes copied. If the number of bytes copied is // < len(dst), it returns a non-nil error explaining why. - CopyIn(addr usermem.Addr, dst []byte) (int, error) + CopyIn(addr hostarch.Addr, dst []byte) (int, error) // ZeroOut sets toZero bytes to 0, starting at addr. It returns the number // of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a // non-nil error explaining why. - ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error) + ZeroOut(addr hostarch.Addr, toZero uintptr) (uintptr, error) // SwapUint32 atomically sets the uint32 value at addr to new and returns // the previous value. // // Preconditions: addr must be aligned to a 4-byte boundary. - SwapUint32(addr usermem.Addr, new uint32) (uint32, error) + SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) // CompareAndSwapUint32 atomically compares the uint32 value at addr to // old; if they are equal, the value in memory is replaced by new. In // either case, the previous value stored in memory is returned. // // Preconditions: addr must be aligned to a 4-byte boundary. - CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) + CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) // LoadUint32 atomically loads the uint32 value at addr and returns it. // // Preconditions: addr must be aligned to a 4-byte boundary. - LoadUint32(addr usermem.Addr) (uint32, error) + LoadUint32(addr hostarch.Addr) (uint32, error) } // NoAddressSpaceIO implements AddressSpaceIO methods by panicking. type NoAddressSpaceIO struct{} // CopyOut implements AddressSpaceIO.CopyOut. -func (NoAddressSpaceIO) CopyOut(addr usermem.Addr, src []byte) (int, error) { +func (NoAddressSpaceIO) CopyOut(addr hostarch.Addr, src []byte) (int, error) { panic("This platform does not support AddressSpaceIO") } // CopyIn implements AddressSpaceIO.CopyIn. 
-func (NoAddressSpaceIO) CopyIn(addr usermem.Addr, dst []byte) (int, error) { +func (NoAddressSpaceIO) CopyIn(addr hostarch.Addr, dst []byte) (int, error) { panic("This platform does not support AddressSpaceIO") } // ZeroOut implements AddressSpaceIO.ZeroOut. -func (NoAddressSpaceIO) ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error) { +func (NoAddressSpaceIO) ZeroOut(addr hostarch.Addr, toZero uintptr) (uintptr, error) { panic("This platform does not support AddressSpaceIO") } // SwapUint32 implements AddressSpaceIO.SwapUint32. -func (NoAddressSpaceIO) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) { +func (NoAddressSpaceIO) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) { panic("This platform does not support AddressSpaceIO") } // CompareAndSwapUint32 implements AddressSpaceIO.CompareAndSwapUint32. -func (NoAddressSpaceIO) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) { +func (NoAddressSpaceIO) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) { panic("This platform does not support AddressSpaceIO") } // LoadUint32 implements AddressSpaceIO.LoadUint32. -func (NoAddressSpaceIO) LoadUint32(addr usermem.Addr) (uint32, error) { +func (NoAddressSpaceIO) LoadUint32(addr hostarch.Addr) (uint32, error) { panic("This platform does not support AddressSpaceIO") } @@ -406,7 +407,7 @@ func (NoAddressSpaceIO) LoadUint32(addr usermem.Addr) (uint32, error) { // permissions. type SegmentationFault struct { // Addr is the address at which the fault occurred. - Addr usermem.Addr + Addr hostarch.Addr } // Error implements error.Error. diff --git a/pkg/sentry/platform/ptrace/BUILD b/pkg/sentry/platform/ptrace/BUILD index 47efde6a2..d101f2f53 100644 --- a/pkg/sentry/platform/ptrace/BUILD +++ b/pkg/sentry/platform/ptrace/BUILD @@ -25,6 +25,7 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/procid", "//pkg/safecopy", @@ -35,7 +36,6 @@ go_library( "//pkg/sentry/platform", "//pkg/sentry/platform/interrupt", "//pkg/sync", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/platform/ptrace/ptrace.go b/pkg/sentry/platform/ptrace/ptrace.go index 571bfcc2e..828458ce2 100644 --- a/pkg/sentry/platform/ptrace/ptrace.go +++ b/pkg/sentry/platform/ptrace/ptrace.go @@ -49,11 +49,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" pkgcontext "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sentry/platform/interrupt" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) var ( @@ -88,28 +88,28 @@ type context struct { // lastFaultAddr is the last faulting address; this is only meaningful if // lastFaultSP is non-nil. - lastFaultAddr usermem.Addr + lastFaultAddr hostarch.Addr // lastFaultIP is the address of the last faulting instruction; // this is also only meaningful if lastFaultSP is non-nil. - lastFaultIP usermem.Addr + lastFaultIP hostarch.Addr } // Switch runs the provided context in the given address space. 
-func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error) { +func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, cpu int32) (*arch.SignalInfo, hostarch.AccessType, error) { as := mm.AddressSpace() s := as.(*subprocess) isSyscall := s.switchToApp(c, ac) var ( faultSP *subprocess - faultAddr usermem.Addr - faultIP usermem.Addr + faultAddr hostarch.Addr + faultIP hostarch.Addr ) if !isSyscall && linux.Signal(c.signalInfo.Signo) == linux.SIGSEGV { faultSP = s - faultAddr = usermem.Addr(c.signalInfo.Addr()) - faultIP = usermem.Addr(ac.IP()) + faultAddr = hostarch.Addr(c.signalInfo.Addr()) + faultIP = hostarch.Addr(ac.IP()) } // Update the context to reflect the outcome of this context switch. @@ -140,14 +140,14 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a } if isSyscall { - return nil, usermem.NoAccess, nil + return nil, hostarch.NoAccess, nil } si := c.signalInfo if faultSP == nil { // Non-fault signal. - return &si, usermem.NoAccess, platform.ErrContextSignal + return &si, hostarch.NoAccess, platform.ErrContextSignal } // Got a page fault. Ideally, we'd get real fault type here, but ptrace @@ -157,7 +157,7 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a // pointer. // // It was a write fault if the fault is immediately repeated. - at := usermem.Read + at := hostarch.Read if faultAddr == faultIP { at.Execute = true } @@ -235,8 +235,8 @@ func (*PTrace) MapUnit() uint64 { // MaxUserAddress returns the first address that may not be used by user // applications. -func (*PTrace) MaxUserAddress() usermem.Addr { - return usermem.Addr(stubStart) +func (*PTrace) MaxUserAddress() hostarch.Addr { + return hostarch.Addr(stubStart) } // NewAddressSpace returns a new subprocess. diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_unsafe.go index 01e73b019..facb96011 100644 --- a/pkg/sentry/platform/ptrace/ptrace_unsafe.go +++ b/pkg/sentry/platform/ptrace/ptrace_unsafe.go @@ -19,9 +19,9 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/arch/fpu" - "gvisor.dev/gvisor/pkg/usermem" ) // getRegs gets the general purpose register set. @@ -122,7 +122,7 @@ func (t *thread) getSignalInfo(si *arch.SignalInfo) error { // // Precondition: the OS thread must be locked and own t. func (t *thread) clone() (*thread, error) { - r, ok := usermem.Addr(stackPointer(&t.initRegs)).RoundUp() + r, ok := hostarch.Addr(stackPointer(&t.initRegs)).RoundUp() if !ok { return nil, unix.EINVAL } diff --git a/pkg/sentry/platform/ptrace/stub_unsafe.go b/pkg/sentry/platform/ptrace/stub_unsafe.go index 780227248..5c9b7784f 100644 --- a/pkg/sentry/platform/ptrace/stub_unsafe.go +++ b/pkg/sentry/platform/ptrace/stub_unsafe.go @@ -19,8 +19,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/safecopy" - "gvisor.dev/gvisor/pkg/usermem" ) // stub is defined in arch-specific assembly. 
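The stubInit hunk below rounds the stub mapping length up to a whole number of pages by hand; as a reference for the arithmetic, a minimal sketch (the helper name is ours):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/hostarch"
)

// pageAlign rounds length up to the next multiple of the host page size,
// mirroring the mapLen adjustment in stubInit.
func pageAlign(length uintptr) uintptr {
	if offset := length % hostarch.PageSize; offset != 0 {
		length += hostarch.PageSize - offset
	}
	return length
}

func main() {
	fmt.Println(pageAlign(1))                     // one full page (4096 on most hosts)
	fmt.Println(pageAlign(2 * hostarch.PageSize)) // already aligned, unchanged
}

Using hostarch.Addr(length).RoundUp() would be an equivalent formulation that additionally reports overflow via an ok result; the hunk below keeps the explicit modulo form.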
@@ -45,8 +45,8 @@ func stubInit() { stubLen := int(safecopy.FindEndAddress(stubBegin) - stubBegin) stubSlice := unsafeSlice(stubBegin, stubLen) mapLen := uintptr(stubLen) - if offset := mapLen % usermem.PageSize; offset != 0 { - mapLen += usermem.PageSize - offset + if offset := mapLen % hostarch.PageSize; offset != 0 { + mapLen += hostarch.PageSize - offset } for stubStart > 0 { @@ -70,7 +70,7 @@ func stubInit() { } // Attempt to begin at a lower address. - stubStart -= uintptr(usermem.PageSize) + stubStart -= uintptr(hostarch.PageSize) continue } diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go index d2284487a..9c73a725a 100644 --- a/pkg/sentry/platform/ptrace/subprocess.go +++ b/pkg/sentry/platform/ptrace/subprocess.go @@ -20,13 +20,13 @@ import ( "runtime" "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/procid" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/platform" "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/usermem" ) // Linux kernel errnos which "should never be seen by user programs", but will @@ -240,7 +240,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) { func (s *subprocess) unmap() { s.Unmap(0, uint64(stubStart)) if maximumUserAddress != stubEnd { - s.Unmap(usermem.Addr(stubEnd), uint64(maximumUserAddress-stubEnd)) + s.Unmap(hostarch.Addr(stubEnd), uint64(maximumUserAddress-stubEnd)) } } @@ -627,7 +627,7 @@ func (s *subprocess) syscall(sysno uintptr, args ...arch.SyscallArgument) (uintp } // MapFile implements platform.AddressSpace.MapFile. -func (s *subprocess) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error { +func (s *subprocess) MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error { var flags int if precommit { flags |= unix.MAP_POPULATE @@ -644,7 +644,7 @@ func (s *subprocess) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRan } // Unmap implements platform.AddressSpace.Unmap. 
-func (s *subprocess) Unmap(addr usermem.Addr, length uint64) { +func (s *subprocess) Unmap(addr hostarch.Addr, length uint64) { ar, ok := addr.ToRange(length) if !ok { panic(fmt.Sprintf("addr %#x + length %#x overflows", addr, length)) diff --git a/pkg/sentry/socket/BUILD b/pkg/sentry/socket/BUILD index 0ce42b6cc..080859125 100644 --- a/pkg/sentry/socket/BUILD +++ b/pkg/sentry/socket/BUILD @@ -10,6 +10,7 @@ go_library( "//pkg/abi/linux", "//pkg/binary", "//pkg/context", + "//pkg/hostarch", "//pkg/marshal", "//pkg/sentry/device", "//pkg/sentry/fs", diff --git a/pkg/sentry/socket/control/BUILD b/pkg/sentry/socket/control/BUILD index ebcc891b3..0e0e82365 100644 --- a/pkg/sentry/socket/control/BUILD +++ b/pkg/sentry/socket/control/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/abi/linux", "//pkg/binary", "//pkg/context", + "//pkg/hostarch", "//pkg/sentry/fs", "//pkg/sentry/kernel", "//pkg/sentry/kernel/auth", @@ -23,7 +24,6 @@ go_library( "//pkg/sentry/socket/unix/transport", "//pkg/sentry/vfs", "//pkg/syserror", - "//pkg/usermem", ], ) @@ -35,8 +35,8 @@ go_test( deps = [ "//pkg/abi/linux", "//pkg/binary", + "//pkg/hostarch", "//pkg/sentry/socket", - "//pkg/usermem", "@com_github_google_go_cmp//cmp:go_default_library", ], ) diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go index 65b556489..45a05cd63 100644 --- a/pkg/sentry/socket/control/control.go +++ b/pkg/sentry/socket/control/control.go @@ -20,13 +20,13 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/socket" "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) const maxInt = int(^uint(0) >> 1) @@ -181,12 +181,12 @@ func (c *scmCredentials) Equals(oc transport.CredentialsControlMessage) bool { } func putUint64(buf []byte, n uint64) []byte { - usermem.ByteOrder.PutUint64(buf[len(buf):len(buf)+8], n) + hostarch.ByteOrder.PutUint64(buf[len(buf):len(buf)+8], n) return buf[:len(buf)+8] } func putUint32(buf []byte, n uint32) []byte { - usermem.ByteOrder.PutUint32(buf[len(buf):len(buf)+4], n) + hostarch.ByteOrder.PutUint32(buf[len(buf):len(buf)+4], n) return buf[:len(buf)+4] } @@ -242,7 +242,7 @@ func putCmsgStruct(buf []byte, msgLevel, msgType uint32, align uint, data interf hdrBuf := buf - buf = binary.Marshal(buf, usermem.ByteOrder, data) + buf = binary.Marshal(buf, hostarch.ByteOrder, data) // If the control message data brought us over capacity, omit it. 
if cap(buf) != cap(ob) { @@ -475,7 +475,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) } var h linux.ControlMessageHeader - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h) if h.Length < uint64(linux.SizeOfControlMessageHeader) { return socket.ControlMessages{}, syserror.EINVAL @@ -499,7 +499,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) } for j := i; j < i+rightsSize; j += linux.SizeOfControlMessageRight { - fds = append(fds, int32(usermem.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight]))) + fds = append(fds, int32(hostarch.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight]))) } i += binary.AlignUp(length, width) @@ -510,7 +510,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) } var creds linux.ControlMessageCredentials - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds) scmCreds, err := NewSCMCredentials(t, creds) if err != nil { return socket.ControlMessages{}, err @@ -523,7 +523,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) return socket.ControlMessages{}, syserror.EINVAL } var ts linux.Timeval - binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &ts) + binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &ts) cmsgs.IP.Timestamp = ts.ToNsecCapped() cmsgs.IP.HasTimestamp = true i += binary.AlignUp(length, width) @@ -539,7 +539,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) return socket.ControlMessages{}, syserror.EINVAL } cmsgs.IP.HasTOS = true - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], usermem.ByteOrder, &cmsgs.IP.TOS) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &cmsgs.IP.TOS) i += binary.AlignUp(length, width) case linux.IP_PKTINFO: @@ -549,7 +549,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) cmsgs.IP.HasIPPacketInfo = true var packetInfo linux.ControlMessageIPPacketInfo - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageIPPacketInfo], usermem.ByteOrder, &packetInfo) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo) cmsgs.IP.PacketInfo = packetInfo i += binary.AlignUp(length, width) @@ -559,7 +559,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) if length < addr.SizeBytes() { return socket.ControlMessages{}, syserror.EINVAL } - binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr) + binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr) cmsgs.IP.OriginalDstAddress = &addr i += binary.AlignUp(length, width) @@ -583,7 +583,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) return socket.ControlMessages{}, syserror.EINVAL } cmsgs.IP.HasTClass = true - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], usermem.ByteOrder, &cmsgs.IP.TClass) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &cmsgs.IP.TClass) i += binary.AlignUp(length, width) case linux.IPV6_RECVORIGDSTADDR: @@ -591,7 +591,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint) if length < addr.SizeBytes() { return 
socket.ControlMessages{}, syserror.EINVAL } - binary.Unmarshal(buf[i:i+addr.SizeBytes()], usermem.ByteOrder, &addr) + binary.Unmarshal(buf[i:i+addr.SizeBytes()], hostarch.ByteOrder, &addr) cmsgs.IP.OriginalDstAddress = &addr i += binary.AlignUp(length, width) diff --git a/pkg/sentry/socket/control/control_test.go b/pkg/sentry/socket/control/control_test.go index d40a4cc85..7e28a0cef 100644 --- a/pkg/sentry/socket/control/control_test.go +++ b/pkg/sentry/socket/control/control_test.go @@ -22,8 +22,8 @@ import ( "github.com/google/go-cmp/cmp" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/socket" - "gvisor.dev/gvisor/pkg/usermem" ) func TestParse(t *testing.T) { @@ -35,12 +35,12 @@ func TestParse(t *testing.T) { Type: linux.SO_TIMESTAMP, } buf := make([]byte, 0, length) - buf = binary.Marshal(buf, usermem.ByteOrder, &hdr) + buf = binary.Marshal(buf, hostarch.ByteOrder, &hdr) ts := linux.Timeval{ Sec: 2401, Usec: 343, } - buf = binary.Marshal(buf, usermem.ByteOrder, &ts) + buf = binary.Marshal(buf, hostarch.ByteOrder, &ts) cmsg, err := Parse(nil, nil, buf, 8 /* width */) if err != nil { diff --git a/pkg/sentry/socket/hostinet/BUILD b/pkg/sentry/socket/hostinet/BUILD index a8e6f172b..a5c2155a2 100644 --- a/pkg/sentry/socket/hostinet/BUILD +++ b/pkg/sentry/socket/hostinet/BUILD @@ -20,6 +20,7 @@ go_library( "//pkg/binary", "//pkg/context", "//pkg/fdnotifier", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go index 2d9dbbdba..a784e23b5 100644 --- a/pkg/sentry/socket/hostinet/socket.go +++ b/pkg/sentry/socket/hostinet/socket.go @@ -22,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" @@ -321,7 +322,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error { } // GetSockOpt implements socket.Socket.GetSockOpt. 
-func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { if outLen < 0 { return nil, syserr.ErrInvalidArgument } @@ -527,24 +528,24 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.SO_TIMESTAMP: controlMessages.IP.HasTimestamp = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfTimeval], usermem.ByteOrder, &controlMessages.IP.Timestamp) + binary.Unmarshal(unixCmsg.Data[:linux.SizeOfTimeval], hostarch.ByteOrder, &controlMessages.IP.Timestamp) } case linux.SOL_IP: switch unixCmsg.Header.Type { case linux.IP_TOS: controlMessages.IP.HasTOS = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], usermem.ByteOrder, &controlMessages.IP.TOS) + binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTOS], hostarch.ByteOrder, &controlMessages.IP.TOS) case linux.IP_PKTINFO: controlMessages.IP.HasIPPacketInfo = true var packetInfo linux.ControlMessageIPPacketInfo - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], usermem.ByteOrder, &packetInfo) + binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageIPPacketInfo], hostarch.ByteOrder, &packetInfo) controlMessages.IP.PacketInfo = packetInfo case linux.IP_RECVORIGDSTADDR: var addr linux.SockAddrInet - binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr) + binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr) controlMessages.IP.OriginalDstAddress = &addr case unix.IP_RECVERR: @@ -557,11 +558,11 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.IPV6_TCLASS: controlMessages.IP.HasTClass = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], usermem.ByteOrder, &controlMessages.IP.TClass) + binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageTClass], hostarch.ByteOrder, &controlMessages.IP.TClass) case linux.IPV6_RECVORIGDSTADDR: var addr linux.SockAddrInet6 - binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], usermem.ByteOrder, &addr) + binary.Unmarshal(unixCmsg.Data[:addr.SizeBytes()], hostarch.ByteOrder, &addr) controlMessages.IP.OriginalDstAddress = &addr case unix.IPV6_RECVERR: @@ -574,7 +575,7 @@ func parseUnixControlMessages(unixControlMessages []unix.SocketControlMessage) s switch unixCmsg.Header.Type { case linux.TCP_INQ: controlMessages.IP.HasInq = true - binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], usermem.ByteOrder, &controlMessages.IP.Inq) + binary.Unmarshal(unixCmsg.Data[:linux.SizeOfControlMessageInq], hostarch.ByteOrder, &controlMessages.IP.Inq) } } } @@ -688,7 +689,7 @@ func (s *socketOpsCommon) State() uint32 { return 0 } - binary.Unmarshal(buf, usermem.ByteOrder, &info) + binary.Unmarshal(buf, hostarch.ByteOrder, &info) return uint32(info.State) } diff --git a/pkg/sentry/socket/hostinet/socket_unsafe.go b/pkg/sentry/socket/hostinet/socket_unsafe.go index 2890e640d..d3be2d825 100644 --- a/pkg/sentry/socket/hostinet/socket_unsafe.go +++ b/pkg/sentry/socket/hostinet/socket_unsafe.go @@ -20,6 +20,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" 
"gvisor.dev/gvisor/pkg/sentry/socket" @@ -61,7 +62,7 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument return 0, translateIOSyscallError(errno) } var buf [4]byte - usermem.ByteOrder.PutUint32(buf[:], uint32(val)) + hostarch.ByteOrder.PutUint32(buf[:], uint32(val)) _, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{ AddressSpaceActive: true, }) diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go index 5bcf92e14..26e8ae17a 100644 --- a/pkg/sentry/socket/hostinet/stack.go +++ b/pkg/sentry/socket/hostinet/stack.go @@ -22,11 +22,13 @@ import ( "reflect" "strconv" "strings" + "syscall" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/inet" "gvisor.dev/gvisor/pkg/syserr" @@ -146,7 +148,7 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli return fmt.Errorf("RTM_GETLINK returned RTM_NEWLINK message with invalid data length (%d bytes, expected at least %d bytes)", len(link.Data), unix.SizeofIfInfomsg) } var ifinfo unix.IfInfomsg - binary.Unmarshal(link.Data[:unix.SizeofIfInfomsg], usermem.ByteOrder, &ifinfo) + binary.Unmarshal(link.Data[:unix.SizeofIfInfomsg], hostarch.ByteOrder, &ifinfo) inetIF := inet.Interface{ DeviceType: ifinfo.Type, Flags: ifinfo.Flags, @@ -177,7 +179,7 @@ func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.Netli return fmt.Errorf("RTM_GETADDR returned RTM_NEWADDR message with invalid data length (%d bytes, expected at least %d bytes)", len(addr.Data), unix.SizeofIfAddrmsg) } var ifaddr unix.IfAddrmsg - binary.Unmarshal(addr.Data[:unix.SizeofIfAddrmsg], usermem.ByteOrder, &ifaddr) + binary.Unmarshal(addr.Data[:unix.SizeofIfAddrmsg], hostarch.ByteOrder, &ifaddr) inetAddr := inet.InterfaceAddr{ Family: ifaddr.Family, PrefixLen: ifaddr.Prefixlen, @@ -209,7 +211,7 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error) } var ifRoute unix.RtMsg - binary.Unmarshal(routeMsg.Data[:unix.SizeofRtMsg], usermem.ByteOrder, &ifRoute) + binary.Unmarshal(routeMsg.Data[:unix.SizeofRtMsg], hostarch.ByteOrder, &ifRoute) inetRoute := inet.Route{ Family: ifRoute.Family, DstLen: ifRoute.Dst_len, @@ -243,7 +245,7 @@ func ExtractHostRoutes(routeMsgs []syscall.NetlinkMessage) ([]inet.Route, error) if len(attr.Value) != expected { return nil, fmt.Errorf("RTM_GETROUTE returned RTM_NEWROUTE message with invalid attribute data length (%d bytes, expected %d bytes)", len(attr.Value), expected) } - binary.Unmarshal(attr.Value, usermem.ByteOrder, &inetRoute.OutputInterface) + binary.Unmarshal(attr.Value, hostarch.ByteOrder, &inetRoute.OutputInterface) } } diff --git a/pkg/sentry/socket/netfilter/BUILD b/pkg/sentry/socket/netfilter/BUILD index 8aea0200f..4381dfa06 100644 --- a/pkg/sentry/socket/netfilter/BUILD +++ b/pkg/sentry/socket/netfilter/BUILD @@ -20,12 +20,12 @@ go_library( deps = [ "//pkg/abi/linux", "//pkg/binary", + "//pkg/hostarch", "//pkg/log", "//pkg/sentry/kernel", "//pkg/syserr", "//pkg/tcpip", "//pkg/tcpip/header", "//pkg/tcpip/stack", - "//pkg/usermem", ], ) diff --git a/pkg/sentry/socket/netfilter/extensions.go b/pkg/sentry/socket/netfilter/extensions.go index e339f9bea..4bd305a44 100644 --- a/pkg/sentry/socket/netfilter/extensions.go +++ b/pkg/sentry/socket/netfilter/extensions.go @@ -19,10 +19,10 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + 
"gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) // TODO(gvisor.dev/issue/170): The following per-matcher params should be @@ -89,7 +89,7 @@ func marshalEntryMatch(name string, data []byte) []byte { copy(matcher.Name[:], name) buf := make([]byte, 0, size) - buf = binary.Marshal(buf, usermem.ByteOrder, matcher) + buf = binary.Marshal(buf, hostarch.ByteOrder, matcher) return append(buf, make([]byte, size-len(buf))...) } diff --git a/pkg/sentry/socket/netfilter/ipv4.go b/pkg/sentry/socket/netfilter/ipv4.go index 2f913787b..1fc4cb651 100644 --- a/pkg/sentry/socket/netfilter/ipv4.go +++ b/pkg/sentry/socket/netfilter/ipv4.go @@ -19,11 +19,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) // emptyIPv4Filter is for comparison with a rule's filters to determine whether @@ -142,7 +142,7 @@ func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, } var entry linux.IPTEntry buf := optVal[:linux.SizeOfIPTEntry] - binary.Unmarshal(buf, usermem.ByteOrder, &entry) + binary.Unmarshal(buf, hostarch.ByteOrder, &entry) initialOptValLen := len(optVal) optVal = optVal[linux.SizeOfIPTEntry:] diff --git a/pkg/sentry/socket/netfilter/ipv6.go b/pkg/sentry/socket/netfilter/ipv6.go index 263d9d3b5..67a52b628 100644 --- a/pkg/sentry/socket/netfilter/ipv6.go +++ b/pkg/sentry/socket/netfilter/ipv6.go @@ -19,11 +19,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) // emptyIPv6Filter is for comparison with a rule's filters to determine whether @@ -145,7 +145,7 @@ func modifyEntries6(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, } var entry linux.IP6TEntry buf := optVal[:linux.SizeOfIP6TEntry] - binary.Unmarshal(buf, usermem.ByteOrder, &entry) + binary.Unmarshal(buf, hostarch.ByteOrder, &entry) initialOptValLen := len(optVal) optVal = optVal[linux.SizeOfIP6TEntry:] diff --git a/pkg/sentry/socket/netfilter/netfilter.go b/pkg/sentry/socket/netfilter/netfilter.go index 7ae18b2a3..5200e08ed 100644 --- a/pkg/sentry/socket/netfilter/netfilter.go +++ b/pkg/sentry/socket/netfilter/netfilter.go @@ -23,12 +23,12 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) // enableLogging controls whether to log the (de)serialization of netfilter @@ -83,7 +83,7 @@ func DefaultLinuxTables() *stack.IPTables { } // GetInfo returns information about iptables. -func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, ipv6 bool) (linux.IPTGetinfo, *syserr.Error) { +func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, ipv6 bool) (linux.IPTGetinfo, *syserr.Error) { // Read in the struct and table name. 
var info linux.IPTGetinfo if _, err := info.CopyIn(t, outPtr); err != nil { @@ -106,7 +106,7 @@ func GetInfo(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, ipv6 bool) } // GetEntries4 returns netstack's iptables rules. -func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) { +func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLen int) (linux.KernelIPTGetEntries, *syserr.Error) { // Read in the struct and table name. var userEntries linux.IPTGetEntries if _, err := userEntries.CopyIn(t, outPtr); err != nil { @@ -130,7 +130,7 @@ func GetEntries4(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen } // GetEntries6 returns netstack's ip6tables rules. -func GetEntries6(t *kernel.Task, stack *stack.Stack, outPtr usermem.Addr, outLen int) (linux.KernelIP6TGetEntries, *syserr.Error) { +func GetEntries6(t *kernel.Task, stack *stack.Stack, outPtr hostarch.Addr, outLen int) (linux.KernelIP6TGetEntries, *syserr.Error) { // Read in the struct and table name. IPv4 and IPv6 utilize structs // with the same layout. var userEntries linux.IPTGetEntries @@ -179,7 +179,7 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error { var replace linux.IPTReplace replaceBuf := optVal[:linux.SizeOfIPTReplace] optVal = optVal[linux.SizeOfIPTReplace:] - binary.Unmarshal(replaceBuf, usermem.ByteOrder, &replace) + binary.Unmarshal(replaceBuf, hostarch.ByteOrder, &replace) // TODO(gvisor.dev/issue/170): Support other tables. var table stack.Table @@ -310,7 +310,7 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher, } var match linux.XTEntryMatch buf := optVal[:linux.SizeOfXTEntryMatch] - binary.Unmarshal(buf, usermem.ByteOrder, &match) + binary.Unmarshal(buf, hostarch.ByteOrder, &match) nflog("set entries: parsed entry match %q: %+v", match.Name.String(), match) // Check some invariants. @@ -381,7 +381,7 @@ func hookFromLinux(hook int) stack.Hook { // TargetRevision returns a linux.XTGetRevision for a given target. It sets // Revision to the highest supported value, unless the provided revision number // is larger. -func TargetRevision(t *kernel.Task, revPtr usermem.Addr, netProto tcpip.NetworkProtocolNumber) (linux.XTGetRevision, *syserr.Error) { +func TargetRevision(t *kernel.Task, revPtr hostarch.Addr, netProto tcpip.NetworkProtocolNumber) (linux.XTGetRevision, *syserr.Error) { // Read in the target name and version. var rev linux.XTGetRevision if _, err := rev.CopyIn(t, revPtr); err != nil { diff --git a/pkg/sentry/socket/netfilter/owner_matcher.go b/pkg/sentry/socket/netfilter/owner_matcher.go index 5f80d82ea..b2cc6be20 100644 --- a/pkg/sentry/socket/netfilter/owner_matcher.go +++ b/pkg/sentry/socket/netfilter/owner_matcher.go @@ -19,8 +19,8 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) const matcherNameOwner = "owner" @@ -60,7 +60,7 @@ func (ownerMarshaler) marshal(mr matcher) []byte { } buf := make([]byte, 0, linux.SizeOfIPTOwnerInfo) - return marshalEntryMatch(matcherNameOwner, binary.Marshal(buf, usermem.ByteOrder, iptOwnerInfo)) + return marshalEntryMatch(matcherNameOwner, binary.Marshal(buf, hostarch.ByteOrder, iptOwnerInfo)) } // unmarshal implements matchMaker.unmarshal. @@ -72,7 +72,7 @@ func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack. 
// For alignment reasons, the match's total size may // exceed what's strictly necessary to hold matchData. var matchData linux.IPTOwnerInfo - binary.Unmarshal(buf[:linux.SizeOfIPTOwnerInfo], usermem.ByteOrder, &matchData) + binary.Unmarshal(buf[:linux.SizeOfIPTOwnerInfo], hostarch.ByteOrder, &matchData) nflog("parseMatchers: parsed IPTOwnerInfo: %+v", matchData) var owner OwnerMatcher diff --git a/pkg/sentry/socket/netfilter/targets.go b/pkg/sentry/socket/netfilter/targets.go index f2653d523..80f8c6430 100644 --- a/pkg/sentry/socket/netfilter/targets.go +++ b/pkg/sentry/socket/netfilter/targets.go @@ -19,11 +19,11 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) // ErrorTargetName is used to mark targets as error targets. Error targets @@ -167,7 +167,7 @@ func (*standardTargetMaker) marshal(target target) []byte { } ret := make([]byte, 0, linux.SizeOfXTStandardTarget) - return binary.Marshal(ret, usermem.ByteOrder, xt) + return binary.Marshal(ret, hostarch.ByteOrder, xt) } func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -177,7 +177,7 @@ func (*standardTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) ( } var standardTarget linux.XTStandardTarget buf = buf[:linux.SizeOfXTStandardTarget] - binary.Unmarshal(buf, usermem.ByteOrder, &standardTarget) + binary.Unmarshal(buf, hostarch.ByteOrder, &standardTarget) if standardTarget.Verdict < 0 { // A Verdict < 0 indicates a non-jump verdict. @@ -223,7 +223,7 @@ func (*errorTargetMaker) marshal(target target) []byte { copy(xt.Target.Name[:], ErrorTargetName) ret := make([]byte, 0, linux.SizeOfXTErrorTarget) - return binary.Marshal(ret, usermem.ByteOrder, xt) + return binary.Marshal(ret, hostarch.ByteOrder, xt) } func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -233,7 +233,7 @@ func (*errorTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar } var errTgt linux.XTErrorTarget buf = buf[:linux.SizeOfXTErrorTarget] - binary.Unmarshal(buf, usermem.ByteOrder, &errTgt) + binary.Unmarshal(buf, hostarch.ByteOrder, &errTgt) // Error targets are used in 2 cases: // * An actual error case. These rules have an error named @@ -281,7 +281,7 @@ func (*redirectTargetMaker) marshal(target target) []byte { xt.NfRange.RangeIPV4.Flags |= linux.NF_NAT_RANGE_PROTO_SPECIFIED xt.NfRange.RangeIPV4.MinPort = htons(rt.Port) xt.NfRange.RangeIPV4.MaxPort = xt.NfRange.RangeIPV4.MinPort - return binary.Marshal(ret, usermem.ByteOrder, xt) + return binary.Marshal(ret, hostarch.ByteOrder, xt) } func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -297,7 +297,7 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) ( var rt linux.XTRedirectTarget buf = buf[:linux.SizeOfXTRedirectTarget] - binary.Unmarshal(buf, usermem.ByteOrder, &rt) + binary.Unmarshal(buf, hostarch.ByteOrder, &rt) // Copy linux.XTRedirectTarget to stack.RedirectTarget. 
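The target marshal/unmarshal hunks above all follow the same shape: serialize a fixed-size header with the native byte order, and on the way back decode only the fixed-size prefix of the option buffer. A hedged sketch with a hypothetical header struct, using the standard encoding/binary package in place of gVisor's pkg/binary:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// xtHeader is a hypothetical fixed-size header standing in for structs such
// as linux.XTStandardTarget; gVisor serializes those with its own pkg/binary,
// while this sketch uses the standard encoding/binary package.
type xtHeader struct {
	TargetSize uint16
	Verdict    int32
}

func marshalHeader(h xtHeader) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, h) // fixed-size write; cannot fail
	return buf.Bytes()
}

func unmarshalHeader(optVal []byte) (xtHeader, error) {
	var h xtHeader
	size := binary.Size(h)
	if len(optVal) < size {
		return h, fmt.Errorf("buffer too small: %d < %d", len(optVal), size)
	}
	// As in the unmarshal hunks above, only the fixed-size prefix is decoded;
	// any trailing target-specific payload is handled separately.
	err := binary.Read(bytes.NewReader(optVal[:size]), binary.LittleEndian, &h)
	return h, err
}

func main() {
	buf := marshalHeader(xtHeader{TargetSize: 8, Verdict: -1})
	h, _ := unmarshalHeader(append(buf, 0xde, 0xad)) // extra bytes are ignored
	fmt.Printf("%+v\n", h)                           // {TargetSize:8 Verdict:-1}
}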
target := redirectTarget{RedirectTarget: stack.RedirectTarget{ @@ -372,7 +372,7 @@ func (*nfNATTargetMaker) marshal(target target) []byte { nt.Range.MaxProto = nt.Range.MinProto ret := make([]byte, 0, nfNATMarhsalledSize) - return binary.Marshal(ret, usermem.ByteOrder, nt) + return binary.Marshal(ret, hostarch.ByteOrder, nt) } func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (target, *syserr.Error) { @@ -388,7 +388,7 @@ func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar var natRange linux.NFNATRange buf = buf[linux.SizeOfXTEntryTarget:nfNATMarhsalledSize] - binary.Unmarshal(buf, usermem.ByteOrder, &natRange) + binary.Unmarshal(buf, hostarch.ByteOrder, &natRange) // We don't support port or address ranges. if natRange.MinAddr != natRange.MaxAddr { @@ -454,7 +454,7 @@ func parseTarget(filter stack.IPHeaderFilter, optVal []byte, ipv6 bool) (stack.T } var target linux.XTEntryTarget buf := optVal[:linux.SizeOfXTEntryTarget] - binary.Unmarshal(buf, usermem.ByteOrder, &target) + binary.Unmarshal(buf, hostarch.ByteOrder, &target) return unmarshalTarget(target, filter, optVal) } @@ -487,11 +487,11 @@ func (jt *JumpTarget) Action(*stack.PacketBuffer, *stack.ConnTrack, stack.Hook, func ntohs(port uint16) uint16 { buf := make([]byte, 2) binary.BigEndian.PutUint16(buf, port) - return usermem.ByteOrder.Uint16(buf) + return hostarch.ByteOrder.Uint16(buf) } func htons(port uint16) uint16 { buf := make([]byte, 2) - usermem.ByteOrder.PutUint16(buf, port) + hostarch.ByteOrder.PutUint16(buf, port) return binary.BigEndian.Uint16(buf) } diff --git a/pkg/sentry/socket/netfilter/tcp_matcher.go b/pkg/sentry/socket/netfilter/tcp_matcher.go index 678d6b578..69557f515 100644 --- a/pkg/sentry/socket/netfilter/tcp_matcher.go +++ b/pkg/sentry/socket/netfilter/tcp_matcher.go @@ -19,9 +19,9 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) const matcherNameTCP = "tcp" @@ -48,7 +48,7 @@ func (tcpMarshaler) marshal(mr matcher) []byte { DestinationPortEnd: matcher.destinationPortEnd, } buf := make([]byte, 0, linux.SizeOfXTTCP) - return marshalEntryMatch(matcherNameTCP, binary.Marshal(buf, usermem.ByteOrder, xttcp)) + return marshalEntryMatch(matcherNameTCP, binary.Marshal(buf, hostarch.ByteOrder, xttcp)) } // unmarshal implements matchMaker.unmarshal. @@ -60,7 +60,7 @@ func (tcpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma // For alignment reasons, the match's total size may // exceed what's strictly necessary to hold matchData. 
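The ntohs/htons helpers rewritten in the targets.go hunk convert between network order and the native order by round-tripping through a 2-byte buffer. The same logic, standing alone, with binary.LittleEndian playing the role of hostarch.ByteOrder:

package main

import (
	"encoding/binary"
	"fmt"
)

// nativeOrder stands in for hostarch.ByteOrder (little-endian here).
var nativeOrder binary.ByteOrder = binary.LittleEndian

// ntohs converts a network-order (big-endian) port to native order,
// mirroring the helper in the targets.go hunk above.
func ntohs(port uint16) uint16 {
	buf := make([]byte, 2)
	binary.BigEndian.PutUint16(buf, port)
	return nativeOrder.Uint16(buf)
}

// htons is the inverse conversion.
func htons(port uint16) uint16 {
	buf := make([]byte, 2)
	nativeOrder.PutUint16(buf, port)
	return binary.BigEndian.Uint16(buf)
}

func main() {
	fmt.Printf("htons(80) = %#x\n", htons(80)) // 0x5000 on a little-endian host
	fmt.Println(ntohs(htons(80)) == 80)        // true
}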
var matchData linux.XTTCP - binary.Unmarshal(buf[:linux.SizeOfXTTCP], usermem.ByteOrder, &matchData) + binary.Unmarshal(buf[:linux.SizeOfXTTCP], hostarch.ByteOrder, &matchData) nflog("parseMatchers: parsed XTTCP: %+v", matchData) if matchData.Option != 0 || diff --git a/pkg/sentry/socket/netfilter/udp_matcher.go b/pkg/sentry/socket/netfilter/udp_matcher.go index f8568873f..6a60e6bd6 100644 --- a/pkg/sentry/socket/netfilter/udp_matcher.go +++ b/pkg/sentry/socket/netfilter/udp_matcher.go @@ -19,9 +19,9 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/usermem" ) const matcherNameUDP = "udp" @@ -48,7 +48,7 @@ func (udpMarshaler) marshal(mr matcher) []byte { DestinationPortEnd: matcher.destinationPortEnd, } buf := make([]byte, 0, linux.SizeOfXTUDP) - return marshalEntryMatch(matcherNameUDP, binary.Marshal(buf, usermem.ByteOrder, xtudp)) + return marshalEntryMatch(matcherNameUDP, binary.Marshal(buf, hostarch.ByteOrder, xtudp)) } // unmarshal implements matchMaker.unmarshal. @@ -60,7 +60,7 @@ func (udpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Ma // For alignment reasons, the match's total size may exceed what's // strictly necessary to hold matchData. var matchData linux.XTUDP - binary.Unmarshal(buf[:linux.SizeOfXTUDP], usermem.ByteOrder, &matchData) + binary.Unmarshal(buf[:linux.SizeOfXTUDP], hostarch.ByteOrder, &matchData) nflog("parseMatchers: parsed XTUDP: %+v", matchData) if matchData.InverseFlags != 0 { diff --git a/pkg/sentry/socket/netlink/BUILD b/pkg/sentry/socket/netlink/BUILD index 9313e1167..171b95c63 100644 --- a/pkg/sentry/socket/netlink/BUILD +++ b/pkg/sentry/socket/netlink/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/abi/linux", "//pkg/binary", "//pkg/context", + "//pkg/hostarch", "//pkg/marshal", "//pkg/marshal/primitive", "//pkg/sentry/arch", diff --git a/pkg/sentry/socket/netlink/message.go b/pkg/sentry/socket/netlink/message.go index 0899c61d1..ab0e68af7 100644 --- a/pkg/sentry/socket/netlink/message.go +++ b/pkg/sentry/socket/netlink/message.go @@ -20,7 +20,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // alignPad returns the length of padding required for alignment. @@ -42,7 +42,7 @@ type Message struct { func NewMessage(hdr linux.NetlinkMessageHeader) *Message { return &Message{ hdr: hdr, - buf: binary.Marshal(nil, usermem.ByteOrder, hdr), + buf: binary.Marshal(nil, hostarch.ByteOrder, hdr), } } @@ -58,7 +58,7 @@ func ParseMessage(buf []byte) (msg *Message, rest []byte, ok bool) { return } var hdr linux.NetlinkMessageHeader - binary.Unmarshal(hdrBytes, usermem.ByteOrder, &hdr) + binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr) // Msg portion. totalMsgLen := int(hdr.Length) @@ -105,7 +105,7 @@ func (m *Message) GetData(msg interface{}) (AttrsView, bool) { if !ok { return nil, false } - binary.Unmarshal(msgBytes, usermem.ByteOrder, msg) + binary.Unmarshal(msgBytes, hostarch.ByteOrder, msg) numPad := alignPad(linux.NetlinkMessageHeaderSize+size, linux.NLMSG_ALIGNTO) // Linux permits the last message not being aligned, just consume all of it. @@ -126,7 +126,7 @@ func (m *Message) GetData(msg interface{}) (AttrsView, bool) { // calling Finalize. func (m *Message) Finalize() []byte { // Update length, which is the first 4 bytes of the header. 
- usermem.ByteOrder.PutUint32(m.buf, uint32(len(m.buf))) + hostarch.ByteOrder.PutUint32(m.buf, uint32(len(m.buf))) // Align the message. Note that the message length in the header (set // above) is the useful length of the message, not the total aligned @@ -146,7 +146,7 @@ func (m *Message) putZeros(n int) { // Put serializes v into the message. func (m *Message) Put(v interface{}) { - m.buf = binary.Marshal(m.buf, usermem.ByteOrder, v) + m.buf = binary.Marshal(m.buf, hostarch.ByteOrder, v) } // PutAttr adds v to the message as a netlink attribute. @@ -251,7 +251,7 @@ func (v AttrsView) ParseFirst() (hdr linux.NetlinkAttrHeader, value []byte, rest if !ok { return } - binary.Unmarshal(hdrBytes, usermem.ByteOrder, &hdr) + binary.Unmarshal(hdrBytes, hostarch.ByteOrder, &hdr) value, ok = b.Extract(int(hdr.Length) - linux.NetlinkAttrHeaderSize) if !ok { diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go index d5ffc75ce..30c297149 100644 --- a/pkg/sentry/socket/netlink/socket.go +++ b/pkg/sentry/socket/netlink/socket.go @@ -22,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" @@ -222,7 +223,7 @@ func ExtractSockAddr(b []byte) (*linux.SockAddrNetlink, *syserr.Error) { } var sa linux.SockAddrNetlink - binary.Unmarshal(b[:linux.SockAddrNetlinkSize], usermem.ByteOrder, &sa) + binary.Unmarshal(b[:linux.SockAddrNetlinkSize], hostarch.ByteOrder, &sa) if sa.Family != linux.AF_NETLINK { return nil, syserr.ErrInvalidArgument @@ -327,7 +328,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error { } // GetSockOpt implements socket.Socket.GetSockOpt. 
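The netlink Message changes above keep the framing logic intact: the header is marshaled up front, Finalize patches the real length into the first four bytes, and the buffer is padded to the NLMSG_ALIGNTO boundary. A minimal sketch of that length-patch-and-pad step, with a 4-byte length field standing in for the full 16-byte netlink header:

package main

import (
	"encoding/binary"
	"fmt"
)

const alignTo = 4 // stands in for linux.NLMSG_ALIGNTO

// alignPad returns the padding needed to round length up to a multiple of
// align, mirroring the helper in message.go touched above.
func alignPad(length, align int) int {
	return (align - length%align) % align
}

func main() {
	// Reserve room for the length field, then append the payload.
	msg := make([]byte, 4)
	msg = append(msg, []byte("payload")...)

	// Finalize: patch the real length into the first 4 bytes, then pad so
	// the next message starts on an aligned boundary.
	binary.LittleEndian.PutUint32(msg, uint32(len(msg)))
	msg = append(msg, make([]byte, alignPad(len(msg), alignTo))...)

	fmt.Println(binary.LittleEndian.Uint32(msg), len(msg)) // 11 12
}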
-func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *socketOpsCommon) GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { switch level { case linux.SOL_SOCKET: switch name { @@ -388,7 +389,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt [] if len(opt) < sizeOfInt32 { return syserr.ErrInvalidArgument } - size := usermem.ByteOrder.Uint32(opt) + size := hostarch.ByteOrder.Uint32(opt) if size < minSendBufferSize { size = minSendBufferSize } else if size > maxSendBufferSize { @@ -411,7 +412,7 @@ func (s *socketOpsCommon) SetSockOpt(t *kernel.Task, level int, name int, opt [] if len(opt) < sizeOfInt32 { return syserr.ErrInvalidArgument } - passcred := usermem.ByteOrder.Uint32(opt) + passcred := hostarch.ByteOrder.Uint32(opt) s.ep.SocketOptions().SetPassCred(passcred != 0) return nil diff --git a/pkg/sentry/socket/netstack/BUILD b/pkg/sentry/socket/netstack/BUILD index 244d99436..0b39a5b67 100644 --- a/pkg/sentry/socket/netstack/BUILD +++ b/pkg/sentry/socket/netstack/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/abi/linux", "//pkg/binary", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go index 64e70ab9d..ed6572bab 100644 --- a/pkg/sentry/socket/netstack/netstack.go +++ b/pkg/sentry/socket/netstack/netstack.go @@ -37,6 +37,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" @@ -600,7 +601,7 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { return syserr.ErrInvalidArgument } - family := usermem.ByteOrder.Uint16(sockaddr) + family := hostarch.ByteOrder.Uint16(sockaddr) var addr tcpip.FullAddress // Bind for AF_PACKET requires only family, protocol and ifindex. @@ -611,7 +612,7 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { if len(sockaddr) < sockAddrLinkSize { return syserr.ErrInvalidArgument } - binary.Unmarshal(sockaddr[:sockAddrLinkSize], usermem.ByteOrder, &a) + binary.Unmarshal(sockaddr[:sockAddrLinkSize], hostarch.ByteOrder, &a) if a.Protocol != uint16(s.protocol) { return syserr.ErrInvalidArgument @@ -757,7 +758,7 @@ func (s *socketOpsCommon) Shutdown(t *kernel.Task, how int) *syserr.Error { // GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by // tcpip.Endpoint. -func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { // TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is // implemented specifically for netstack.SocketOperations rather than // commonEndpoint. commonEndpoint should be extended to support socket @@ -793,7 +794,7 @@ func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr us // GetSockOpt can be used to implement the linux syscall getsockopt(2) for // sockets backed by a commonEndpoint. 
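The SO_SNDBUF handling above shows the usual sockopt pattern: reject short buffers, decode a native-order 32-bit value, and clamp it to sane bounds. A sketch with hypothetical limits (the real minSendBufferSize/maxSendBufferSize constants live in the netlink socket code):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// Hypothetical bounds, for illustration only.
const (
	minSendBufferSize = 4 << 10
	maxSendBufferSize = 4 << 20
	sizeOfInt32       = 4
)

// parseSendBuffer mirrors the SO_SNDBUF hunk above: the option payload is a
// native-order 32-bit size, clamped to the allowed range.
func parseSendBuffer(opt []byte) (uint32, error) {
	if len(opt) < sizeOfInt32 {
		return 0, errors.New("EINVAL: option too short")
	}
	size := binary.LittleEndian.Uint32(opt) // hostarch.ByteOrder in the sentry
	if size < minSendBufferSize {
		size = minSendBufferSize
	} else if size > maxSendBufferSize {
		size = maxSendBufferSize
	}
	return size, nil
}

func main() {
	opt := make([]byte, 4)
	binary.LittleEndian.PutUint32(opt, 1) // far below the minimum
	size, _ := parseSendBuffer(opt)
	fmt.Println(size) // 4096
}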
-func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func GetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, family int, skType linux.SockType, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { switch level { case linux.SOL_SOCKET: return getSockOptSocket(t, s, ep, family, skType, name, outLen) @@ -1244,7 +1245,7 @@ func getSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name, } // getSockOptIPv6 implements GetSockOpt when level is SOL_IPV6. -func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { if _, ok := ep.(tcpip.Endpoint); !ok { log.Warningf("SOL_IPV6 options not supported on endpoints other than tcpip.Endpoint: option = %d", name) return nil, syserr.ErrUnknownProtocolOption @@ -1392,7 +1393,7 @@ func getSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name } // getSockOptIP implements GetSockOpt when level is SOL_IP. -func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr usermem.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) { +func getSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, outPtr hostarch.Addr, outLen int, family int) (marshal.Marshallable, *syserr.Error) { if _, ok := ep.(tcpip.Endpoint); !ok { log.Warningf("SOL_IP options not supported on endpoints other than tcpip.Endpoint: option = %d", name) return nil, syserr.ErrUnknownProtocolOption @@ -1602,7 +1603,7 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa } s.readMu.Lock() defer s.readMu.Unlock() - s.sockOptTimestamp = usermem.ByteOrder.Uint32(optVal) != 0 + s.sockOptTimestamp = hostarch.ByteOrder.Uint32(optVal) != 0 return nil } if level == linux.SOL_TCP && name == linux.TCP_INQ { @@ -1611,7 +1612,7 @@ func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVa } s.readMu.Lock() defer s.readMu.Unlock() - s.sockOptInq = usermem.ByteOrder.Uint32(optVal) != 0 + s.sockOptInq = hostarch.ByteOrder.Uint32(optVal) != 0 return nil } @@ -1659,7 +1660,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetSendBufferSize(int64(v), true) return nil @@ -1668,7 +1669,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.ReceiveBufferSizeOption, int(v))) case linux.SO_REUSEADDR: @@ -1676,7 +1677,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetReuseAddress(v != 0) return nil @@ -1685,7 +1686,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := 
hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetReusePort(v != 0) return nil @@ -1714,7 +1715,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetBroadcast(v != 0) return nil @@ -1723,7 +1724,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetPassCred(v != 0) return nil @@ -1732,7 +1733,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetKeepAlive(v != 0) return nil @@ -1742,7 +1743,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam } var v linux.Timeval - binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v) + binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v) if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) { return syserr.ErrDomain } @@ -1755,7 +1756,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam } var v linux.Timeval - binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v) + binary.Unmarshal(optVal[:linux.SizeOfTimeval], hostarch.ByteOrder, &v) if v.Usec < 0 || v.Usec >= int64(time.Second/time.Microsecond) { return syserr.ErrDomain } @@ -1767,7 +1768,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) if v == 0 { socket.SetSockOptEmitUnimplementedEvent(t, name) @@ -1781,7 +1782,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetNoChecksum(v != 0) return nil @@ -1791,7 +1792,7 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam } var v linux.Linger - binary.Unmarshal(optVal[:linux.SizeOfLinger], usermem.ByteOrder, &v) + binary.Unmarshal(optVal[:linux.SizeOfLinger], hostarch.ByteOrder, &v) ep.SocketOptions().SetLinger(tcpip.LingerOption{ Enabled: v.OnOff != 0, @@ -1824,7 +1825,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetDelayOption(v == 0) return nil @@ -1833,7 +1834,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetCorkOption(v != 0) return nil @@ -1842,7 +1843,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetQuickAck(v != 0) return nil @@ -1851,7 +1852,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) return 
syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.MaxSegOption, int(v))) case linux.TCP_KEEPIDLE: @@ -1859,7 +1860,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) if v < 1 || v > linux.MAX_TCP_KEEPIDLE { return syserr.ErrInvalidArgument } @@ -1871,7 +1872,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) if v < 1 || v > linux.MAX_TCP_KEEPINTVL { return syserr.ErrInvalidArgument } @@ -1883,7 +1884,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) if v < 1 || v > linux.MAX_TCP_KEEPCNT { return syserr.ErrInvalidArgument } @@ -1894,7 +1895,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := int32(usermem.ByteOrder.Uint32(optVal)) + v := int32(hostarch.ByteOrder.Uint32(optVal)) if v < 0 { return syserr.ErrInvalidArgument } @@ -1913,7 +1914,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i return syserr.ErrInvalidArgument } - v := int32(usermem.ByteOrder.Uint32(optVal)) + v := int32(hostarch.ByteOrder.Uint32(optVal)) opt := tcpip.TCPLingerTimeoutOption(time.Second * time.Duration(v)) return syserr.TranslateNetstackError(ep.SetSockOpt(&opt)) @@ -1921,7 +1922,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i if len(optVal) < sizeOfInt32 { return syserr.ErrInvalidArgument } - v := int32(usermem.ByteOrder.Uint32(optVal)) + v := int32(hostarch.ByteOrder.Uint32(optVal)) if v < 0 { v = 0 } @@ -1932,7 +1933,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i if len(optVal) < sizeOfInt32 { return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.TCPSynCountOption, int(v))) @@ -1940,7 +1941,7 @@ func setSockOptTCP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name i if len(optVal) < sizeOfInt32 { return syserr.ErrInvalidArgument } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) return syserr.TranslateNetstackError(ep.SetSockOptInt(tcpip.TCPWindowClampOption, int(v))) @@ -1978,7 +1979,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name return syserr.ErrInvalidEndpointState } - v := usermem.ByteOrder.Uint32(optVal) + v := hostarch.ByteOrder.Uint32(optVal) ep.SocketOptions().SetV6Only(v != 0) return nil @@ -2024,7 +2025,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name if len(optVal) < sizeOfInt32 { return syserr.ErrInvalidArgument } - v := int32(usermem.ByteOrder.Uint32(optVal)) + v := int32(hostarch.ByteOrder.Uint32(optVal)) ep.SocketOptions().SetReceiveOriginalDstAddress(v != 0) return nil @@ -2033,7 +2034,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name if len(optVal) < sizeOfInt32 { return syserr.ErrInvalidArgument } - v := int32(usermem.ByteOrder.Uint32(optVal)) + v := int32(hostarch.ByteOrder.Uint32(optVal)) if v < -1 || v > 255 { return syserr.ErrInvalidArgument } @@ -2117,12 +2118,12 @@ func 
copyInMulticastRequest(optVal []byte, allowAddr bool) (linux.InetMulticastR if len(optVal) >= inetMulticastRequestWithNICSize { var req linux.InetMulticastRequestWithNIC - binary.Unmarshal(optVal[:inetMulticastRequestWithNICSize], usermem.ByteOrder, &req) + binary.Unmarshal(optVal[:inetMulticastRequestWithNICSize], hostarch.ByteOrder, &req) return req, nil } var req linux.InetMulticastRequestWithNIC - binary.Unmarshal(optVal[:inetMulticastRequestSize], usermem.ByteOrder, &req.InetMulticastRequest) + binary.Unmarshal(optVal[:inetMulticastRequestSize], hostarch.ByteOrder, &req.InetMulticastRequest) return req, nil } @@ -2132,7 +2133,7 @@ func copyInMulticastV6Request(optVal []byte) (linux.Inet6MulticastRequest, *syse } var req linux.Inet6MulticastRequest - binary.Unmarshal(optVal[:inet6MulticastRequestSize], usermem.ByteOrder, &req) + binary.Unmarshal(optVal[:inet6MulticastRequestSize], hostarch.ByteOrder, &req) return req, nil } @@ -2145,7 +2146,7 @@ func parseIntOrChar(buf []byte) (int32, *syserr.Error) { } if len(buf) >= sizeOfInt32 { - return int32(usermem.ByteOrder.Uint32(buf)), nil + return int32(hostarch.ByteOrder.Uint32(buf)), nil } return int32(buf[0]), nil @@ -3007,7 +3008,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe if arg == linux.SIOCGIFNAME { // Gets the name of the interface given the interface index // stored in ifr_ifindex. - index = int32(usermem.ByteOrder.Uint32(ifr.Data[:4])) + index = int32(hostarch.ByteOrder.Uint32(ifr.Data[:4])) if iface, ok := stack.Interfaces()[index]; ok { ifr.SetName(iface.Name) return nil @@ -3029,7 +3030,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe switch arg { case linux.SIOCGIFINDEX: // Copy out the index to the data. - usermem.ByteOrder.PutUint32(ifr.Data[:], uint32(index)) + hostarch.ByteOrder.PutUint32(ifr.Data[:], uint32(index)) case linux.SIOCGIFHWADDR: // Copy the hardware address out. @@ -3042,7 +3043,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe // sockaddr. sa_family contains the ARPHRD_* device type, // sa_data the L2 hardware address starting from byte 0. Setting // the hardware address is a privileged operation. - usermem.ByteOrder.PutUint16(ifr.Data[:], iface.DeviceType) + hostarch.ByteOrder.PutUint16(ifr.Data[:], iface.DeviceType) n := copy(ifr.Data[2:], iface.Addr) for i := 2 + n; i < len(ifr.Data); i++ { ifr.Data[i] = 0 // Clear padding. @@ -3055,7 +3056,7 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe } // Drop the flags that don't fit in the size that we need to return. This // matches Linux behavior. - usermem.ByteOrder.PutUint16(ifr.Data[:2], uint16(f)) + hostarch.ByteOrder.PutUint16(ifr.Data[:2], uint16(f)) case linux.SIOCGIFADDR: // Copy the IPv4 address out. @@ -3071,11 +3072,11 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe case linux.SIOCGIFMETRIC: // Gets the metric of the device. As per netdevice(7), this // always just sets ifr_metric to 0. - usermem.ByteOrder.PutUint32(ifr.Data[:4], 0) + hostarch.ByteOrder.PutUint32(ifr.Data[:4], 0) case linux.SIOCGIFMTU: // Gets the MTU of the device. - usermem.ByteOrder.PutUint32(ifr.Data[:4], iface.MTU) + hostarch.ByteOrder.PutUint32(ifr.Data[:4], iface.MTU) case linux.SIOCGIFMAP: // Gets the hardware parameters of the device. @@ -3101,8 +3102,8 @@ func interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFRe continue } // Populate ifr.ifr_netmask (type sockaddr). 
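parseIntOrChar, touched above, accepts an IP-level option either as a 32-bit integer or as a single byte. A stand-alone version of the same logic, with binary.LittleEndian in place of hostarch.ByteOrder:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const sizeOfInt32 = 4

// parseIntOrChar mirrors the helper in the hunk above: some IP-level socket
// options may be passed either as a 32-bit integer or as a single byte.
func parseIntOrChar(buf []byte) (int32, error) {
	if len(buf) == 0 {
		return 0, errors.New("EINVAL: empty option")
	}
	if len(buf) >= sizeOfInt32 {
		return int32(binary.LittleEndian.Uint32(buf)), nil
	}
	return int32(buf[0]), nil
}

func main() {
	v, _ := parseIntOrChar([]byte{1})
	fmt.Println(v) // 1

	full := make([]byte, 4)
	binary.LittleEndian.PutUint32(full, 64)
	v, _ = parseIntOrChar(full)
	fmt.Println(v) // 64
}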
- usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(linux.AF_INET)) - usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0) + hostarch.ByteOrder.PutUint16(ifr.Data[0:2], uint16(linux.AF_INET)) + hostarch.ByteOrder.PutUint16(ifr.Data[2:4], 0) var mask uint32 = 0xffffffff << (32 - addr.PrefixLen) // Netmask is expected to be returned as a big endian // value. @@ -3157,14 +3158,14 @@ func ifconfIoctl(ctx context.Context, t *kernel.Task, io usermem.IO, ifc *linux. // Populate ifr.ifr_addr. ifr := linux.IFReq{} ifr.SetName(iface.Name) - usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family)) - usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0) + hostarch.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family)) + hostarch.ByteOrder.PutUint16(ifr.Data[2:4], 0) copy(ifr.Data[4:8], ifaceAddr.Addr[:4]) // Copy the ifr to userspace. dst := uintptr(ifc.Ptr) + uintptr(ifc.Len) ifc.Len += int32(linux.SizeOfIFReq) - if _, err := ifr.CopyOut(t, usermem.Addr(dst)); err != nil { + if _, err := ifr.CopyOut(t, hostarch.Addr(dst)); err != nil { return err } } diff --git a/pkg/sentry/socket/netstack/netstack_vfs2.go b/pkg/sentry/socket/netstack/netstack_vfs2.go index fc29f8f13..30f3ad153 100644 --- a/pkg/sentry/socket/netstack/netstack_vfs2.go +++ b/pkg/sentry/socket/netstack/netstack_vfs2.go @@ -17,6 +17,7 @@ package netstack import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" @@ -197,7 +198,7 @@ func (s *SocketVFS2) Ioctl(ctx context.Context, uio usermem.IO, args arch.Syscal // GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by // tcpip.Endpoint. -func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { // TODO(b/78348848): Unlike other socket options, SO_TIMESTAMP is // implemented specifically for netstack.SocketVFS2 rather than // commonEndpoint. commonEndpoint should be extended to support socket @@ -245,7 +246,7 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by } s.readMu.Lock() defer s.readMu.Unlock() - s.sockOptTimestamp = usermem.ByteOrder.Uint32(optVal) != 0 + s.sockOptTimestamp = hostarch.ByteOrder.Uint32(optVal) != 0 return nil } if level == linux.SOL_TCP && name == linux.TCP_INQ { @@ -254,7 +255,7 @@ func (s *SocketVFS2) SetSockOpt(t *kernel.Task, level int, name int, optVal []by } s.readMu.Lock() defer s.readMu.Unlock() - s.sockOptInq = usermem.ByteOrder.Uint32(optVal) != 0 + s.sockOptInq = hostarch.ByteOrder.Uint32(optVal) != 0 return nil } diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go index 909341dcf..4c3d48096 100644 --- a/pkg/sentry/socket/socket.go +++ b/pkg/sentry/socket/socket.go @@ -26,6 +26,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/binary" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/device" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -216,7 +217,7 @@ type SocketOps interface { Shutdown(t *kernel.Task, how int) *syserr.Error // GetSockOpt implements the getsockopt(2) linux unix. 
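The SIOCGIFNETMASK hunk packs a sockaddr into ifr_netmask: the family in native order in the first two bytes, two zero bytes, then the mask in network order. A sketch of that packing; the exact offsets of the big-endian write are an assumption, since the tail of that hunk is not shown here:

package main

import (
	"encoding/binary"
	"fmt"
)

const afInet = 2 // linux.AF_INET

// putNetmask packs an IPv4 netmask into an ifreq-style data buffer: native-
// order family, two zero (port) bytes, then the mask in network order.
func putNetmask(data []byte, prefixLen uint32) {
	binary.LittleEndian.PutUint16(data[0:2], afInet) // hostarch.ByteOrder in the sentry
	binary.LittleEndian.PutUint16(data[2:4], 0)
	mask := uint32(0xffffffff) << (32 - prefixLen)
	binary.BigEndian.PutUint32(data[4:8], mask)
}

func main() {
	var data [8]byte
	putNetmask(data[:], 24)
	fmt.Printf("% x\n", data) // 02 00 00 00 ff ff ff 00
}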
- GetSockOpt(t *kernel.Task, level int, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) + GetSockOpt(t *kernel.Task, level int, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) // SetSockOpt implements the setsockopt(2) linux unix. SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error @@ -356,7 +357,7 @@ func NewDirent(ctx context.Context, d *device.Device) *fs.Dirent { Type: fs.Socket, DeviceID: d.DeviceID(), InodeID: ino, - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, }) // Dirent name matches net/socket.c:sockfs_dname. @@ -571,19 +572,19 @@ func UnmarshalSockAddr(family int, data []byte) linux.SockAddr { switch family { case unix.AF_INET: var addr linux.SockAddrInet - binary.Unmarshal(data[:unix.SizeofSockaddrInet4], usermem.ByteOrder, &addr) + binary.Unmarshal(data[:unix.SizeofSockaddrInet4], hostarch.ByteOrder, &addr) return &addr case unix.AF_INET6: var addr linux.SockAddrInet6 - binary.Unmarshal(data[:unix.SizeofSockaddrInet6], usermem.ByteOrder, &addr) + binary.Unmarshal(data[:unix.SizeofSockaddrInet6], hostarch.ByteOrder, &addr) return &addr case unix.AF_UNIX: var addr linux.SockAddrUnix - binary.Unmarshal(data[:unix.SizeofSockaddrUnix], usermem.ByteOrder, &addr) + binary.Unmarshal(data[:unix.SizeofSockaddrUnix], hostarch.ByteOrder, &addr) return &addr case unix.AF_NETLINK: var addr linux.SockAddrNetlink - binary.Unmarshal(data[:unix.SizeofSockaddrNetlink], usermem.ByteOrder, &addr) + binary.Unmarshal(data[:unix.SizeofSockaddrNetlink], hostarch.ByteOrder, &addr) return &addr default: panic(fmt.Sprintf("Unsupported socket family %v", family)) @@ -693,7 +694,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) { } // Get the rest of the fields based on the address family. 
- switch family := usermem.ByteOrder.Uint16(addr); family { + switch family := hostarch.ByteOrder.Uint16(addr); family { case linux.AF_UNIX: path := addr[2:] if len(path) > linux.UnixPathMax { @@ -715,7 +716,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) { if len(addr) < sockAddrInetSize { return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument } - binary.Unmarshal(addr[:sockAddrInetSize], usermem.ByteOrder, &a) + binary.Unmarshal(addr[:sockAddrInetSize], hostarch.ByteOrder, &a) out := tcpip.FullAddress{ Addr: BytesToIPAddress(a.Addr[:]), @@ -728,7 +729,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) { if len(addr) < sockAddrInet6Size { return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument } - binary.Unmarshal(addr[:sockAddrInet6Size], usermem.ByteOrder, &a) + binary.Unmarshal(addr[:sockAddrInet6Size], hostarch.ByteOrder, &a) out := tcpip.FullAddress{ Addr: BytesToIPAddress(a.Addr[:]), @@ -744,7 +745,7 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) { if len(addr) < sockAddrLinkSize { return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument } - binary.Unmarshal(addr[:sockAddrLinkSize], usermem.ByteOrder, &a) + binary.Unmarshal(addr[:sockAddrLinkSize], hostarch.ByteOrder, &a) if a.Family != linux.AF_PACKET || a.HardwareAddrLen != header.EthernetAddressSize { return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument } diff --git a/pkg/sentry/socket/unix/BUILD b/pkg/sentry/socket/unix/BUILD index ff53a26b7..c9cbefb3a 100644 --- a/pkg/sentry/socket/unix/BUILD +++ b/pkg/sentry/socket/unix/BUILD @@ -40,6 +40,7 @@ go_library( "//pkg/abi/linux", "//pkg/context", "//pkg/fspath", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/refs", diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go index b22f7973a..db7b1affe 100644 --- a/pkg/sentry/socket/unix/unix.go +++ b/pkg/sentry/socket/unix/unix.go @@ -24,6 +24,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -192,7 +193,7 @@ func (s *SocketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, // GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by // a transport.Endpoint. -func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen) } diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go index 7890d1048..c39e317ff 100644 --- a/pkg/sentry/socket/unix/unix_vfs2.go +++ b/pkg/sentry/socket/unix/unix_vfs2.go @@ -18,6 +18,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fsimpl/sockfs" @@ -112,7 +113,7 @@ func (s *SocketVFS2) Release(ctx context.Context) { // GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by // a transport.Endpoint. 
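AddressAndFamily and UnmarshalSockAddr above both start the same way: the first two bytes of a sockaddr are the family in native byte order, and the remainder is parsed per family. A reduced sketch of that first step:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// Linux address-family constants, listed here for illustration.
const (
	afUnix  = 1
	afInet  = 2
	afInet6 = 10
)

// family reads the leading family field of a raw sockaddr.
func family(addr []byte) (uint16, error) {
	if len(addr) < 2 {
		return 0, errors.New("EINVAL: address too short")
	}
	return binary.LittleEndian.Uint16(addr), nil // hostarch.ByteOrder in the sentry
}

func main() {
	// A truncated sockaddr_in: AF_INET followed by a big-endian port.
	addr := []byte{afInet, 0, 0x1f, 0x90}
	f, err := family(addr)
	if err != nil {
		panic(err)
	}
	switch f {
	case afUnix:
		fmt.Println("AF_UNIX")
	case afInet:
		fmt.Println("AF_INET") // printed
	case afInet6:
		fmt.Println("AF_INET6")
	}
}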
-func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr usermem.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { +func (s *SocketVFS2) GetSockOpt(t *kernel.Task, level, name int, outPtr hostarch.Addr, outLen int) (marshal.Marshallable, *syserr.Error) { return netstack.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outPtr, outLen) } diff --git a/pkg/sentry/strace/BUILD b/pkg/sentry/strace/BUILD index 1b7fd2232..2ebd77f82 100644 --- a/pkg/sentry/strace/BUILD +++ b/pkg/sentry/strace/BUILD @@ -28,6 +28,7 @@ go_library( "//pkg/binary", "//pkg/bits", "//pkg/eventchannel", + "//pkg/hostarch", "//pkg/marshal/primitive", "//pkg/seccomp", "//pkg/sentry/arch", @@ -35,7 +36,6 @@ go_library( "//pkg/sentry/socket", "//pkg/sentry/socket/netlink", "//pkg/sentry/syscalls/linux", - "//pkg/usermem", "@org_golang_x_sys//unix:go_default_library", ], ) diff --git a/pkg/sentry/strace/epoll.go b/pkg/sentry/strace/epoll.go index ae3b998c8..48650e3f9 100644 --- a/pkg/sentry/strace/epoll.go +++ b/pkg/sentry/strace/epoll.go @@ -21,10 +21,11 @@ import ( "gvisor.dev/gvisor/pkg/abi" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) -func epollEvent(t *kernel.Task, eventAddr usermem.Addr) string { +func epollEvent(t *kernel.Task, eventAddr hostarch.Addr) string { var e linux.EpollEvent if _, err := e.CopyIn(t, eventAddr); err != nil { return fmt.Sprintf("%#x {error reading event: %v}", eventAddr, err) @@ -35,7 +36,7 @@ func epollEvent(t *kernel.Task, eventAddr usermem.Addr) string { return sb.String() } -func epollEvents(t *kernel.Task, eventsAddr usermem.Addr, numEvents, maxBytes uint64) string { +func epollEvents(t *kernel.Task, eventsAddr hostarch.Addr, numEvents, maxBytes uint64) string { var sb strings.Builder fmt.Fprintf(&sb, "%#x {", eventsAddr) addr := eventsAddr diff --git a/pkg/sentry/strace/poll.go b/pkg/sentry/strace/poll.go index 074e80f9b..572a8b50b 100644 --- a/pkg/sentry/strace/poll.go +++ b/pkg/sentry/strace/poll.go @@ -22,7 +22,8 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/sentry/kernel" slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // PollEventSet is the set of poll(2) event flags. 
@@ -52,7 +53,7 @@ func pollFD(t *kernel.Task, pfd *linux.PollFD, post bool) string { return fmt.Sprintf("{FD: %s, Events: %s, REvents: %s}", fd(t, pfd.FD), PollEventSet.Parse(uint64(pfd.Events)), revents) } -func pollFDs(t *kernel.Task, addr usermem.Addr, nfds uint, post bool) string { +func pollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint, post bool) string { if addr == 0 { return "null" } diff --git a/pkg/sentry/strace/select.go b/pkg/sentry/strace/select.go index 3a4c32aa0..e6e928157 100644 --- a/pkg/sentry/strace/select.go +++ b/pkg/sentry/strace/select.go @@ -19,7 +19,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) func fdsFromSet(t *kernel.Task, set []byte) []int { @@ -35,7 +36,7 @@ func fdsFromSet(t *kernel.Task, set []byte) []int { return fds } -func fdSet(t *kernel.Task, nfds int, addr usermem.Addr) string { +func fdSet(t *kernel.Task, nfds int, addr hostarch.Addr) string { if nfds < 0 { return fmt.Sprintf("%#x (negative nfds)", addr) } diff --git a/pkg/sentry/strace/signal.go b/pkg/sentry/strace/signal.go index c41f36e3f..e5b379a20 100644 --- a/pkg/sentry/strace/signal.go +++ b/pkg/sentry/strace/signal.go @@ -21,7 +21,8 @@ import ( "gvisor.dev/gvisor/pkg/abi" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // signalNames contains the names of all named signals. @@ -100,7 +101,7 @@ var sigActionFlags = abi.FlagSet{ }, } -func sigSet(t *kernel.Task, addr usermem.Addr) string { +func sigSet(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -110,7 +111,7 @@ func sigSet(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x (error copying sigset: %v)", addr, err) } - set := linux.SignalSet(usermem.ByteOrder.Uint64(b[:])) + set := linux.SignalSet(hostarch.ByteOrder.Uint64(b[:])) return fmt.Sprintf("%#x %s", addr, formatSigSet(set)) } @@ -124,7 +125,7 @@ func formatSigSet(set linux.SignalSet) string { return fmt.Sprintf("[%v]", strings.Join(signals, " ")) } -func sigAction(t *kernel.Task, addr usermem.Addr) string { +func sigAction(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } diff --git a/pkg/sentry/strace/socket.go b/pkg/sentry/strace/socket.go index d943a7cb1..e5b7f9b96 100644 --- a/pkg/sentry/strace/socket.go +++ b/pkg/sentry/strace/socket.go @@ -26,7 +26,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/socket" "gvisor.dev/gvisor/pkg/sentry/socket/netlink" slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // SocketFamily are the possible socket(2) families. 
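The strace sigSet helper above decodes the 8 bytes copied in from the tracee as a native-order bitmask and prints the named signals. A stand-alone approximation, with generic names in place of the real signal table:

package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// decodeSigSet mirrors sigSet above: the copied-in bytes are a native-order
// 64-bit mask in which bit N-1 represents signal N.
func decodeSigSet(b []byte) uint64 {
	return binary.LittleEndian.Uint64(b) // hostarch.ByteOrder in the sentry
}

func formatSigSet(set uint64) string {
	var names []string
	for sig := 1; sig <= 64; sig++ {
		if set&(1<<(sig-1)) != 0 {
			names = append(names, fmt.Sprintf("signal %d", sig))
		}
	}
	return "[" + strings.Join(names, " ") + "]"
}

func main() {
	var b [8]byte
	// SIGINT (2) and SIGTERM (15): bits 1 and 14.
	binary.LittleEndian.PutUint64(b[:], 1<<1|1<<14)
	fmt.Println(formatSigSet(decodeSigSet(b[:]))) // [signal 2 signal 15]
}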
@@ -161,7 +162,7 @@ var controlMessageType = map[int32]string{ linux.SO_TIMESTAMP: "SO_TIMESTAMP", } -func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) string { +func cmsghdr(t *kernel.Task, addr hostarch.Addr, length uint64, maxBytes uint64) string { if length > maxBytes { return fmt.Sprintf("%#x (error decoding control: invalid length (%d))", addr, length) } @@ -180,7 +181,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) } var h linux.ControlMessageHeader - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], hostarch.ByteOrder, &h) var skipData bool level := "SOL_SOCKET" @@ -230,7 +231,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) numRights := rightsSize / linux.SizeOfControlMessageRight fds := make(linux.ControlMessageRights, numRights) - binary.Unmarshal(buf[i:i+rightsSize], usermem.ByteOrder, &fds) + binary.Unmarshal(buf[i:i+rightsSize], hostarch.ByteOrder, &fds) rights := make([]string, 0, len(fds)) for _, fd := range fds { @@ -257,7 +258,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) } var creds linux.ControlMessageCredentials - binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds) + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], hostarch.ByteOrder, &creds) strs = append(strs, fmt.Sprintf( "{level=%s, type=%s, length=%d, pid: %d, uid: %d, gid: %d}", @@ -281,7 +282,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) } var tv linux.Timeval - binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], usermem.ByteOrder, &tv) + binary.Unmarshal(buf[i:i+linux.SizeOfTimeval], hostarch.ByteOrder, &tv) strs = append(strs, fmt.Sprintf( "{level=%s, type=%s, length=%d, Sec: %d, Usec: %d}", @@ -301,7 +302,7 @@ func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) return fmt.Sprintf("%#x %s", addr, strings.Join(strs, ", ")) } -func msghdr(t *kernel.Task, addr usermem.Addr, printContent bool, maxBytes uint64) string { +func msghdr(t *kernel.Task, addr hostarch.Addr, printContent bool, maxBytes uint64) string { var msg slinux.MessageHeader64 if _, err := msg.CopyIn(t, addr); err != nil { return fmt.Sprintf("%#x (error decoding msghdr: %v)", addr, err) @@ -311,17 +312,17 @@ func msghdr(t *kernel.Task, addr usermem.Addr, printContent bool, maxBytes uint6 addr, msg.Name, msg.NameLen, - iovecs(t, usermem.Addr(msg.Iov), int(msg.IovLen), printContent, maxBytes), + iovecs(t, hostarch.Addr(msg.Iov), int(msg.IovLen), printContent, maxBytes), ) if printContent { - s = fmt.Sprintf("%s, control={%s}", s, cmsghdr(t, usermem.Addr(msg.Control), msg.ControlLen, maxBytes)) + s = fmt.Sprintf("%s, control={%s}", s, cmsghdr(t, hostarch.Addr(msg.Control), msg.ControlLen, maxBytes)) } else { s = fmt.Sprintf("%s, control=%#x, control_len=%d", s, msg.Control, msg.ControlLen) } return fmt.Sprintf("%s, flags=%d}", s, msg.Flags) } -func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string { +func sockAddr(t *kernel.Task, addr hostarch.Addr, length uint32) string { if addr == 0 { return "null" } @@ -335,7 +336,7 @@ func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string { if len(b) < 2 { return fmt.Sprintf("%#x {address too short: %d bytes}", addr, len(b)) } - family := usermem.ByteOrder.Uint16(b) + family := hostarch.ByteOrder.Uint16(b) familyStr := 
SocketFamily.Parse(uint64(family)) @@ -362,7 +363,7 @@ func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string { } } -func postSockAddr(t *kernel.Task, addr usermem.Addr, lengthPtr usermem.Addr) string { +func postSockAddr(t *kernel.Task, addr hostarch.Addr, lengthPtr hostarch.Addr) string { if addr == 0 { return "null" } @@ -379,14 +380,14 @@ func postSockAddr(t *kernel.Task, addr usermem.Addr, lengthPtr usermem.Addr) str return sockAddr(t, addr, l) } -func copySockLen(t *kernel.Task, addr usermem.Addr) (uint32, error) { +func copySockLen(t *kernel.Task, addr hostarch.Addr) (uint32, error) { // socklen_t is 32-bits. var l primitive.Uint32 _, err := l.CopyIn(t, addr) return uint32(l), err } -func sockLenPointer(t *kernel.Task, addr usermem.Addr) string { +func sockLenPointer(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -420,7 +421,7 @@ func sockFlags(flags int32) string { return SocketFlagSet.Parse(uint64(flags)) } -func getSockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, optLen usermem.Addr, maximumBlobSize uint, rval uintptr) string { +func getSockOptVal(t *kernel.Task, level, optname uint64, optVal hostarch.Addr, optLen hostarch.Addr, maximumBlobSize uint, rval uintptr) string { if int(rval) < 0 { return hexNum(uint64(optVal)) } @@ -434,7 +435,7 @@ func getSockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, o return sockOptVal(t, level, optname, optVal, uint64(l), maximumBlobSize) } -func sockOptVal(t *kernel.Task, level, optname uint64, optVal usermem.Addr, optLen uint64, maximumBlobSize uint) string { +func sockOptVal(t *kernel.Task, level, optname uint64, optVal hostarch.Addr, optLen uint64, maximumBlobSize uint) string { switch optLen { case 1: var v primitive.Uint8 diff --git a/pkg/sentry/strace/strace.go b/pkg/sentry/strace/strace.go index 396744597..ec5d5f846 100644 --- a/pkg/sentry/strace/strace.go +++ b/pkg/sentry/strace/strace.go @@ -32,7 +32,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" pb "gvisor.dev/gvisor/pkg/sentry/strace/strace_go_proto" slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // DefaultLogMaximumSize is the default LogMaximumSize. 
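The cmsghdr helper above walks the control buffer by decoding a fixed-size header and then skipping its payload. A sketch of the header decode, assuming the 64-bit Linux layout (a 64-bit length followed by two 32-bit fields) and omitting CMSG alignment:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// cmsgHeader stands in for linux.ControlMessageHeader.
type cmsgHeader struct {
	Length uint64
	Level  int32
	Type   int32
}

const sizeOfCmsgHeader = 16

func parseCmsgHeader(buf []byte) (cmsgHeader, error) {
	var h cmsgHeader
	if len(buf) < sizeOfCmsgHeader {
		return h, errors.New("control buffer too short for a header")
	}
	h.Length = binary.LittleEndian.Uint64(buf[0:8])
	h.Level = int32(binary.LittleEndian.Uint32(buf[8:12]))
	h.Type = int32(binary.LittleEndian.Uint32(buf[12:16]))
	// Sanity-check the declared length against the buffer, as the real
	// parser does before reading the payload.
	if h.Length < sizeOfCmsgHeader || h.Length > uint64(len(buf)) {
		return h, errors.New("invalid cmsg length")
	}
	return h, nil
}

func main() {
	buf := make([]byte, 20)
	binary.LittleEndian.PutUint64(buf[0:8], 20)  // header + 4 bytes of payload
	binary.LittleEndian.PutUint32(buf[8:12], 1)  // SOL_SOCKET
	binary.LittleEndian.PutUint32(buf[12:16], 2) // SCM_CREDENTIALS
	h, err := parseCmsgHeader(buf)
	fmt.Println(h, err) // {20 1 2} <nil>
}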
@@ -62,7 +63,7 @@ func hexArg(arg arch.SyscallArgument) string { return hexNum(arg.Uint64()) } -func iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, maxBytes uint64) string { +func iovecs(t *kernel.Task, addr hostarch.Addr, iovcnt int, printContent bool, maxBytes uint64) string { if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV { return fmt.Sprintf("%#x (error decoding iovecs: invalid iovcnt)", addr) } @@ -107,7 +108,7 @@ func iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, ma return fmt.Sprintf("%#x %s", addr, strings.Join(iovs, ", ")) } -func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) string { +func dump(t *kernel.Task, addr hostarch.Addr, size uint, maximumBlobSize uint) string { origSize := size if size > maximumBlobSize { size = maximumBlobSize @@ -131,7 +132,7 @@ func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) st return fmt.Sprintf("%#x %q%s", addr, b[:amt], dot) } -func path(t *kernel.Task, addr usermem.Addr) string { +func path(t *kernel.Task, addr hostarch.Addr) string { path, err := t.CopyInString(addr, linux.PATH_MAX) if err != nil { return fmt.Sprintf("%#x (error decoding path: %s)", addr, err) @@ -196,7 +197,7 @@ func fdVFS2(t *kernel.Task, fd int32) string { return fmt.Sprintf("%#x %s", fd, name) } -func fdpair(t *kernel.Task, addr usermem.Addr) string { +func fdpair(t *kernel.Task, addr hostarch.Addr) string { var fds [2]int32 _, err := primitive.CopyInt32SliceIn(t, addr, fds[:]) if err != nil { @@ -206,7 +207,7 @@ func fdpair(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x [%d %d]", addr, fds[0], fds[1]) } -func uname(t *kernel.Task, addr usermem.Addr) string { +func uname(t *kernel.Task, addr hostarch.Addr) string { var u linux.UtsName if _, err := u.CopyIn(t, addr); err != nil { return fmt.Sprintf("%#x (error decoding utsname: %s)", addr, err) @@ -215,7 +216,7 @@ func uname(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x %s", addr, u) } -func utimensTimespec(t *kernel.Task, addr usermem.Addr) string { +func utimensTimespec(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -237,7 +238,7 @@ func utimensTimespec(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {sec=%v nsec=%s}", addr, tim.Sec, ns) } -func timespec(t *kernel.Task, addr usermem.Addr) string { +func timespec(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -249,7 +250,7 @@ func timespec(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {sec=%v nsec=%v}", addr, tim.Sec, tim.Nsec) } -func timeval(t *kernel.Task, addr usermem.Addr) string { +func timeval(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -262,7 +263,7 @@ func timeval(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {sec=%v usec=%v}", addr, tim.Sec, tim.Usec) } -func utimbuf(t *kernel.Task, addr usermem.Addr) string { +func utimbuf(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -275,7 +276,7 @@ func utimbuf(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {actime=%v, modtime=%v}", addr, utim.Actime, utim.Modtime) } -func stat(t *kernel.Task, addr usermem.Addr) string { +func stat(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -287,27 +288,27 @@ func stat(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {dev=%d, ino=%d, mode=%s, nlink=%d, uid=%d, gid=%d, rdev=%d, 
size=%d, blksize=%d, blocks=%d, atime=%s, mtime=%s, ctime=%s}", addr, stat.Dev, stat.Ino, linux.FileMode(stat.Mode), stat.Nlink, stat.UID, stat.GID, stat.Rdev, stat.Size, stat.Blksize, stat.Blocks, time.Unix(stat.ATime.Sec, stat.ATime.Nsec), time.Unix(stat.MTime.Sec, stat.MTime.Nsec), time.Unix(stat.CTime.Sec, stat.CTime.Nsec)) } -func itimerval(t *kernel.Task, addr usermem.Addr) string { +func itimerval(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } interval := timeval(t, addr) - value := timeval(t, addr+usermem.Addr((*linux.Timeval)(nil).SizeBytes())) + value := timeval(t, addr+hostarch.Addr((*linux.Timeval)(nil).SizeBytes())) return fmt.Sprintf("%#x {interval=%s, value=%s}", addr, interval, value) } -func itimerspec(t *kernel.Task, addr usermem.Addr) string { +func itimerspec(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } interval := timespec(t, addr) - value := timespec(t, addr+usermem.Addr((*linux.Timespec)(nil).SizeBytes())) + value := timespec(t, addr+hostarch.Addr((*linux.Timespec)(nil).SizeBytes())) return fmt.Sprintf("%#x {interval=%s, value=%s}", addr, interval, value) } -func stringVector(t *kernel.Task, addr usermem.Addr) string { +func stringVector(t *kernel.Task, addr hostarch.Addr) string { vec, err := t.CopyInVector(addr, slinux.ExecMaxElemSize, slinux.ExecMaxTotalSize) if err != nil { return fmt.Sprintf("%#x {error copying vector: %v}", addr, err) @@ -323,7 +324,7 @@ func stringVector(t *kernel.Task, addr usermem.Addr) string { return s } -func rusage(t *kernel.Task, addr usermem.Addr) string { +func rusage(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -335,7 +336,7 @@ func rusage(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x %+v", addr, ru) } -func capHeader(t *kernel.Task, addr usermem.Addr) string { +func capHeader(t *kernel.Task, addr hostarch.Addr) string { if addr == 0 { return "null" } @@ -360,7 +361,7 @@ func capHeader(t *kernel.Task, addr usermem.Addr) string { return fmt.Sprintf("%#x {Version: %s, Pid: %d}", addr, version, hdr.Pid) } -func capData(t *kernel.Task, hdrAddr, dataAddr usermem.Addr) string { +func capData(t *kernel.Task, hdrAddr, dataAddr hostarch.Addr) string { if dataAddr == 0 { return "null" } diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD index 3dcf36a96..408a6c422 100644 --- a/pkg/sentry/syscalls/linux/BUILD +++ b/pkg/sentry/syscalls/linux/BUILD @@ -64,6 +64,7 @@ go_library( "//pkg/abi/linux", "//pkg/bpf", "//pkg/context", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go index ac53a0c0e..2d2212605 100644 --- a/pkg/sentry/syscalls/linux/linux64.go +++ b/pkg/sentry/syscalls/linux/linux64.go @@ -18,11 +18,11 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/syscalls" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) const ( @@ -405,7 +405,7 @@ var AMD64 = &kernel.SyscallTable{ 434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil), 435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil), }, - Emulate: map[usermem.Addr]uintptr{ + Emulate: map[hostarch.Addr]uintptr{ 0xffffffffff600000: 96, // vsyscall gettimeofday(2) 0xffffffffff600400: 201, // vsyscall time(2) 
0xffffffffff600800: 309, // vsyscall getcpu(2) @@ -723,7 +723,7 @@ var ARM64 = &kernel.SyscallTable{ 434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil), 435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil), }, - Emulate: map[usermem.Addr]uintptr{}, + Emulate: map[hostarch.Addr]uintptr{}, Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) { t.Kernel().EmitUnimplementedEvent(t) return 0, syserror.ENOSYS diff --git a/pkg/sentry/syscalls/linux/sigset.go b/pkg/sentry/syscalls/linux/sigset.go index 434559b80..e8c2d8f9e 100644 --- a/pkg/sentry/syscalls/linux/sigset.go +++ b/pkg/sentry/syscalls/linux/sigset.go @@ -16,9 +16,9 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // CopyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and @@ -27,7 +27,7 @@ import ( // TODO(gvisor.dev/issue/1624): This is only exported because // syscalls/vfs2/signal.go depends on it. Once vfs1 is deleted and the vfs2 // syscalls are moved into this package, then they can be unexported. -func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.SignalSet, error) { +func CopyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) { if size != linux.SignalSetSize { return 0, syserror.EINVAL } @@ -35,14 +35,14 @@ func CopyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.Sig if _, err := t.CopyInBytes(sigSetAddr, b); err != nil { return 0, err } - mask := usermem.ByteOrder.Uint64(b[:]) + mask := hostarch.ByteOrder.Uint64(b[:]) return linux.SignalSet(mask) &^ kernel.UnblockableSignals, nil } // copyOutSigSet copies out a sigset_t. -func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet) error { +func copyOutSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, mask linux.SignalSet) error { b := t.CopyScratchBuffer(8) - usermem.ByteOrder.PutUint64(b, uint64(mask)) + hostarch.ByteOrder.PutUint64(b, uint64(mask)) _, err := t.CopyOutBytes(sigSetAddr, b) return err } @@ -55,15 +55,15 @@ func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet // }; // // and returns sigset_addr and size. -func copyInSigSetWithSize(t *kernel.Task, addr usermem.Addr) (usermem.Addr, uint, error) { +func copyInSigSetWithSize(t *kernel.Task, addr hostarch.Addr) (hostarch.Addr, uint, error) { switch t.Arch().Width() { case 8: in := t.CopyScratchBuffer(16) if _, err := t.CopyInBytes(addr, in); err != nil { return 0, 0, err } - maskAddr := usermem.Addr(usermem.ByteOrder.Uint64(in[0:])) - maskSize := uint(usermem.ByteOrder.Uint64(in[8:])) + maskAddr := hostarch.Addr(hostarch.ByteOrder.Uint64(in[0:])) + maskSize := uint(hostarch.ByteOrder.Uint64(in[8:])) return maskAddr, maskSize, nil default: return 0, 0, syserror.ENOSYS diff --git a/pkg/sentry/syscalls/linux/sys_aio.go b/pkg/sentry/syscalls/linux/sys_aio.go index c2285f796..70e8569a8 100644 --- a/pkg/sentry/syscalls/linux/sys_aio.go +++ b/pkg/sentry/syscalls/linux/sys_aio.go @@ -17,6 +17,7 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -152,7 +153,7 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S } // Keep rolling. 
- eventsAddr += usermem.Addr(linux.IOEventSize) + eventsAddr += hostarch.Addr(linux.IOEventSize) } // Everything finished. @@ -191,12 +192,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error) // I/O. switch cb.OpCode { case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE: - return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{ AddressSpaceActive: false, }) case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV: - return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{ AddressSpaceActive: false, }) @@ -219,7 +220,7 @@ func IoCancel(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc // LINT.IfChange -func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback { +func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, actx *mm.AIOContext, eventFile *fs.File) kernel.AIOCallback { return func(ctx context.Context) { if actx.Dead() { actx.CancelPendingRequest() @@ -264,7 +265,7 @@ func getAIOCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *linu } // submitCallback processes a single callback. -func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error { +func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error { file := t.GetFile(cb.FD) if file == nil { // File not found. @@ -339,7 +340,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc for i := int32(0); i < nrEvents; i++ { // Copy in the callback address. - var cbAddr usermem.Addr + var cbAddr hostarch.Addr switch t.Arch().Width() { case 8: var cbAddrP primitive.Uint64 @@ -351,7 +352,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc // Nothing done. return 0, nil, err } - cbAddr = usermem.Addr(cbAddrP) + cbAddr = hostarch.Addr(cbAddrP) default: return 0, nil, syserror.ENOSYS } @@ -379,7 +380,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc } // Advance to the next one. - addr += usermem.Addr(t.Arch().Width()) + addr += hostarch.Addr(t.Arch().Width()) } return uintptr(nrEvents), nil, nil diff --git a/pkg/sentry/syscalls/linux/sys_file.go b/pkg/sentry/syscalls/linux/sys_file.go index fd9649340..9cd238efd 100644 --- a/pkg/sentry/syscalls/linux/sys_file.go +++ b/pkg/sentry/syscalls/linux/sys_file.go @@ -18,6 +18,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -29,7 +30,6 @@ import ( ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // fileOpAt performs an operation on the second last component in the path. @@ -115,7 +115,7 @@ func fileOpOn(t *kernel.Task, dirFD int32, path string, resolve bool, fn func(ro } // copyInPath copies a path in. 
-func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string, dirPath bool, err error) { +func copyInPath(t *kernel.Task, addr hostarch.Addr, allowEmpty bool) (path string, dirPath bool, err error) { path, err = t.CopyInString(addr, linux.PATH_MAX) if err != nil { return "", false, err @@ -133,7 +133,7 @@ func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string // LINT.IfChange -func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uintptr, err error) { +func openAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint) (fd uintptr, err error) { path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return 0, err @@ -208,7 +208,7 @@ func openAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint) (fd uint return fd, err // Use result in frame. } -func mknodAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error { +func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error { path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return err @@ -301,7 +301,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca return 0, nil, mknodAt(t, dirFD, path, mode) } -func createAt(t *kernel.Task, dirFD int32, addr usermem.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) { +func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) { path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return 0, err @@ -515,7 +515,7 @@ func (ac accessContext) Value(key interface{}) interface{} { } } -func accessAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode uint) error { +func accessAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode uint) error { const rOK = 4 const wOK = 2 const xOK = 1 @@ -694,7 +694,7 @@ func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal } // Top it off with a terminator. 
- _, err = t.CopyOutBytes(addr+usermem.Addr(bytes), []byte("\x00")) + _, err = t.CopyOutBytes(addr+hostarch.Addr(bytes), []byte("\x00")) return uintptr(bytes + 1), nil, err } @@ -1164,7 +1164,7 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys // LINT.IfChange -func mkdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr, mode linux.FileMode) error { +func mkdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMode) error { path, _, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return err @@ -1216,7 +1216,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca return 0, nil, mkdirAt(t, dirFD, addr, mode) } -func rmdirAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error { +func rmdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error { path, _, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return err @@ -1256,7 +1256,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall return 0, nil, rmdirAt(t, linux.AT_FDCWD, addr) } -func symlinkAt(t *kernel.Task, dirFD int32, newAddr usermem.Addr, oldAddr usermem.Addr) error { +func symlinkAt(t *kernel.Task, dirFD int32, newAddr hostarch.Addr, oldAddr hostarch.Addr) error { newPath, dirPath, err := copyInPath(t, newAddr, false /* allowEmpty */) if err != nil { return err @@ -1341,7 +1341,7 @@ func mayLinkAt(t *kernel.Task, target *fs.Inode) error { // linkAt creates a hard link to the target specified by oldDirFD and oldAddr, // specified by newDirFD and newAddr. If resolve is true, then the symlinks // will be followed when evaluating the target. -func linkAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr, resolve, allowEmpty bool) error { +func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr, resolve, allowEmpty bool) error { oldPath, _, err := copyInPath(t, oldAddr, allowEmpty) if err != nil { return err @@ -1448,7 +1448,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal // LINT.IfChange -func readlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr, bufAddr usermem.Addr, size uint) (copied uintptr, err error) { +func readlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, bufAddr hostarch.Addr, size uint) (copied uintptr, err error) { path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return 0, err @@ -1511,7 +1511,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy // LINT.IfChange -func unlinkAt(t *kernel.Task, dirFD int32, addr usermem.Addr) error { +func unlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error { path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return err @@ -1728,7 +1728,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error { return nil } -func chownAt(t *kernel.Task, fd int32, addr usermem.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error { +func chownAt(t *kernel.Task, fd int32, addr hostarch.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error { path, _, err := copyInPath(t, addr, allowEmpty) if err != nil { return err @@ -1815,7 +1815,7 @@ func chmod(t *kernel.Task, d *fs.Dirent, mode linux.FileMode) error { return nil } -func chmodAt(t *kernel.Task, fd int32, addr usermem.Addr, mode linux.FileMode) error { +func chmodAt(t *kernel.Task, fd int32, addr hostarch.Addr, mode 
linux.FileMode) error { path, _, err := copyInPath(t, addr, false /* allowEmpty */) if err != nil { return err @@ -1866,7 +1866,7 @@ func defaultSetToSystemTimeSpec() fs.TimeSpec { } } -func utimes(t *kernel.Task, dirFD int32, addr usermem.Addr, ts fs.TimeSpec, resolve bool) error { +func utimes(t *kernel.Task, dirFD int32, addr hostarch.Addr, ts fs.TimeSpec, resolve bool) error { setTimestamp := func(root *fs.Dirent, d *fs.Dirent, _ uint) error { // Does the task own the file? if !d.Inode.CheckOwnership(t) { @@ -2030,7 +2030,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys // LINT.IfChange -func renameAt(t *kernel.Task, oldDirFD int32, oldAddr usermem.Addr, newDirFD int32, newAddr usermem.Addr) error { +func renameAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int32, newAddr hostarch.Addr) error { newPath, _, err := copyInPath(t, newAddr, false /* allowEmpty */) if err != nil { return err diff --git a/pkg/sentry/syscalls/linux/sys_futex.go b/pkg/sentry/syscalls/linux/sys_futex.go index f39ce0639..eeea1613b 100644 --- a/pkg/sentry/syscalls/linux/sys_futex.go +++ b/pkg/sentry/syscalls/linux/sys_futex.go @@ -18,11 +18,11 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // futexWaitRestartBlock encapsulates the state required to restart futex(2) @@ -41,7 +41,7 @@ type futexWaitRestartBlock struct { // Restart implements kernel.SyscallRestartBlock.Restart. func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) { - return futexWaitDuration(t, f.duration, false, usermem.Addr(f.addr), f.private, f.val, f.mask) + return futexWaitDuration(t, f.duration, false, hostarch.Addr(f.addr), f.private, f.val, f.mask) } // futexWaitAbsolute performs a FUTEX_WAIT_BITSET, blocking until the wait is @@ -51,7 +51,7 @@ func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) { // // If blocking is interrupted, the syscall is restarted with the original // arguments. -func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) { +func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) { w := t.FutexWaiter() err := t.Futex().WaitPrepare(w, t, addr, private, val, mask) if err != nil { @@ -87,7 +87,7 @@ func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, fo // syscall. If forever is true, the syscall is restarted with the original // arguments. If forever is false, duration is a relative timeout and the // syscall is restarted with the remaining timeout. 
-func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr usermem.Addr, private bool, val, mask uint32) (uintptr, error) { +func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr hostarch.Addr, private bool, val, mask uint32) (uintptr, error) { w := t.FutexWaiter() err := t.Futex().WaitPrepare(w, t, addr, private, val, mask) if err != nil { @@ -124,7 +124,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add return 0, syserror.ERESTART_RESTARTBLOCK } -func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.Addr, private bool) error { +func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool) error { w := t.FutexWaiter() locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, false) if err != nil { @@ -152,7 +152,7 @@ func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr usermem.A return syserror.ConvertIntr(err, syserror.ERESTARTSYS) } -func tryLockPI(t *kernel.Task, addr usermem.Addr, private bool) error { +func tryLockPI(t *kernel.Task, addr hostarch.Addr, private bool) error { w := t.FutexWaiter() locked, err := t.Futex().LockPI(w, t, addr, uint32(t.ThreadID()), private, true) if err != nil { diff --git a/pkg/sentry/syscalls/linux/sys_getdents.go b/pkg/sentry/syscalls/linux/sys_getdents.go index b25f7d881..bbba71d8f 100644 --- a/pkg/sentry/syscalls/linux/sys_getdents.go +++ b/pkg/sentry/syscalls/linux/sys_getdents.go @@ -19,6 +19,7 @@ import ( "io" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" @@ -62,7 +63,7 @@ func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy // getdents implements the core of getdents(2)/getdents64(2). // f is the syscall implementation dirent serialization function. -func getdents(t *kernel.Task, fd int32, addr usermem.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) { +func getdents(t *kernel.Task, fd int32, addr hostarch.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) { dir := t.GetFile(fd) if dir == nil { return 0, syserror.EBADF diff --git a/pkg/sentry/syscalls/linux/sys_mempolicy.go b/pkg/sentry/syscalls/linux/sys_mempolicy.go index 9b4a5c3f1..6d27f4292 100644 --- a/pkg/sentry/syscalls/linux/sys_mempolicy.go +++ b/pkg/sentry/syscalls/linux/sys_mempolicy.go @@ -18,6 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" @@ -31,7 +32,7 @@ const ( allowedNodemask = (1 << maxNodes) - 1 ) -func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, error) { +func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64, error) { // "nodemask points to a bit mask of node IDs that contains up to maxnode // bits. The bit mask size is rounded to the next multiple of // sizeof(unsigned long), but the kernel will use bits only up to maxnode. @@ -41,7 +42,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, // because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses // maxnode-1, not maxnode, as the number of bits. 
bits := maxnode - 1 - if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0 + if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0 return 0, syserror.EINVAL } if bits == 0 { @@ -53,7 +54,7 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, if _, err := t.CopyInBytes(addr, buf); err != nil { return 0, err } - val := usermem.ByteOrder.Uint64(buf) + val := hostarch.ByteOrder.Uint64(buf) // Check that only allowed bits in the first unsigned long in the nodemask // are set. if val&^allowedNodemask != 0 { @@ -68,11 +69,11 @@ func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, return val, nil } -func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint64) error { +func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uint64) error { // mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of // bits. bits := maxnode - 1 - if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0 + if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0 return syserror.EINVAL } if bits == 0 { @@ -80,7 +81,7 @@ func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint } // Copy out the first unsigned long in the nodemask. buf := t.CopyScratchBuffer(8) - usermem.ByteOrder.PutUint64(buf, val) + hostarch.ByteOrder.PutUint64(buf, val) if _, err := t.CopyOutBytes(addr, buf); err != nil { return err } @@ -258,7 +259,7 @@ func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall return 0, nil, err } -func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask usermem.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) { +func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask hostarch.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) { flags := linux.NumaPolicy(modeWithFlags & linux.MPOL_MODE_FLAGS) mode := linux.NumaPolicy(modeWithFlags &^ linux.MPOL_MODE_FLAGS) if flags == linux.MPOL_MODE_FLAGS { diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go index cd8dfdfa4..70da0707d 100644 --- a/pkg/sentry/syscalls/linux/sys_mmap.go +++ b/pkg/sentry/syscalls/linux/sys_mmap.go @@ -23,7 +23,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/mm" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Brk implements linux syscall brk(2). 
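Note: the mempolicy hunks above keep the existing maxnode-1 semantics and only swap the byte-order and page-size helpers from usermem to hostarch. A condensed, illustrative sketch of that decode follows; it is not part of the change, and firstNodemaskWord is a hypothetical name.

package example

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/syserror"
)

// firstNodemaskWord reads the first unsigned long of a user nodemask, using
// maxnode-1 bits as described in the comments in the hunks above.
func firstNodemaskWord(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64, error) {
	bits := maxnode - 1 // overflow here also rejects maxnode == 0
	if bits > hostarch.PageSize*8 {
		return 0, syserror.EINVAL
	}
	if bits == 0 {
		return 0, nil
	}
	buf := t.CopyScratchBuffer(8)
	if _, err := t.CopyInBytes(addr, buf); err != nil {
		return 0, err
	}
	return hostarch.ByteOrder.Uint64(buf), nil
}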
@@ -61,12 +62,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC Unmap: fixed, Map32Bit: map32bit, Private: private, - Perms: usermem.AccessType{ + Perms: hostarch.AccessType{ Read: linux.PROT_READ&prot != 0, Write: linux.PROT_WRITE&prot != 0, Execute: linux.PROT_EXEC&prot != 0, }, - MaxPerms: usermem.AnyAccess, + MaxPerms: hostarch.AnyAccess, GrowsDown: linux.MAP_GROWSDOWN&flags != 0, Precommit: linux.MAP_POPULATE&flags != 0, } @@ -160,7 +161,7 @@ func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal func Mprotect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { length := args[1].Uint64() prot := args[2].Int() - err := t.MemoryManager().MProtect(args[0].Pointer(), length, usermem.AccessType{ + err := t.MemoryManager().MProtect(args[0].Pointer(), length, hostarch.AccessType{ Read: linux.PROT_READ&prot != 0, Write: linux.PROT_WRITE&prot != 0, Execute: linux.PROT_EXEC&prot != 0, @@ -183,7 +184,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca return 0, nil, nil } // Not explicitly stated: length need not be page-aligned. - lenAddr, ok := usermem.Addr(length).RoundUp() + lenAddr, ok := hostarch.Addr(length).RoundUp() if !ok { return 0, nil, syserror.EINVAL } @@ -232,7 +233,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca // "The length argument need not be a multiple of the page size, but since // residency information is returned for whole pages, length is effectively // rounded up to the next multiple of the page size." - mincore(2) - la, ok := usermem.Addr(length).RoundUp() + la, ok := hostarch.Addr(length).RoundUp() if !ok { return 0, nil, syserror.ENOMEM } @@ -247,7 +248,7 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca if mapped != uint64(la) { return 0, nil, syserror.ENOMEM } - resident := bytes.Repeat([]byte{1}, int(mapped/usermem.PageSize)) + resident := bytes.Repeat([]byte{1}, int(mapped/hostarch.PageSize)) _, err := t.CopyOutBytes(vec, resident) return 0, nil, err } diff --git a/pkg/sentry/syscalls/linux/sys_mount.go b/pkg/sentry/syscalls/linux/sys_mount.go index bd0633564..864d2138c 100644 --- a/pkg/sentry/syscalls/linux/sys_mount.go +++ b/pkg/sentry/syscalls/linux/sys_mount.go @@ -20,7 +20,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Mount implements Linux syscall mount(2). @@ -31,7 +32,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall flags := args[3].Uint64() dataAddr := args[4].Pointer() - fsType, err := t.CopyInString(typeAddr, usermem.PageSize) + fsType, err := t.CopyInString(typeAddr, hostarch.PageSize) if err != nil { return 0, nil, err } @@ -52,7 +53,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall // character placement, and the address is passed to each file system. // Most file systems always treat this data as a string, though, and so // do all of the ones we implement. 
- data, err = t.CopyInString(dataAddr, usermem.PageSize) + data, err = t.CopyInString(dataAddr, hostarch.PageSize) if err != nil { return 0, nil, err } diff --git a/pkg/sentry/syscalls/linux/sys_pipe.go b/pkg/sentry/syscalls/linux/sys_pipe.go index f7135ea46..d95034347 100644 --- a/pkg/sentry/syscalls/linux/sys_pipe.go +++ b/pkg/sentry/syscalls/linux/sys_pipe.go @@ -16,19 +16,19 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/pipe" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange // pipe2 implements the actual system call with flags. -func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) { +func pipe2(t *kernel.Task, addr hostarch.Addr, flags uint) (uintptr, error) { if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 { return 0, syserror.EINVAL } diff --git a/pkg/sentry/syscalls/linux/sys_poll.go b/pkg/sentry/syscalls/linux/sys_poll.go index 254f4c9f9..da548a14a 100644 --- a/pkg/sentry/syscalls/linux/sys_poll.go +++ b/pkg/sentry/syscalls/linux/sys_poll.go @@ -18,13 +18,13 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" "gvisor.dev/gvisor/pkg/waiter" ) @@ -155,7 +155,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time. } // CopyInPollFDs copies an array of struct pollfd unless nfds exceeds the max. -func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) { +func CopyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) { if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) { return nil, syserror.EINVAL } @@ -170,7 +170,7 @@ func CopyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD return pfd, nil } -func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) { +func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) { pfd, err := CopyInPollFDs(t, addr, nfds) if err != nil { return timeout, 0, err @@ -198,7 +198,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) } // CopyInFDSet copies an fd set from select(2)/pselect(2). 
-func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) { +func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) { set := make([]byte, nBytes) if addr != 0 { @@ -215,7 +215,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy return set, nil } -func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) { +func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) { if nfds < 0 || nfds > fileCap { return 0, syserror.EINVAL } @@ -365,7 +365,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration) // copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr. // // startNs must be from CLOCK_MONOTONIC. -func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error { +func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error { if timeout <= 0 { return nil } @@ -377,7 +377,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D // copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr. // // startNs must be from CLOCK_MONOTONIC. -func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error { +func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error { if timeout <= 0 { return nil } @@ -391,7 +391,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du // // +stateify savable type pollRestartBlock struct { - pfdAddr usermem.Addr + pfdAddr hostarch.Addr nfds uint timeout time.Duration } @@ -401,7 +401,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) { return poll(t, p.pfdAddr, p.nfds, p.timeout) } -func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) { +func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) { remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout) // On an interrupt poll(2) is restarted with the remaining timeout. 
if err == syserror.EINTR { diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go index c0aa0fd60..ae545f80f 100644 --- a/pkg/sentry/syscalls/linux/sys_random.go +++ b/pkg/sentry/syscalls/linux/sys_random.go @@ -24,6 +24,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) const ( @@ -64,7 +66,7 @@ func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys if min > 256 { min = 256 } - n, err := t.MemoryManager().CopyOutFrom(t, usermem.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{ + n, err := t.MemoryManager().CopyOutFrom(t, hostarch.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{ AddressSpaceActive: true, }) if n >= int64(min) { diff --git a/pkg/sentry/syscalls/linux/sys_rlimit.go b/pkg/sentry/syscalls/linux/sys_rlimit.go index 88cd234d1..e64246d57 100644 --- a/pkg/sentry/syscalls/linux/sys_rlimit.go +++ b/pkg/sentry/syscalls/linux/sys_rlimit.go @@ -16,12 +16,12 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // rlimit describes an implementation of 'struct rlimit', which may vary from @@ -67,12 +67,12 @@ func (r *rlimit64) fromLimit(lim limits.Limit) { } } -func (r *rlimit64) copyIn(t *kernel.Task, addr usermem.Addr) error { +func (r *rlimit64) copyIn(t *kernel.Task, addr hostarch.Addr) error { _, err := r.CopyIn(t, addr) return err } -func (r *rlimit64) copyOut(t *kernel.Task, addr usermem.Addr) error { +func (r *rlimit64) copyOut(t *kernel.Task, addr hostarch.Addr) error { _, err := r.CopyOut(t, addr) return err } diff --git a/pkg/sentry/syscalls/linux/sys_seccomp.go b/pkg/sentry/syscalls/linux/sys_seccomp.go index 4fdb4463c..e16d6ff3f 100644 --- a/pkg/sentry/syscalls/linux/sys_seccomp.go +++ b/pkg/sentry/syscalls/linux/sys_seccomp.go @@ -17,10 +17,10 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/bpf" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // userSockFprog is equivalent to Linux's struct sock_fprog on amd64. @@ -33,14 +33,14 @@ type userSockFprog struct { _ [6]byte // padding for alignment // Filter is a user pointer to the struct sock_filter array that makes up - // the filter program. Filter is a uint64 rather than a usermem.Addr - // because usermem.Addr is actually uintptr, which is not a fixed-size + // the filter program. Filter is a uint64 rather than a hostarch.Addr + // because hostarch.Addr is actually uintptr, which is not a fixed-size // type. Filter uint64 } // seccomp applies a seccomp policy to the current task. -func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error { +func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error { // We only support SECCOMP_SET_MODE_FILTER at the moment. if mode != linux.SECCOMP_SET_MODE_FILTER { // Unsupported mode. 
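The seccomp hunk above preserves the rationale spelled out in userSockFprog's comment: user pointers embedded in fixed-layout structures stay uint64, because hostarch.Addr (like usermem.Addr before it) is a uintptr and therefore not fixed-size, and the conversion happens only at the point of use. A minimal sketch of that pattern, with filterAddr as a hypothetical helper:

package example

import "gvisor.dev/gvisor/pkg/hostarch"

// userSockFprog mirrors the struct in the hunk above: the Filter pointer is a
// fixed-width uint64 so the struct layout matches the amd64 ABI.
type userSockFprog struct {
	Len    uint16
	_      [6]byte // padding for alignment
	Filter uint64
}

// filterAddr converts the stored pointer to hostarch.Addr only when the
// filter program is about to be copied in.
func filterAddr(p *userSockFprog) hostarch.Addr {
	return hostarch.Addr(p.Filter)
}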
@@ -60,7 +60,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr usermem.Addr) error { return err } filter := make([]linux.BPFInstruction, int(fprog.Len)) - if _, err := linux.CopyBPFInstructionSliceIn(t, usermem.Addr(fprog.Filter), filter); err != nil { + if _, err := linux.CopyBPFInstructionSliceIn(t, hostarch.Addr(fprog.Filter), filter); err != nil { return err } compiledFilter, err := bpf.Compile(filter) diff --git a/pkg/sentry/syscalls/linux/sys_sem.go b/pkg/sentry/syscalls/linux/sys_sem.go index f0570d927..c84260080 100644 --- a/pkg/sentry/syscalls/linux/sys_sem.go +++ b/pkg/sentry/syscalls/linux/sys_sem.go @@ -19,13 +19,13 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) const opsMax = 500 // SEMOPM @@ -310,7 +310,7 @@ func setVal(t *kernel.Task, id int32, num int32, val int16) error { return set.SetVal(t, num, val, creds, int32(pid)) } -func setValAll(t *kernel.Task, id int32, array usermem.Addr) error { +func setValAll(t *kernel.Task, id int32, array hostarch.Addr) error { r := t.IPCNamespace().SemaphoreRegistry() set := r.FindByID(id) if set == nil { @@ -335,7 +335,7 @@ func getVal(t *kernel.Task, id int32, num int32) (int16, error) { return set.GetVal(num, creds) } -func getValAll(t *kernel.Task, id int32, array usermem.Addr) error { +func getValAll(t *kernel.Task, id int32, array hostarch.Addr) error { r := t.IPCNamespace().SemaphoreRegistry() set := r.FindByID(id) if set == nil { diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go index d639c9bf7..53b12dc41 100644 --- a/pkg/sentry/syscalls/linux/sys_signal.go +++ b/pkg/sentry/syscalls/linux/sys_signal.go @@ -19,12 +19,12 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/kernel/signalfd" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // "For a process to have permission to send a signal it must @@ -516,7 +516,7 @@ func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne } // sharedSignalfd is shared between the two calls. -func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) { +func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) { // Copy in the signal mask. mask, err := CopyInSigSet(t, sigset, sigsetsize) if err != nil { diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go index c6adfe06b..9bdf6d3d8 100644 --- a/pkg/sentry/syscalls/linux/sys_socket.go +++ b/pkg/sentry/syscalls/linux/sys_socket.go @@ -18,6 +18,7 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" @@ -117,7 +118,7 @@ type multipleMessageHeader64 struct { // CaptureAddress allocates memory for and copies a socket address structure // from the untrusted address space range. 
-func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) { +func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) { if addrlen > maxAddrLen { return nil, syserror.EINVAL } @@ -133,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, // writeAddress writes a sockaddr structure and its length to an output buffer // in the unstrusted address space range. If the address is bigger than the // buffer, it is truncated. -func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error { +func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error { // Get the buffer length. var bufLen uint32 if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil { @@ -276,7 +277,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca // accept is the implementation of the accept syscall. It is called by accept // and accept4 syscall handlers. -func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) { +func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) { // Check that no unsupported flags are passed in. if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 { return 0, syserror.EINVAL @@ -472,7 +473,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy // getSockOpt tries to handle common socket options, or dispatches to a specific // socket implementation. -func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) { +func getSockOpt(t *kernel.Task, s socket.Socket, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) { if level == linux.SOL_SOCKET { switch name { case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL: @@ -735,7 +736,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return uintptr(count), nil, nil } -func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) { +func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) { // Capture the message header and io vectors. var msg MessageHeader64 if _, err := msg.CopyIn(t, msgPtr); err != nil { @@ -745,7 +746,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i if msg.IovLen > linux.UIO_MAXIOV { return 0, syserror.EMSGSIZE } - dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ AddressSpaceActive: true, }) if err != nil { @@ -796,7 +797,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i // Copy the address to the caller. 
if msg.NameLen != 0 { - if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil { + if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil { return 0, err } } @@ -806,7 +807,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i return 0, err } if len(controlData) > 0 { - if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil { + if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil { return 0, err } } @@ -821,7 +822,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags i // recvFrom is the implementation of the recvfrom syscall. It is called by // recvfrom and recv syscall handlers. -func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) { +func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) { if int(bufLen) < 0 { return 0, syserror.EINVAL } @@ -997,7 +998,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return uintptr(count), nil, nil } -func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr usermem.Addr, flags int32) (uintptr, error) { +func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostarch.Addr, flags int32) (uintptr, error) { // Capture the message header. var msg MessageHeader64 if _, err := msg.CopyIn(t, msgPtr); err != nil { @@ -1011,7 +1012,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme return 0, syserror.ENOBUFS } controlData = make([]byte, msg.ControlLen) - if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil { + if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil { return 0, err } } @@ -1020,7 +1021,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme var to []byte if msg.NameLen != 0 { var err error - to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen) + to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen) if err != nil { return 0, err } @@ -1030,7 +1031,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme if msg.IovLen > linux.UIO_MAXIOV { return 0, syserror.EMSGSIZE } - src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ AddressSpaceActive: true, }) if err != nil { @@ -1064,7 +1065,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr userme // sendTo is the implementation of the sendto syscall. It is called by sendto // and send syscall handlers. 
-func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) { +func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) { bl := int(bufLen) if bl < 0 { return 0, syserror.EINVAL diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go index cda29a8b5..2338ba44b 100644 --- a/pkg/sentry/syscalls/linux/sys_stat.go +++ b/pkg/sentry/syscalls/linux/sys_stat.go @@ -16,11 +16,11 @@ package linux import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange @@ -106,7 +106,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall } // stat implements stat from the given *fs.Dirent. -func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) error { +func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr hostarch.Addr) error { if dirPath && !fs.IsDir(d.Inode.StableAttr) { return syserror.ENOTDIR } @@ -120,7 +120,7 @@ func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) err } // fstat implements fstat for the given *fs.File. -func fstat(t *kernel.Task, f *fs.File, statAddr usermem.Addr) error { +func fstat(t *kernel.Task, f *fs.File, statAddr hostarch.Addr) error { uattr, err := f.UnstableAttr(t) if err != nil { return err @@ -180,7 +180,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall }) } -func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr usermem.Addr) error { +func statx(t *kernel.Task, sattr fs.StableAttr, uattr fs.UnstableAttr, statxAddr hostarch.Addr) error { // "[T]he kernel may return fields that weren't requested and may fail to // return fields that were requested, depending on what the backing // filesystem supports. @@ -257,7 +257,7 @@ func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca // statfsImpl implements the linux syscall statfs and fstatfs based on a Dirent, // copying the statfs structure out to addr on success, otherwise an error is // returned. -func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error { +func statfsImpl(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr) error { info, err := d.Inode.StatFS(t) if err != nil { return err diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go index b5f920949..3185ea527 100644 --- a/pkg/sentry/syscalls/linux/sys_thread.go +++ b/pkg/sentry/syscalls/linux/sys_thread.go @@ -19,6 +19,7 @@ import ( "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" @@ -46,7 +47,7 @@ var ( ExecMaxTotalSize = 2 * 1024 * 1024 // ExecMaxElemSize is the maximum length of a single argv or envv entry. - ExecMaxElemSize = 32 * usermem.PageSize + ExecMaxElemSize = 32 * hostarch.PageSize ) // Getppid implements linux syscall getppid(2). 
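The socket hunks above follow the same convention for MessageHeader64: pointer fields arrive as uint64 and become hostarch.Addr only when handed to the task's copy and IOSequence helpers. A reduced sketch under that assumption; msgHeader and msgIovecs are illustrative stand-ins, not the real MessageHeader64 API.

package example

import (
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/usermem"
)

// msgHeader is a trimmed stand-in for MessageHeader64; only the fields used
// below are kept.
type msgHeader struct {
	Iov    uint64 // user pointer to the iovec array
	IovLen uint64 // number of iovecs
}

// msgIovecs builds an IOSequence for the message's iovecs, converting the
// raw uint64 pointer to hostarch.Addr at the call site.
func msgIovecs(t *kernel.Task, msg *msgHeader) (usermem.IOSequence, error) {
	return t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
		AddressSpaceActive: true,
	})
}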
@@ -88,7 +89,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return execveat(t, dirFD, pathnameAddr, argvAddr, envvAddr, flags) } -func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) { +func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) { pathname, err := t.CopyInString(pathnameAddr, linux.PATH_MAX) if err != nil { return 0, nil, err @@ -199,7 +200,7 @@ func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys } // clone is used by Clone, Fork, and VFork. -func clone(t *kernel.Task, flags int, stack usermem.Addr, parentTID usermem.Addr, childTID usermem.Addr, tls usermem.Addr) (uintptr, *kernel.SyscallControl, error) { +func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) { opts := kernel.CloneOptions{ SharingOptions: kernel.SharingOptions{ NewAddressSpace: flags&linux.CLONE_VM == 0, @@ -274,7 +275,7 @@ func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error { } // wait4 waits for the given child process to exit. -func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) { +func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusageAddr hostarch.Addr) (uintptr, error) { if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 { return 0, syserror.EINVAL } diff --git a/pkg/sentry/syscalls/linux/sys_time.go b/pkg/sentry/syscalls/linux/sys_time.go index c5054d2f1..83b777bbd 100644 --- a/pkg/sentry/syscalls/linux/sys_time.go +++ b/pkg/sentry/syscalls/linux/sys_time.go @@ -19,12 +19,12 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/marshal/primitive" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/kernel" ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // The most significant 29 bits hold either a pid or a file descriptor. @@ -165,7 +165,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC addr := args[0].Pointer() r := t.Kernel().RealtimeClock().Now().TimeT() - if addr == usermem.Addr(0) { + if addr == hostarch.Addr(0) { return uintptr(r), nil, nil } @@ -182,7 +182,7 @@ func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC type clockNanosleepRestartBlock struct { c ktime.Clock duration time.Duration - rem usermem.Addr + rem hostarch.Addr } // Restart implements kernel.SyscallRestartBlock.Restart. @@ -221,7 +221,7 @@ func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, ts linux.Timespec) error // // If blocking is interrupted, the syscall is restarted with the remaining // duration timeout. -func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem usermem.Addr) error { +func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem hostarch.Addr) error { timer, start, tchan := ktime.After(c, dur) err := t.BlockWithTimer(nil, tchan) @@ -324,14 +324,14 @@ func Gettimeofday(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel. 
tv := args[0].Pointer() tz := args[1].Pointer() - if tv != usermem.Addr(0) { + if tv != hostarch.Addr(0) { nowTv := t.Kernel().RealtimeClock().Now().Timeval() if err := copyTimevalOut(t, tv, &nowTv); err != nil { return 0, nil, err } } - if tz != usermem.Addr(0) { + if tz != hostarch.Addr(0) { // Ask the time package for the timezone. _, offset := time.Now().Zone() // This int32 array mimics linux's struct timezone. diff --git a/pkg/sentry/syscalls/linux/sys_xattr.go b/pkg/sentry/syscalls/linux/sys_xattr.go index 97474fd3c..28ad6a60e 100644 --- a/pkg/sentry/syscalls/linux/sys_xattr.go +++ b/pkg/sentry/syscalls/linux/sys_xattr.go @@ -18,11 +18,11 @@ import ( "strings" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/fs" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // LINT.IfChange @@ -87,7 +87,7 @@ func getXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink } // getXattr implements getxattr(2) from the given *fs.Dirent. -func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64) (int, error) { +func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64) (int, error) { name, err := copyInXattrName(t, nameAddr) if err != nil { return 0, err @@ -180,7 +180,7 @@ func setXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink } // setXattr implements setxattr(2) from the given *fs.Dirent. -func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, size uint64, flags uint32) error { +func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64, flags uint32) error { if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 { return syserror.EINVAL } @@ -214,7 +214,7 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr usermem.Addr, si return nil } -func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) { +func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) { name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1) if err != nil { if err == syserror.ENAMETOOLONG { @@ -306,7 +306,7 @@ func listXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlin return uintptr(n), nil, nil } -func listXattr(t *kernel.Task, d *fs.Dirent, addr usermem.Addr, size uint64) (int, error) { +func listXattr(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr, size uint64) (int, error) { if !xattrFileTypeOk(d.Inode) { return 0, nil } @@ -408,7 +408,7 @@ func removeXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSyml } // removeXattr implements removexattr(2) from the given *fs.Dirent. -func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr usermem.Addr) error { +func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr hostarch.Addr) error { name, err := copyInXattrName(t, nameAddr) if err != nil { return err diff --git a/pkg/sentry/syscalls/linux/timespec.go b/pkg/sentry/syscalls/linux/timespec.go index ddc3ee26e..3edc922eb 100644 --- a/pkg/sentry/syscalls/linux/timespec.go +++ b/pkg/sentry/syscalls/linux/timespec.go @@ -18,13 +18,13 @@ import ( "time" "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // copyTimespecIn copies a Timespec from the untrusted app range to the kernel. 
-func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) { +func copyTimespecIn(t *kernel.Task, addr hostarch.Addr) (linux.Timespec, error) { switch t.Arch().Width() { case 8: ts := linux.Timespec{} @@ -33,8 +33,8 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) { if err != nil { return ts, err } - ts.Sec = int64(usermem.ByteOrder.Uint64(in[0:])) - ts.Nsec = int64(usermem.ByteOrder.Uint64(in[8:])) + ts.Sec = int64(hostarch.ByteOrder.Uint64(in[0:])) + ts.Nsec = int64(hostarch.ByteOrder.Uint64(in[8:])) return ts, nil default: return linux.Timespec{}, syserror.ENOSYS @@ -42,12 +42,12 @@ func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) { } // copyTimespecOut copies a Timespec to the untrusted app range. -func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) error { +func copyTimespecOut(t *kernel.Task, addr hostarch.Addr, ts *linux.Timespec) error { switch t.Arch().Width() { case 8: out := t.CopyScratchBuffer(16) - usermem.ByteOrder.PutUint64(out[0:], uint64(ts.Sec)) - usermem.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec)) + hostarch.ByteOrder.PutUint64(out[0:], uint64(ts.Sec)) + hostarch.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec)) _, err := t.CopyOutBytes(addr, out) return err default: @@ -56,7 +56,7 @@ func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) erro } // copyTimevalIn copies a Timeval from the untrusted app range to the kernel. -func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) { +func copyTimevalIn(t *kernel.Task, addr hostarch.Addr) (linux.Timeval, error) { switch t.Arch().Width() { case 8: tv := linux.Timeval{} @@ -65,8 +65,8 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) { if err != nil { return tv, err } - tv.Sec = int64(usermem.ByteOrder.Uint64(in[0:])) - tv.Usec = int64(usermem.ByteOrder.Uint64(in[8:])) + tv.Sec = int64(hostarch.ByteOrder.Uint64(in[0:])) + tv.Usec = int64(hostarch.ByteOrder.Uint64(in[8:])) return tv, nil default: return linux.Timeval{}, syserror.ENOSYS @@ -74,12 +74,12 @@ func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) { } // copyTimevalOut copies a Timeval to the untrusted app range. -func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error { +func copyTimevalOut(t *kernel.Task, addr hostarch.Addr, tv *linux.Timeval) error { switch t.Arch().Width() { case 8: out := t.CopyScratchBuffer(16) - usermem.ByteOrder.PutUint64(out[0:], uint64(tv.Sec)) - usermem.ByteOrder.PutUint64(out[8:], uint64(tv.Usec)) + hostarch.ByteOrder.PutUint64(out[0:], uint64(tv.Sec)) + hostarch.ByteOrder.PutUint64(out[8:], uint64(tv.Usec)) _, err := t.CopyOutBytes(addr, out) return err default: @@ -94,7 +94,7 @@ func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error // returned value is the maximum that Duration will allow. // // If timespecAddr is NULL, the returned value is negative. -func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) { +func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) { // Use a negative Duration to indicate "no timeout". 
timeout := time.Duration(-1) if timespecAddr != 0 { diff --git a/pkg/sentry/syscalls/linux/vfs2/BUILD b/pkg/sentry/syscalls/linux/vfs2/BUILD index 2e59bd5b1..5ce0bc714 100644 --- a/pkg/sentry/syscalls/linux/vfs2/BUILD +++ b/pkg/sentry/syscalls/linux/vfs2/BUILD @@ -43,6 +43,7 @@ go_library( "//pkg/context", "//pkg/fspath", "//pkg/gohacks", + "//pkg/hostarch", "//pkg/log", "//pkg/marshal", "//pkg/marshal/primitive", diff --git a/pkg/sentry/syscalls/linux/vfs2/aio.go b/pkg/sentry/syscalls/linux/vfs2/aio.go index de6789a65..fd1863ef3 100644 --- a/pkg/sentry/syscalls/linux/vfs2/aio.go +++ b/pkg/sentry/syscalls/linux/vfs2/aio.go @@ -26,6 +26,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // IoSubmit implements linux syscall io_submit(2). @@ -40,7 +42,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc for i := int32(0); i < nrEvents; i++ { // Copy in the callback address. - var cbAddr usermem.Addr + var cbAddr hostarch.Addr switch t.Arch().Width() { case 8: var cbAddrP primitive.Uint64 @@ -52,7 +54,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc // Nothing done. return 0, nil, err } - cbAddr = usermem.Addr(cbAddrP) + cbAddr = hostarch.Addr(cbAddrP) default: return 0, nil, syserror.ENOSYS } @@ -79,14 +81,14 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc } // Advance to the next one. - addr += usermem.Addr(t.Arch().Width()) + addr += hostarch.Addr(t.Arch().Width()) } return uintptr(nrEvents), nil, nil } // submitCallback processes a single callback. -func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr usermem.Addr) error { +func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error { if cb.Reserved2 != 0 { return syserror.EINVAL } @@ -148,7 +150,7 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr user return nil } -func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr usermem.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback { +func getAIOCallback(t *kernel.Task, fd, eventFD *vfs.FileDescription, cbAddr hostarch.Addr, cb *linux.IOCallback, ioseq usermem.IOSequence, aioCtx *mm.AIOContext) kernel.AIOCallback { return func(ctx context.Context) { // Release references after completing the callback. defer fd.DecRef(ctx) @@ -206,12 +208,12 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error) // I/O. 
switch cb.OpCode { case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PWRITE: - return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + return t.SingleIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{ AddressSpaceActive: false, }) case linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITEV: - return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + return t.IovecsIOSequence(hostarch.Addr(cb.Buf), bytes, usermem.IOOpts{ AddressSpaceActive: false, }) diff --git a/pkg/sentry/syscalls/linux/vfs2/execve.go b/pkg/sentry/syscalls/linux/vfs2/execve.go index 7a409620d..3315398a4 100644 --- a/pkg/sentry/syscalls/linux/vfs2/execve.go +++ b/pkg/sentry/syscalls/linux/vfs2/execve.go @@ -24,7 +24,8 @@ import ( slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Execve implements linux syscall execve(2). @@ -45,7 +46,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return execveat(t, dirfd, pathnameAddr, argvAddr, envvAddr, flags) } -func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr usermem.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) { +func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) { if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 { return 0, nil, syserror.EINVAL } diff --git a/pkg/sentry/syscalls/linux/vfs2/filesystem.go b/pkg/sentry/syscalls/linux/vfs2/filesystem.go index 01e0f9010..36aa1d3ae 100644 --- a/pkg/sentry/syscalls/linux/vfs2/filesystem.go +++ b/pkg/sentry/syscalls/linux/vfs2/filesystem.go @@ -20,7 +20,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Link implements Linux syscall link(2). 
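execveat above, like the *at helpers that follow, validates its flags by masking off the supported bits and rejecting anything left over with EINVAL. A small sketch of that idiom, using hypothetical flag values rather than the real linux.AT_* constants:

package main

import (
    "errors"
    "fmt"
)

// Hypothetical flag bits accepted by an *at-style call; the values are
// illustrative, not the kernel's AT_* numbers.
const (
    flagEmptyPath       = 0x1000
    flagSymlinkNofollow = 0x100
)

var errInvalid = errors.New("EINVAL")

// checkFlags mirrors the flags&^(...) != 0 pattern: any bit outside the
// supported set makes the whole call fail.
func checkFlags(flags int32) error {
    if flags&^(flagEmptyPath|flagSymlinkNofollow) != 0 {
        return errInvalid
    }
    return nil
}

func main() {
    fmt.Println(checkFlags(flagEmptyPath))       // <nil>
    fmt.Println(checkFlags(flagEmptyPath | 0x2)) // EINVAL
}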
@@ -40,7 +41,7 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal return 0, nil, linkat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags) } -func linkat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags int32) error { +func linkat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags int32) error { if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_FOLLOW) != 0 { return syserror.EINVAL } @@ -86,7 +87,7 @@ func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca return 0, nil, mkdirat(t, dirfd, addr, mode) } -func mkdirat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode uint) error { +func mkdirat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode uint) error { path, err := copyInPath(t, addr) if err != nil { return err @@ -118,7 +119,7 @@ func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca return 0, nil, mknodat(t, dirfd, addr, linux.FileMode(mode), dev) } -func mknodat(t *kernel.Task, dirfd int32, addr usermem.Addr, mode linux.FileMode, dev uint32) error { +func mknodat(t *kernel.Task, dirfd int32, addr hostarch.Addr, mode linux.FileMode, dev uint32) error { path, err := copyInPath(t, addr) if err != nil { return err @@ -165,7 +166,7 @@ func Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall return openat(t, linux.AT_FDCWD, addr, linux.O_WRONLY|linux.O_CREAT|linux.O_TRUNC, mode) } -func openat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) { +func openat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, flags uint32, mode uint) (uintptr, *kernel.SyscallControl, error) { path, err := copyInPath(t, pathAddr) if err != nil { return 0, nil, err @@ -217,7 +218,7 @@ func Renameat2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys return 0, nil, renameat(t, olddirfd, oldpathAddr, newdirfd, newpathAddr, flags) } -func renameat(t *kernel.Task, olddirfd int32, oldpathAddr usermem.Addr, newdirfd int32, newpathAddr usermem.Addr, flags uint32) error { +func renameat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags uint32) error { oldpath, err := copyInPath(t, oldpathAddr) if err != nil { return err @@ -250,7 +251,7 @@ func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall return 0, nil, rmdirat(t, linux.AT_FDCWD, pathAddr) } -func rmdirat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error { +func rmdirat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error { path, err := copyInPath(t, pathAddr) if err != nil { return err @@ -269,7 +270,7 @@ func Unlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal return 0, nil, unlinkat(t, linux.AT_FDCWD, pathAddr) } -func unlinkat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr) error { +func unlinkat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr) error { path, err := copyInPath(t, pathAddr) if err != nil { return err @@ -313,7 +314,7 @@ func Symlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys return 0, nil, symlinkat(t, targetAddr, newdirfd, linkpathAddr) } -func symlinkat(t *kernel.Task, targetAddr usermem.Addr, newdirfd int32, linkpathAddr usermem.Addr) error { +func symlinkat(t *kernel.Task, targetAddr hostarch.Addr, newdirfd int32, linkpathAddr hostarch.Addr) error { target, err := 
t.CopyInString(targetAddr, linux.PATH_MAX) if err != nil { return err diff --git a/pkg/sentry/syscalls/linux/vfs2/getdents.go b/pkg/sentry/syscalls/linux/vfs2/getdents.go index 5517595b5..b41a3056a 100644 --- a/pkg/sentry/syscalls/linux/vfs2/getdents.go +++ b/pkg/sentry/syscalls/linux/vfs2/getdents.go @@ -22,7 +22,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Getdents implements Linux syscall getdents(2). @@ -58,7 +59,7 @@ func getdents(t *kernel.Task, args arch.SyscallArguments, isGetdents64 bool) (ui type getdentsCallback struct { t *kernel.Task - addr usermem.Addr + addr hostarch.Addr remaining int isGetdents64 bool } @@ -69,7 +70,7 @@ var getdentsCallbackPool = sync.Pool{ }, } -func getGetdentsCallback(t *kernel.Task, addr usermem.Addr, size int, isGetdents64 bool) *getdentsCallback { +func getGetdentsCallback(t *kernel.Task, addr hostarch.Addr, size int, isGetdents64 bool) *getdentsCallback { cb := getdentsCallbackPool.Get().(*getdentsCallback) *cb = getdentsCallback{ t: t, @@ -102,9 +103,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error { return syserror.EINVAL } buf = cb.t.CopyScratchBuffer(size) - usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino) - usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff)) - usermem.ByteOrder.PutUint16(buf[16:18], uint16(size)) + hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino) + hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff)) + hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size)) buf[18] = dirent.Type copy(buf[19:], dirent.Name) // Zero out all remaining bytes in buf, including the NUL terminator @@ -136,9 +137,9 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error { return syserror.EINVAL } buf = cb.t.CopyScratchBuffer(size) - usermem.ByteOrder.PutUint64(buf[0:8], dirent.Ino) - usermem.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff)) - usermem.ByteOrder.PutUint16(buf[16:18], uint16(size)) + hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino) + hostarch.ByteOrder.PutUint64(buf[8:16], uint64(dirent.NextOff)) + hostarch.ByteOrder.PutUint16(buf[16:18], uint16(size)) copy(buf[18:], dirent.Name) // Zero out all remaining bytes in buf, including the NUL terminator // after dirent.Name and the zero padding byte between the name and @@ -155,7 +156,7 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error { // cb.remaining. return err } - cb.addr += usermem.Addr(n) + cb.addr += hostarch.Addr(n) cb.remaining -= n return nil } diff --git a/pkg/sentry/syscalls/linux/vfs2/mmap.go b/pkg/sentry/syscalls/linux/vfs2/mmap.go index 9d9dbf775..c961545f6 100644 --- a/pkg/sentry/syscalls/linux/vfs2/mmap.go +++ b/pkg/sentry/syscalls/linux/vfs2/mmap.go @@ -21,7 +21,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Mmap implements Linux syscall mmap(2). 
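The getdents64 callback above lays each record out by hand: inode and next offset as 64-bit words, the record length as a 16-bit word, one type byte, then the NUL-terminated name. A rough standalone sketch of that wire format, leaving out the record padding and scratch-buffer reuse of the real callback:

package main

import (
    "encoding/binary"
    "fmt"
)

// encodeDirent64 writes a simplified linux_dirent64-style record:
// inode (8 bytes), next offset (8), record length (2), type (1),
// then the NUL-terminated name.
func encodeDirent64(ino uint64, nextOff int64, typ uint8, name string) []byte {
    size := 8 + 8 + 2 + 1 + len(name) + 1 // header + name + NUL
    buf := make([]byte, size)
    binary.LittleEndian.PutUint64(buf[0:8], ino)
    binary.LittleEndian.PutUint64(buf[8:16], uint64(nextOff))
    binary.LittleEndian.PutUint16(buf[16:18], uint16(size))
    buf[18] = typ
    copy(buf[19:], name) // trailing byte stays zero as the NUL terminator
    return buf
}

func main() {
    rec := encodeDirent64(42, 1, 4 /* DT_DIR */, "subdir")
    fmt.Println(len(rec), rec[16:18]) // record length echoed in the header
}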
@@ -48,12 +49,12 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC Unmap: fixed, Map32Bit: map32bit, Private: private, - Perms: usermem.AccessType{ + Perms: hostarch.AccessType{ Read: linux.PROT_READ&prot != 0, Write: linux.PROT_WRITE&prot != 0, Execute: linux.PROT_EXEC&prot != 0, }, - MaxPerms: usermem.AnyAccess, + MaxPerms: hostarch.AnyAccess, GrowsDown: linux.MAP_GROWSDOWN&flags != 0, Precommit: linux.MAP_POPULATE&flags != 0, } diff --git a/pkg/sentry/syscalls/linux/vfs2/mount.go b/pkg/sentry/syscalls/linux/vfs2/mount.go index 769c9b92f..dd93430e2 100644 --- a/pkg/sentry/syscalls/linux/vfs2/mount.go +++ b/pkg/sentry/syscalls/linux/vfs2/mount.go @@ -20,7 +20,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Mount implements Linux syscall mount(2). @@ -33,11 +34,11 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall // For null-terminated strings related to mount(2), Linux copies in at most // a page worth of data. See fs/namespace.c:copy_mount_string(). - fsType, err := t.CopyInString(typeAddr, usermem.PageSize) + fsType, err := t.CopyInString(typeAddr, hostarch.PageSize) if err != nil { return 0, nil, err } - source, err := t.CopyInString(sourceAddr, usermem.PageSize) + source, err := t.CopyInString(sourceAddr, hostarch.PageSize) if err != nil { return 0, nil, err } @@ -53,7 +54,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall // character placement, and the address is passed to each file system. // Most file systems always treat this data as a string, though, and so // do all of the ones we implement. - data, err = t.CopyInString(dataAddr, usermem.PageSize) + data, err = t.CopyInString(dataAddr, hostarch.PageSize) if err != nil { return 0, nil, err } diff --git a/pkg/sentry/syscalls/linux/vfs2/path.go b/pkg/sentry/syscalls/linux/vfs2/path.go index 90a511d9a..2aaf1ed74 100644 --- a/pkg/sentry/syscalls/linux/vfs2/path.go +++ b/pkg/sentry/syscalls/linux/vfs2/path.go @@ -20,10 +20,11 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) -func copyInPath(t *kernel.Task, addr usermem.Addr) (fspath.Path, error) { +func copyInPath(t *kernel.Task, addr hostarch.Addr) (fspath.Path, error) { pathname, err := t.CopyInString(addr, linux.PATH_MAX) if err != nil { return fspath.Path{}, err diff --git a/pkg/sentry/syscalls/linux/vfs2/pipe.go b/pkg/sentry/syscalls/linux/vfs2/pipe.go index 6986e39fe..c6fc1954c 100644 --- a/pkg/sentry/syscalls/linux/vfs2/pipe.go +++ b/pkg/sentry/syscalls/linux/vfs2/pipe.go @@ -22,7 +22,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Pipe implements Linux syscall pipe(2). 
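Mmap above turns the PROT_* bits it receives into per-permission booleans. A small sketch of that translation, with a local struct standing in for hostarch.AccessType and the conventional PROT_* values:

package main

import "fmt"

// The usual Linux protection bits.
const (
    protRead  = 0x1
    protWrite = 0x2
    protExec  = 0x4
)

// accessType is a local stand-in for hostarch.AccessType.
type accessType struct {
    Read, Write, Execute bool
}

// protToAccess mirrors the Perms field construction in Mmap.
func protToAccess(prot int64) accessType {
    return accessType{
        Read:    prot&protRead != 0,
        Write:   prot&protWrite != 0,
        Execute: prot&protExec != 0,
    }
}

func main() {
    fmt.Printf("%+v\n", protToAccess(protRead|protWrite))
}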
@@ -38,7 +39,7 @@ func Pipe2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall return 0, nil, pipe2(t, addr, flags) } -func pipe2(t *kernel.Task, addr usermem.Addr, flags int32) error { +func pipe2(t *kernel.Task, addr hostarch.Addr, flags int32) error { if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 { return syserror.EINVAL } diff --git a/pkg/sentry/syscalls/linux/vfs2/poll.go b/pkg/sentry/syscalls/linux/vfs2/poll.go index c22e4ce54..a69c80edd 100644 --- a/pkg/sentry/syscalls/linux/vfs2/poll.go +++ b/pkg/sentry/syscalls/linux/vfs2/poll.go @@ -25,8 +25,9 @@ import ( "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" "gvisor.dev/gvisor/pkg/waiter" + + "gvisor.dev/gvisor/pkg/hostarch" ) // fileCap is the maximum allowable files for poll & select. This has no @@ -158,7 +159,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time. } // copyInPollFDs copies an array of struct pollfd unless nfds exceeds the max. -func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD, error) { +func copyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) { if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) { return nil, syserror.EINVAL } @@ -173,7 +174,7 @@ func copyInPollFDs(t *kernel.Task, addr usermem.Addr, nfds uint) ([]linux.PollFD return pfd, nil } -func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) { +func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) { pfd, err := copyInPollFDs(t, addr, nfds) if err != nil { return timeout, 0, err @@ -201,7 +202,7 @@ func doPoll(t *kernel.Task, addr usermem.Addr, nfds uint, timeout time.Duration) } // CopyInFDSet copies an fd set from select(2)/pselect(2). -func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) { +func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialByte int) ([]byte, error) { set := make([]byte, nBytes) if addr != 0 { @@ -218,7 +219,7 @@ func CopyInFDSet(t *kernel.Task, addr usermem.Addr, nBytes, nBitsInLastPartialBy return set, nil } -func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) { +func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) { if nfds < 0 || nfds > fileCap { return 0, syserror.EINVAL } @@ -368,7 +369,7 @@ func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration) // copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr. // // startNs must be from CLOCK_MONOTONIC. -func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error { +func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr hostarch.Addr) error { if timeout <= 0 { return nil } @@ -381,7 +382,7 @@ func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.D // copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr. // // startNs must be from CLOCK_MONOTONIC. 
-func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error { +func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr hostarch.Addr) error { if timeout <= 0 { return nil } @@ -396,7 +397,7 @@ func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Du // // +stateify savable type pollRestartBlock struct { - pfdAddr usermem.Addr + pfdAddr hostarch.Addr nfds uint timeout time.Duration } @@ -406,7 +407,7 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) { return poll(t, p.pfdAddr, p.nfds, p.timeout) } -func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) { +func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) { remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout) // On an interrupt poll(2) is restarted with the remaining timeout. if err == syserror.EINTR { @@ -530,7 +531,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca if _, err := maskStruct.CopyIn(t, maskWithSizeAddr); err != nil { return 0, nil, err } - if err := setTempSignalSet(t, usermem.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil { + if err := setTempSignalSet(t, hostarch.Addr(maskStruct.sigsetAddr), uint(maskStruct.sizeofSigset)); err != nil { return 0, nil, err } } @@ -551,7 +552,7 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca // returned value is the maximum that Duration will allow. // // If timespecAddr is NULL, the returned value is negative. -func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) { +func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.Duration, error) { // Use a negative Duration to indicate "no timeout". 
timeout := time.Duration(-1) if timespecAddr != 0 { @@ -567,7 +568,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.D return timeout, nil } -func setTempSignalSet(t *kernel.Task, maskAddr usermem.Addr, maskSize uint) error { +func setTempSignalSet(t *kernel.Task, maskAddr hostarch.Addr, maskSize uint) error { if maskAddr == 0 { return nil } diff --git a/pkg/sentry/syscalls/linux/vfs2/setstat.go b/pkg/sentry/syscalls/linux/vfs2/setstat.go index 903169dc2..c6330c21a 100644 --- a/pkg/sentry/syscalls/linux/vfs2/setstat.go +++ b/pkg/sentry/syscalls/linux/vfs2/setstat.go @@ -23,7 +23,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/limits" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) const chmodMask = 0777 | linux.S_ISUID | linux.S_ISGID | linux.S_ISVTX @@ -43,7 +44,7 @@ func Fchmodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return 0, nil, fchmodat(t, dirfd, pathAddr, mode) } -func fchmodat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error { +func fchmodat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error { path, err := copyInPath(t, pathAddr) if err != nil { return err @@ -102,7 +103,7 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return 0, nil, fchownat(t, dirfd, pathAddr, owner, group, flags) } -func fchownat(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, owner, group, flags int32) error { +func fchownat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, owner, group, flags int32) error { if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 { return syserror.EINVAL } @@ -327,7 +328,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, followFinalSymlink, &opts) } -func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error { +func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error { if timesAddr == 0 { opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME opts.Stat.Atime.Nsec = linux.UTIME_NOW @@ -391,7 +392,7 @@ func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys return 0, nil, setstatat(t, dirfd, path, shouldAllowEmptyPath, shouldFollowFinalSymlink(flags&linux.AT_SYMLINK_NOFOLLOW == 0), &opts) } -func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr usermem.Addr, opts *vfs.SetStatOptions) error { +func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr hostarch.Addr, opts *vfs.SetStatOptions) error { if timesAddr == 0 { opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME opts.Stat.Atime.Nsec = linux.UTIME_NOW diff --git a/pkg/sentry/syscalls/linux/vfs2/signal.go b/pkg/sentry/syscalls/linux/vfs2/signal.go index b89f34cdb..6163da103 100644 --- a/pkg/sentry/syscalls/linux/vfs2/signal.go +++ b/pkg/sentry/syscalls/linux/vfs2/signal.go @@ -21,11 +21,12 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // sharedSignalfd is shared between the two calls. 
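populateSetStatOptionsForUtimes above falls back to "set both timestamps to now" when the times pointer is NULL, matching utimensat(2) semantics. A sketch of that default, with local stand-ins for the linux constants and the vfs.SetStatOptions fields:

package main

import "fmt"

// Local stand-ins for linux.UTIME_NOW and linux.UTIME_OMIT: "now" asks the
// filesystem for the current time, "omit" leaves the timestamp unchanged.
const (
    utimeNow  = (1 << 30) - 1
    utimeOmit = (1 << 30) - 2
)

type timeSpec struct{ Sec, Nsec int64 }

// statTimes is a stand-in for the atime/mtime portion of vfs.SetStatOptions.
type statTimes struct {
    Atime, Mtime       timeSpec
    SetAtime, SetMtime bool
}

// defaultsForNilTimes mirrors the timesAddr == 0 branch: both timestamps
// are marked for update and set to "now".
func defaultsForNilTimes() statTimes {
    return statTimes{
        Atime:    timeSpec{Nsec: utimeNow},
        Mtime:    timeSpec{Nsec: utimeNow},
        SetAtime: true,
        SetMtime: true,
    }
}

func main() {
    fmt.Printf("%+v\n", defaultsForNilTimes())
}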
-func sharedSignalfd(t *kernel.Task, fd int32, sigset usermem.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) { +func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize uint, flags int32) (uintptr, *kernel.SyscallControl, error) { // Copy in the signal mask. mask, err := slinux.CopyInSigSet(t, sigset, sigsetsize) if err != nil { diff --git a/pkg/sentry/syscalls/linux/vfs2/socket.go b/pkg/sentry/syscalls/linux/vfs2/socket.go index 346fd1cea..a87a66146 100644 --- a/pkg/sentry/syscalls/linux/vfs2/socket.go +++ b/pkg/sentry/syscalls/linux/vfs2/socket.go @@ -31,6 +31,8 @@ import ( "gvisor.dev/gvisor/pkg/syserr" "gvisor.dev/gvisor/pkg/syserror" "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // minListenBacklog is the minimum reasonable backlog for listening sockets. @@ -116,7 +118,7 @@ type multipleMessageHeader64 struct { // CaptureAddress allocates memory for and copies a socket address structure // from the untrusted address space range. -func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) { +func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) { if addrlen > maxAddrLen { return nil, syserror.EINVAL } @@ -132,7 +134,7 @@ func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, // writeAddress writes a sockaddr structure and its length to an output buffer // in the unstrusted address space range. If the address is bigger than the // buffer, it is truncated. -func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error { +func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr hostarch.Addr, addrLenPtr hostarch.Addr) error { // Get the buffer length. var bufLen uint32 if _, err := primitive.CopyUint32In(t, addrLenPtr, &bufLen); err != nil { @@ -279,7 +281,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca // accept is the implementation of the accept syscall. It is called by accept // and accept4 syscall handlers. -func accept(t *kernel.Task, fd int32, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) { +func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) { // Check that no unsupported flags are passed in. if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 { return 0, syserror.EINVAL @@ -475,7 +477,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy // getSockOpt tries to handle common socket options, or dispatches to a specific // socket implementation. 
-func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr usermem.Addr, len int) (marshal.Marshallable, *syserr.Error) { +func getSockOpt(t *kernel.Task, s socket.SocketVFS2, level, name int, optValAddr hostarch.Addr, len int) (marshal.Marshallable, *syserr.Error) { if level == linux.SOL_SOCKET { switch name { case linux.SO_TYPE, linux.SO_DOMAIN, linux.SO_PROTOCOL: @@ -738,7 +740,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return uintptr(count), nil, nil } -func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) { +func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) { // Capture the message header and io vectors. var msg MessageHeader64 if _, err := msg.CopyIn(t, msgPtr); err != nil { @@ -748,7 +750,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla if msg.IovLen > linux.UIO_MAXIOV { return 0, syserror.EMSGSIZE } - dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ AddressSpaceActive: true, }) if err != nil { @@ -799,7 +801,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla // Copy the address to the caller. if msg.NameLen != 0 { - if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil { + if err := writeAddress(t, sender, senderLen, hostarch.Addr(msg.Name), hostarch.Addr(msgPtr+nameLenOffset)); err != nil { return 0, err } } @@ -809,7 +811,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla return 0, err } if len(controlData) > 0 { - if _, err := t.CopyOutBytes(usermem.Addr(msg.Control), controlData); err != nil { + if _, err := t.CopyOutBytes(hostarch.Addr(msg.Control), controlData); err != nil { return 0, err } } @@ -824,7 +826,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr usermem.Addr, fla // recvFrom is the implementation of the recvfrom syscall. It is called by // recvfrom and recv syscall handlers. -func recvFrom(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) { +func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) { if int(bufLen) < 0 { return 0, syserror.EINVAL } @@ -1000,7 +1002,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc return uintptr(count), nil, nil } -func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr usermem.Addr, flags int32) (uintptr, error) { +func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescription, msgPtr hostarch.Addr, flags int32) (uintptr, error) { // Capture the message header. 
var msg MessageHeader64 if _, err := msg.CopyIn(t, msgPtr); err != nil { @@ -1014,7 +1016,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio return 0, syserror.ENOBUFS } controlData = make([]byte, msg.ControlLen) - if _, err := t.CopyInBytes(usermem.Addr(msg.Control), controlData); err != nil { + if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil { return 0, err } } @@ -1023,7 +1025,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio var to []byte if msg.NameLen != 0 { var err error - to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen) + to, err = CaptureAddress(t, hostarch.Addr(msg.Name), msg.NameLen) if err != nil { return 0, err } @@ -1033,7 +1035,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio if msg.IovLen > linux.UIO_MAXIOV { return 0, syserror.EMSGSIZE } - src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ AddressSpaceActive: true, }) if err != nil { @@ -1067,7 +1069,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio // sendTo is the implementation of the sendto syscall. It is called by sendto // and send syscall handlers. -func sendTo(t *kernel.Task, fd int32, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) { +func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) { bl := int(bufLen) if bl < 0 { return 0, syserror.EINVAL diff --git a/pkg/sentry/syscalls/linux/vfs2/stat.go b/pkg/sentry/syscalls/linux/vfs2/stat.go index 0f5d5189c..69e77fa99 100644 --- a/pkg/sentry/syscalls/linux/vfs2/stat.go +++ b/pkg/sentry/syscalls/linux/vfs2/stat.go @@ -24,7 +24,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // Stat implements Linux syscall stat(2). 
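writeAddress, used by accept and recvSingleMsg above, copies at most the caller-supplied buffer length but still reports the address's full size, following the usual accept(2)/getsockname(2) truncation convention. A sketch with plain slices standing in for the user-memory copies:

package main

import "fmt"

// writeAddressSketch copies at most bufLen bytes of addr into out and
// returns the full address length so the caller can detect truncation.
func writeAddressSketch(addr []byte, bufLen uint32, out []byte) uint32 {
    n := uint32(len(addr))
    if n > bufLen {
        n = bufLen // truncate to the caller's buffer
    }
    copy(out[:n], addr[:n])
    return uint32(len(addr)) // full length, even when truncated
}

func main() {
    sockaddr := []byte{0x02, 0x00, 0x1f, 0x90, 127, 0, 0, 1} // AF_INET-ish bytes
    buf := make([]byte, 4)
    fmt.Println(writeAddressSketch(sockaddr, uint32(len(buf)), buf), buf)
}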
@@ -50,7 +51,7 @@ func Newfstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy return 0, nil, fstatat(t, dirfd, pathAddr, statAddr, flags) } -func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr usermem.Addr, flags int32) error { +func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flags int32) error { if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 { return syserror.EINVAL } @@ -264,7 +265,7 @@ func Faccessat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys return 0, nil, accessAt(t, dirfd, addr, mode) } -func accessAt(t *kernel.Task, dirfd int32, pathAddr usermem.Addr, mode uint) error { +func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) error { const rOK = 4 const wOK = 2 const xOK = 1 @@ -312,7 +313,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy return readlinkat(t, dirfd, pathAddr, bufAddr, size) } -func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr usermem.Addr, size uint) (uintptr, *kernel.SyscallControl, error) { +func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr hostarch.Addr, size uint) (uintptr, *kernel.SyscallControl, error) { if int(size) <= 0 { return 0, nil, syserror.EINVAL } diff --git a/pkg/sentry/syscalls/linux/vfs2/xattr.go b/pkg/sentry/syscalls/linux/vfs2/xattr.go index e05723ef9..c261050c6 100644 --- a/pkg/sentry/syscalls/linux/vfs2/xattr.go +++ b/pkg/sentry/syscalls/linux/vfs2/xattr.go @@ -23,7 +23,8 @@ import ( "gvisor.dev/gvisor/pkg/sentry/kernel" "gvisor.dev/gvisor/pkg/sentry/vfs" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" + + "gvisor.dev/gvisor/pkg/hostarch" ) // ListXattr implements Linux syscall listxattr(2). @@ -291,7 +292,7 @@ func Fremovexattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel. 
return 0, nil, file.RemoveXattr(t, name) } -func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) { +func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) { name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1) if err != nil { if err == syserror.ENAMETOOLONG { @@ -305,7 +306,7 @@ func copyInXattrName(t *kernel.Task, nameAddr usermem.Addr) (string, error) { return name, nil } -func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, names []string) (int, error) { +func copyOutXattrNameList(t *kernel.Task, listAddr hostarch.Addr, size uint, names []string) (int, error) { if size > linux.XATTR_LIST_MAX { size = linux.XATTR_LIST_MAX } @@ -327,7 +328,7 @@ func copyOutXattrNameList(t *kernel.Task, listAddr usermem.Addr, size uint, name return t.CopyOutBytes(listAddr, buf.Bytes()) } -func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string, error) { +func copyInXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint) (string, error) { if size > linux.XATTR_SIZE_MAX { return "", syserror.E2BIG } @@ -338,7 +339,7 @@ func copyInXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint) (string return gohacks.StringFromImmutableBytes(buf), nil } -func copyOutXattrValue(t *kernel.Task, valueAddr usermem.Addr, size uint, value string) (int, error) { +func copyOutXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint, value string) (int, error) { if size > linux.XATTR_SIZE_MAX { size = linux.XATTR_SIZE_MAX } diff --git a/pkg/sentry/vfs/BUILD b/pkg/sentry/vfs/BUILD index df4990854..ac60fe8bf 100644 --- a/pkg/sentry/vfs/BUILD +++ b/pkg/sentry/vfs/BUILD @@ -99,6 +99,7 @@ go_library( "//pkg/fdnotifier", "//pkg/fspath", "//pkg/gohacks", + "//pkg/hostarch", "//pkg/log", "//pkg/refs", "//pkg/refsvfs2", diff --git a/pkg/sentry/vfs/anonfs.go b/pkg/sentry/vfs/anonfs.go index 3caf417ca..f48817132 100644 --- a/pkg/sentry/vfs/anonfs.go +++ b/pkg/sentry/vfs/anonfs.go @@ -20,10 +20,10 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/kernel/auth" "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" "gvisor.dev/gvisor/pkg/syserror" - "gvisor.dev/gvisor/pkg/usermem" ) // NewAnonVirtualDentry returns a VirtualDentry with the given synthetic name, @@ -43,7 +43,7 @@ func (vfs *VirtualFilesystem) NewAnonVirtualDentry(name string) VirtualDentry { } const ( - anonfsBlockSize = usermem.PageSize // via fs/libfs.c:pseudo_fs_fill_super() + anonfsBlockSize = hostarch.PageSize // via fs/libfs.c:pseudo_fs_fill_super() // Mode, UID, and GID for a generic anonfs file. 
anonFileMode = 0600 // no type is correct diff --git a/pkg/sentry/vfs/filesystem_impl_util.go b/pkg/sentry/vfs/filesystem_impl_util.go index 2620cf975..15b234d61 100644 --- a/pkg/sentry/vfs/filesystem_impl_util.go +++ b/pkg/sentry/vfs/filesystem_impl_util.go @@ -18,7 +18,7 @@ import ( "strings" "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/hostarch" ) // GenericParseMountOptions parses a comma-separated list of options of the @@ -50,7 +50,7 @@ func GenericParseMountOptions(str string) map[string]string { func GenericStatFS(fsMagic uint64) linux.Statfs { return linux.Statfs{ Type: fsMagic, - BlockSize: usermem.PageSize, + BlockSize: hostarch.PageSize, NameLength: linux.NAME_MAX, } } diff --git a/pkg/sentry/vfs/inotify.go b/pkg/sentry/vfs/inotify.go index 32fa01578..49d29e20b 100644 --- a/pkg/sentry/vfs/inotify.go +++ b/pkg/sentry/vfs/inotify.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/sentry/arch" "gvisor.dev/gvisor/pkg/sentry/uniqueid" "gvisor.dev/gvisor/pkg/sync" @@ -256,7 +257,7 @@ func (i *Inotify) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallAr n += uint32(e.sizeOf()) } var buf [4]byte - usermem.ByteOrder.PutUint32(buf[:], n) + hostarch.ByteOrder.PutUint32(buf[:], n) _, err := uio.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{}) return 0, err @@ -683,10 +684,10 @@ func (e *Event) sizeOf() int { // construct the output. We use a buffer allocated ahead of time for // performance. buf must be at least inotifyEventBaseSize bytes. func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) { - usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) - usermem.ByteOrder.PutUint32(buf[4:], e.mask) - usermem.ByteOrder.PutUint32(buf[8:], e.cookie) - usermem.ByteOrder.PutUint32(buf[12:], e.len) + hostarch.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) + hostarch.ByteOrder.PutUint32(buf[4:], e.mask) + hostarch.ByteOrder.PutUint32(buf[8:], e.cookie) + hostarch.ByteOrder.PutUint32(buf[12:], e.len) writeLen := 0 |
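Event.CopyTo above serializes the fixed inotify_event header as four host-order 32-bit words (wd, mask, cookie, name length) before the optional name. A standalone sketch of that header layout, assuming a little-endian host and omitting the name padding:

package main

import (
    "encoding/binary"
    "fmt"
)

// encodeInotifyHeader writes the 16-byte inotify_event header: watch
// descriptor, event mask, cookie, and the length of the name field that
// would follow.
func encodeInotifyHeader(wd int32, mask, cookie, nameLen uint32) []byte {
    buf := make([]byte, 16)
    binary.LittleEndian.PutUint32(buf[0:], uint32(wd))
    binary.LittleEndian.PutUint32(buf[4:], mask)
    binary.LittleEndian.PutUint32(buf[8:], cookie)
    binary.LittleEndian.PutUint32(buf[12:], nameLen)
    return buf
}

func main() {
    hdr := encodeInotifyHeader(1, 0x100 /* IN_CREATE */, 0, 16)
    fmt.Println(len(hdr), hdr)
}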