author     gVisor bot <gvisor-bot@google.com>    2021-03-29 20:35:44 +0000
committer  gVisor bot <gvisor-bot@google.com>    2021-03-29 20:35:44 +0000
commit     08cc017c088017546ed712cce700bf4374c864c0 (patch)
tree       af024e69d8855f4f867ef435ced35532b368a981 /pkg/sentry/platform/kvm
parent     6a422755602daeaef4be60969c1acddc8b7b3041 (diff)
parent     8a2f7e716dcc62f04d2808e8ade34941c94fc956 (diff)
Merge release-20210322.0-29-g8a2f7e716 (automated)
Diffstat (limited to 'pkg/sentry/platform/kvm')
-rw-r--r--  pkg/sentry/platform/kvm/address_space.go          18
-rw-r--r--  pkg/sentry/platform/kvm/bluepill_fault.go           4
-rw-r--r--  pkg/sentry/platform/kvm/context.go                  6
-rw-r--r--  pkg/sentry/platform/kvm/kvm.go                     10
-rw-r--r--  pkg/sentry/platform/kvm/machine.go                  6
-rw-r--r--  pkg/sentry/platform/kvm/machine_amd64.go           46
-rw-r--r--  pkg/sentry/platform/kvm/machine_arm64.go           16
-rw-r--r--  pkg/sentry/platform/kvm/machine_arm64_unsafe.go    14
-rw-r--r--  pkg/sentry/platform/kvm/physical_map.go            10
-rw-r--r--  pkg/sentry/platform/kvm/virtual_map.go              6
10 files changed, 68 insertions, 68 deletions
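
The hunks below are one mechanical substitution: the address, page-size, and access-type helpers previously imported from gvisor.dev/gvisor/pkg/usermem now come from gvisor.dev/gvisor/pkg/hostarch under the same names. A minimal sketch of the replacement identifiers as they are used in the hunks below; the program itself is illustrative only and is not part of the change:

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/hostarch" // previously gvisor.dev/gvisor/pkg/usermem
)

func main() {
    // hostarch.Addr and hostarch.PageSize replace usermem.Addr and
    // usermem.PageSize; RoundUp still reports overflow via its second result,
    // as in physical_map.go below.
    addr := hostarch.Addr(0x7f0000001234)
    rounded, ok := addr.RoundUp()
    fmt.Println(rounded, ok)

    // hostarch.AccessType replaces usermem.AccessType, along with the
    // NoAccess/AnyAccess/ReadWrite/Execute convenience values used below.
    at := hostarch.AccessType{Read: true, Write: false, Execute: false}
    fmt.Println(at, hostarch.NoAccess, hostarch.AnyAccess)
}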
diff --git a/pkg/sentry/platform/kvm/address_space.go b/pkg/sentry/platform/kvm/address_space.go
index 25c21e843..5524e8727 100644
--- a/pkg/sentry/platform/kvm/address_space.go
+++ b/pkg/sentry/platform/kvm/address_space.go
@@ -18,11 +18,11 @@ import (
     "sync/atomic"

     "gvisor.dev/gvisor/pkg/atomicbitops"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     "gvisor.dev/gvisor/pkg/sentry/memmap"
     "gvisor.dev/gvisor/pkg/sentry/platform"
     "gvisor.dev/gvisor/pkg/sync"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 // dirtySet tracks vCPUs for invalidation.
@@ -118,7 +118,7 @@ type hostMapEntry struct {
 // +checkescape:hard,stack
 //
 //go:nosplit
-func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) {
+func (as *addressSpace) mapLocked(addr hostarch.Addr, m hostMapEntry, at hostarch.AccessType) (inv bool) {
     for m.length > 0 {
         physical, length, ok := translateToPhysical(m.addr)
         if !ok {
@@ -144,14 +144,14 @@ func (as *addressSpace) mapLocked(addr usermem.Addr, m hostMapEntry, at usermem.
         }, physical) || inv
         m.addr += length
         m.length -= length
-        addr += usermem.Addr(length)
+        addr += hostarch.Addr(length)
     }

     return inv
 }

 // MapFile implements platform.AddressSpace.MapFile.
-func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.FileRange, at usermem.AccessType, precommit bool) error {
+func (as *addressSpace) MapFile(addr hostarch.Addr, f memmap.File, fr memmap.FileRange, at hostarch.AccessType, precommit bool) error {
     as.mu.Lock()
     defer as.mu.Unlock()

@@ -165,7 +165,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
     // We don't execute from application file-mapped memory, and guest page
     // tables don't care if we have execute permission (but they do need pages
     // to be readable).
-    bs, err := f.MapInternal(fr, usermem.AccessType{
+    bs, err := f.MapInternal(fr, hostarch.AccessType{
         Read:  at.Read || at.Execute || precommit,
         Write: at.Write,
     })
@@ -187,7 +187,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
         // lookup in our host page tables for this translation.
         if precommit {
             s := b.ToSlice()
-            for i := 0; i < len(s); i += usermem.PageSize {
+            for i := 0; i < len(s); i += hostarch.PageSize {
                 _ = s[i] // Touch to commit.
             }
         }
@@ -201,7 +201,7 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
             length: uintptr(b.Len()),
         }, at)
         inv = inv || prev
-        addr += usermem.Addr(b.Len())
+        addr += hostarch.Addr(b.Len())
     }
     if inv {
         as.invalidate()
@@ -215,12 +215,12 @@ func (as *addressSpace) MapFile(addr usermem.Addr, f memmap.File, fr memmap.File
 // +checkescape:hard,stack
 //
 //go:nosplit
-func (as *addressSpace) unmapLocked(addr usermem.Addr, length uint64) bool {
+func (as *addressSpace) unmapLocked(addr hostarch.Addr, length uint64) bool {
     return as.pageTables.Unmap(addr, uintptr(length))
 }

 // Unmap unmaps the given range by calling pagetables.PageTables.Unmap.
-func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) {
+func (as *addressSpace) Unmap(addr hostarch.Addr, length uint64) {
     as.mu.Lock()
     defer as.mu.Unlock()
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index 37c53fa02..28a613a54 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -18,7 +18,7 @@ import (
     "sync/atomic"

     "golang.org/x/sys/unix"
-    "gvisor.dev/gvisor/pkg/usermem"
+    "gvisor.dev/gvisor/pkg/hostarch"
 )

 const (
@@ -47,7 +47,7 @@ func yield() {
 //
 //go:nosplit
 func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virtualStart, physicalStart, length uintptr, ok bool) {
-    alignedPhysical := physical &^ uintptr(usermem.PageSize-1)
+    alignedPhysical := physical &^ uintptr(hostarch.PageSize-1)
     for _, pr := range phyRegions {
         end := pr.physical + pr.length
         if physical < pr.physical || physical >= end {
diff --git a/pkg/sentry/platform/kvm/context.go b/pkg/sentry/platform/kvm/context.go
index 706fa53dc..f4d4473a8 100644
--- a/pkg/sentry/platform/kvm/context.go
+++ b/pkg/sentry/platform/kvm/context.go
@@ -18,11 +18,11 @@ import (
     "sync/atomic"

     pkgcontext "gvisor.dev/gvisor/pkg/context"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/sentry/arch"
     "gvisor.dev/gvisor/pkg/sentry/platform"
     "gvisor.dev/gvisor/pkg/sentry/platform/interrupt"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 // context is an implementation of the platform context.
@@ -40,7 +40,7 @@ type context struct {
 }

 // Switch runs the provided context in the given address space.
-func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, usermem.AccessType, error) {
+func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac arch.Context, _ int32) (*arch.SignalInfo, hostarch.AccessType, error) {
     as := mm.AddressSpace()
     localAS := as.(*addressSpace)

@@ -50,7 +50,7 @@ func (c *context) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac a
     // Enable interrupts (i.e. calls to vCPU.Notify).
     if !c.interrupt.Enable(cpu) {
         c.machine.Put(cpu) // Already preempted.
-        return nil, usermem.NoAccess, platform.ErrContextInterrupt
+        return nil, hostarch.NoAccess, platform.ErrContextInterrupt
     }

     // Set the active address space.
diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go
index 92c05a9ad..aac0fdffe 100644
--- a/pkg/sentry/platform/kvm/kvm.go
+++ b/pkg/sentry/platform/kvm/kvm.go
@@ -20,11 +20,11 @@ import (
     "os"

     "golang.org/x/sys/unix"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     "gvisor.dev/gvisor/pkg/sentry/platform"
     "gvisor.dev/gvisor/pkg/sync"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 // userMemoryRegion is a region of physical memory.
@@ -146,13 +146,13 @@ func (*KVM) MapUnit() uint64 {
 }

 // MinUserAddress returns the lowest available address.
-func (*KVM) MinUserAddress() usermem.Addr {
-    return usermem.PageSize
+func (*KVM) MinUserAddress() hostarch.Addr {
+    return hostarch.PageSize
 }

 // MaxUserAddress returns the first address that may not be used.
-func (*KVM) MaxUserAddress() usermem.Addr {
-    return usermem.Addr(ring0.MaximumUserAddress)
+func (*KVM) MaxUserAddress() hostarch.Addr {
+    return hostarch.Addr(ring0.MaximumUserAddress)
 }

 // NewAddressSpace returns a new pagetable root.
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 5d586f257..b3d4188a3 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -21,13 +21,13 @@ import (

     "golang.org/x/sys/unix"
     "gvisor.dev/gvisor/pkg/atomicbitops"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/log"
     "gvisor.dev/gvisor/pkg/procid"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     ktime "gvisor.dev/gvisor/pkg/sentry/time"
     "gvisor.dev/gvisor/pkg/sync"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 // machine contains state associated with the VM as a whole.
@@ -227,9 +227,9 @@ func newMachine(vm int) (*machine, error) {
     applyPhysicalRegions(func(pr physicalRegion) bool {
         // Map everything in the lower half.
         m.kernel.PageTables.Map(
-            usermem.Addr(pr.virtual),
+            hostarch.Addr(pr.virtual),
             pr.length,
-            pagetables.MapOpts{AccessType: usermem.AnyAccess},
+            pagetables.MapOpts{AccessType: hostarch.AnyAccess},
             pr.physical)

         return true // Keep iterating.
diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go
index 3af96c7e5..e8e209249 100644
--- a/pkg/sentry/platform/kvm/machine_amd64.go
+++ b/pkg/sentry/platform/kvm/machine_amd64.go
@@ -24,13 +24,13 @@ import (

     "golang.org/x/sys/unix"
     "gvisor.dev/gvisor/pkg/cpuid"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     "gvisor.dev/gvisor/pkg/sentry/arch"
     "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
     "gvisor.dev/gvisor/pkg/sentry/platform"
     ktime "gvisor.dev/gvisor/pkg/sentry/time"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 // initArchState initializes architecture-specific state.
@@ -41,7 +41,7 @@ func (m *machine) initArchState() error {
         unix.SYS_IOCTL,
         uintptr(m.fd),
         _KVM_SET_TSS_ADDR,
-        uintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 {
+        uintptr(reservedMemory-(3*hostarch.PageSize))); errno != 0 {
         return errno
     }

@@ -256,19 +256,19 @@ func (c *vCPU) setSystemTime() error {
 // nonCanonical generates a canonical address return.
 //
 //go:nosplit
-func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
     *info = arch.SignalInfo{
         Signo: signal,
         Code:  arch.SignalInfoKernel,
     }
     info.SetAddr(addr) // Include address.
-    return usermem.NoAccess, platform.ErrContextSignal
+    return hostarch.NoAccess, platform.ErrContextSignal
 }

 // fault generates an appropriate fault return.
 //
 //go:nosplit
-func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
     bluepill(c) // Probably no-op, but may not be.
     faultAddr := ring0.ReadCR2()
     code, user := c.ErrorCode()
@@ -276,12 +276,12 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
         // The last fault serviced by this CPU was not a user
         // fault, so we can't reliably trust the faultAddr or
         // the code provided here. We need to re-execute.
-        return usermem.NoAccess, platform.ErrContextInterrupt
+        return hostarch.NoAccess, platform.ErrContextInterrupt
     }

     // Reset the pointed SignalInfo.
     *info = arch.SignalInfo{Signo: signal}
     info.SetAddr(uint64(faultAddr))
-    accessType := usermem.AccessType{
+    accessType := hostarch.AccessType{
         Read:    code&(1<<1) == 0,
         Write:   code&(1<<1) != 0,
         Execute: code&(1<<4) != 0,
@@ -310,14 +310,14 @@ func loadByte(ptr *byte) byte {
 //go:nosplit
 func prefaultFloatingPointState(data *fpu.State) {
     size := len(*data)
-    for i := 0; i < size; i += usermem.PageSize {
+    for i := 0; i < size; i += hostarch.PageSize {
         loadByte(&(*data)[i])
     }
     loadByte(&(*data)[size-1])
 }

 // SwitchToUser unpacks architectural-details.
-func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) {
     // Check for canonical addresses.
     if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Rip) {
         return nonCanonical(regs.Rip, int32(unix.SIGSEGV), info)
@@ -353,7 +353,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
     switch vector {
     case ring0.Syscall, ring0.SyscallInt80:
         // Fast path: system call executed.
-        return usermem.NoAccess, nil
+        return hostarch.NoAccess, nil

     case ring0.PageFault:
         return c.fault(int32(unix.SIGSEGV), info)
@@ -364,7 +364,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  1, // TRAP_BRKPT (breakpoint).
         }
         info.SetAddr(switchOpts.Registers.Rip) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.GeneralProtectionFault,
         ring0.SegmentNotPresent,
@@ -380,9 +380,9 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             // When CPUID faulting is enabled, we will generate a #GP(0) when
             // userspace executes a CPUID instruction. This is handled above,
             // because we need to be able to map and read user memory.
-            return usermem.AccessType{}, platform.ErrContextSignalCPUID
+            return hostarch.AccessType{}, platform.ErrContextSignalCPUID
         }
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.InvalidOpcode:
         *info = arch.SignalInfo{
@@ -390,7 +390,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  1, // ILL_ILLOPC (illegal opcode).
         }
         info.SetAddr(switchOpts.Registers.Rip) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.DivideByZero:
         *info = arch.SignalInfo{
@@ -398,7 +398,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  1, // FPE_INTDIV (divide by zero).
         }
         info.SetAddr(switchOpts.Registers.Rip) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.Overflow:
         *info = arch.SignalInfo{
@@ -406,7 +406,7 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  2, // FPE_INTOVF (integer overflow).
         }
         info.SetAddr(switchOpts.Registers.Rip) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.X87FloatingPointException,
         ring0.SIMDFloatingPointException:
@@ -415,17 +415,17 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  7, // FPE_FLTINV (invalid operation).
         }
         info.SetAddr(switchOpts.Registers.Rip) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal

     case ring0.Vector(bounce): // ring0.VirtualizationException
-        return usermem.NoAccess, platform.ErrContextInterrupt
+        return hostarch.NoAccess, platform.ErrContextInterrupt

     case ring0.AlignmentCheck:
         *info = arch.SignalInfo{
             Signo: int32(unix.SIGBUS),
             Code:  2, // BUS_ADRERR (physical address does not exist).
         }
-        return usermem.NoAccess, platform.ErrContextSignal
+        return hostarch.NoAccess, platform.ErrContextSignal

     case ring0.NMI:
         // An NMI is generated only when a fault is not servicable by
@@ -471,9 +471,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
             panic("impossible translation")
         }
         pageTable.Map(
-            usermem.Addr(ring0.KernelStartAddress|r.virtual),
+            hostarch.Addr(ring0.KernelStartAddress|r.virtual),
             r.length,
-            pagetables.MapOpts{AccessType: usermem.Execute},
+            pagetables.MapOpts{AccessType: hostarch.Execute},
             physical)
     }
     })
@@ -484,9 +484,9 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
             panic("impossible translation")
         }
         pageTable.Map(
-            usermem.Addr(ring0.KernelStartAddress|start),
+            hostarch.Addr(ring0.KernelStartAddress|start),
             regionLen,
-            pagetables.MapOpts{AccessType: usermem.ReadWrite},
+            pagetables.MapOpts{AccessType: hostarch.ReadWrite},
             physical)
     }
 }
diff --git a/pkg/sentry/platform/kvm/machine_arm64.go b/pkg/sentry/platform/kvm/machine_arm64.go
index 2edc9d1b2..03e84d804 100644
--- a/pkg/sentry/platform/kvm/machine_arm64.go
+++ b/pkg/sentry/platform/kvm/machine_arm64.go
@@ -17,12 +17,12 @@ package kvm

 import (
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     "gvisor.dev/gvisor/pkg/sentry/arch"
     "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
     "gvisor.dev/gvisor/pkg/sentry/platform"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 type vCPUArchState struct {
@@ -53,9 +53,9 @@ const (
 func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
     applyPhysicalRegions(func(pr physicalRegion) bool {
         pageTable.Map(
-            usermem.Addr(ring0.KernelStartAddress|pr.virtual),
+            hostarch.Addr(ring0.KernelStartAddress|pr.virtual),
             pr.length,
-            pagetables.MapOpts{AccessType: usermem.AnyAccess, Global: true},
+            pagetables.MapOpts{AccessType: hostarch.AnyAccess, Global: true},
             pr.physical)

         return true // Keep iterating.
@@ -117,13 +117,13 @@ func availableRegionsForSetMem() (phyRegions []physicalRegion) {
 // nonCanonical generates a canonical address return.
 //
 //go:nosplit
-func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func nonCanonical(addr uint64, signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
     *info = arch.SignalInfo{
         Signo: signal,
         Code:  arch.SignalInfoKernel,
     }
     info.SetAddr(addr) // Include address.
-    return usermem.NoAccess, platform.ErrContextSignal
+    return hostarch.NoAccess, platform.ErrContextSignal
 }

 // isInstructionAbort returns true if it is an instruction abort.
@@ -148,7 +148,7 @@ func isWriteFault(code uint64) bool {
 // fault generates an appropriate fault return.
 //
 //go:nosplit
-func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (hostarch.AccessType, error) {
     bluepill(c) // Probably no-op, but may not be.
     faultAddr := c.GetFaultAddr()
     code, user := c.ErrorCode()
@@ -157,7 +157,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
         // The last fault serviced by this CPU was not a user
         // fault, so we can't reliably trust the faultAddr or
         // the code provided here. We need to re-execute.
-        return usermem.NoAccess, platform.ErrContextInterrupt
+        return hostarch.NoAccess, platform.ErrContextInterrupt
     }

     // Reset the pointed SignalInfo.
@@ -174,7 +174,7 @@ func (c *vCPU) fault(signal int32, info *arch.SignalInfo) (usermem.AccessType, e
         info.Code = 2
     }

-    accessType := usermem.AccessType{
+    accessType := hostarch.AccessType{
         Read:    !isWriteFault(uint64(code)),
         Write:   isWriteFault(uint64(code)),
         Execute: isInstructionAbort(uint64(code)),
diff --git a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
index e7d5f3193..634e55ec0 100644
--- a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
@@ -23,12 +23,12 @@ import (
     "unsafe"

     "golang.org/x/sys/unix"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/ring0"
     "gvisor.dev/gvisor/pkg/ring0/pagetables"
     "gvisor.dev/gvisor/pkg/sentry/arch"
     "gvisor.dev/gvisor/pkg/sentry/arch/fpu"
     "gvisor.dev/gvisor/pkg/sentry/platform"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 type kvmVcpuInit struct {
@@ -209,7 +209,7 @@ func (c *vCPU) getOneRegister(reg *kvmOneReg) error {
 }

 // SwitchToUser unpacks architectural-details.
-func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (usermem.AccessType, error) {
+func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo) (hostarch.AccessType, error) {
     // Check for canonical addresses.
     if regs := switchOpts.Registers; !ring0.IsCanonical(regs.Pc) {
         return nonCanonical(regs.Pc, int32(unix.SIGSEGV), info)
@@ -246,13 +246,13 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
     switch vector {
     case ring0.Syscall:
         // Fast path: system call executed.
-        return usermem.NoAccess, nil
+        return hostarch.NoAccess, nil
     case ring0.PageFault:
         return c.fault(int32(unix.SIGSEGV), info)
     case ring0.El0ErrNMI:
         return c.fault(int32(unix.SIGBUS), info)
     case ring0.Vector(bounce): // ring0.VirtualizationException.
-        return usermem.NoAccess, platform.ErrContextInterrupt
+        return hostarch.NoAccess, platform.ErrContextInterrupt
     case ring0.El0SyncUndef:
         return c.fault(int32(unix.SIGILL), info)
     case ring0.El0SyncDbg:
@@ -261,16 +261,16 @@ func (c *vCPU) SwitchToUser(switchOpts ring0.SwitchOpts, info *arch.SignalInfo)
             Code:  1, // TRAP_BRKPT (breakpoint).
         }
         info.SetAddr(switchOpts.Registers.Pc) // Include address.
-        return usermem.AccessType{}, platform.ErrContextSignal
+        return hostarch.AccessType{}, platform.ErrContextSignal
     case ring0.El0SyncSpPc:
         *info = arch.SignalInfo{
             Signo: int32(unix.SIGBUS),
             Code:  2, // BUS_ADRERR (physical address does not exist).
         }
-        return usermem.NoAccess, platform.ErrContextSignal
+        return hostarch.NoAccess, platform.ErrContextSignal
     case ring0.El0SyncSys,
         ring0.El0SyncWfx:
-        return usermem.NoAccess, nil // skip for now.
+        return hostarch.NoAccess, nil // skip for now.
     default:
         panic(fmt.Sprintf("unexpected vector: 0x%x", vector))
     }
diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go
index 7376d8b8d..d812e6c26 100644
--- a/pkg/sentry/platform/kvm/physical_map.go
+++ b/pkg/sentry/platform/kvm/physical_map.go
@@ -19,9 +19,9 @@ import (
     "sort"

     "golang.org/x/sys/unix"
+    "gvisor.dev/gvisor/pkg/hostarch"
     "gvisor.dev/gvisor/pkg/log"
     "gvisor.dev/gvisor/pkg/ring0"
-    "gvisor.dev/gvisor/pkg/usermem"
 )

 type region struct {
@@ -81,7 +81,7 @@ func fillAddressSpace() (excludedRegions []region) {
     // faultBlockSize, potentially causing up to faultBlockSize bytes in
     // internal fragmentation for each physical region. So we need to
     // account for this properly during allocation.
-    requiredAddr, ok := usermem.Addr(vSize - pSize + faultBlockSize).RoundUp()
+    requiredAddr, ok := hostarch.Addr(vSize - pSize + faultBlockSize).RoundUp()
     if !ok {
         panic(fmt.Sprintf(
             "overflow for vSize (%x) - pSize (%x) + faultBlockSize (%x)",
@@ -99,7 +99,7 @@ func fillAddressSpace() (excludedRegions []region) {
             0, 0)
         if errno != 0 {
             // Attempt half the size; overflow not possible.
-            currentAddr, _ := usermem.Addr(current >> 1).RoundUp()
+            currentAddr, _ := hostarch.Addr(current >> 1).RoundUp()
             current = uintptr(currentAddr)
             continue
         }
@@ -134,8 +134,8 @@ func computePhysicalRegions(excludedRegions []region) (physicalRegions []physica
             return
         }
         if virtual == 0 {
-            virtual += usermem.PageSize
-            length -= usermem.PageSize
+            virtual += hostarch.PageSize
+            length -= hostarch.PageSize
         }
         if end := virtual + length; end > ring0.MaximumUserAddress {
             length -= (end - ring0.MaximumUserAddress)
diff --git a/pkg/sentry/platform/kvm/virtual_map.go b/pkg/sentry/platform/kvm/virtual_map.go
index 4dcdbf8a7..01d9eb39d 100644
--- a/pkg/sentry/platform/kvm/virtual_map.go
+++ b/pkg/sentry/platform/kvm/virtual_map.go
@@ -22,12 +22,12 @@ import (
     "regexp"
     "strconv"

-    "gvisor.dev/gvisor/pkg/usermem"
+    "gvisor.dev/gvisor/pkg/hostarch"
 )

 type virtualRegion struct {
     region
-    accessType usermem.AccessType
+    accessType hostarch.AccessType
     shared     bool
     offset     uintptr
     filename   string
@@ -92,7 +92,7 @@ func applyVirtualRegions(fn func(vr virtualRegion)) error {
                 virtual: uintptr(start),
                 length:  uintptr(end - start),
             },
-            accessType: usermem.AccessType{
+            accessType: hostarch.AccessType{
                 Read:    read,
                 Write:   write,
                 Execute: execute,
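
The final hunk fills a hostarch.AccessType from the read/write/execute flags that applyVirtualRegions parses out of /proc/self/maps. A small hypothetical helper showing the same construction (the function name and signature are illustrative, not part of virtual_map.go):

// accessTypeFromPerms builds a hostarch.AccessType from a /proc/self/maps
// permission field such as "r-xp". Hypothetical helper for illustration only;
// it assumes "gvisor.dev/gvisor/pkg/hostarch" is imported.
func accessTypeFromPerms(perms string) hostarch.AccessType {
    return hostarch.AccessType{
        Read:    len(perms) > 0 && perms[0] == 'r',
        Write:   len(perms) > 1 && perms[1] == 'w',
        Execute: len(perms) > 2 && perms[2] == 'x',
    }
}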