summary refs log tree commit diff homepage
path: root/pkg/ring0
diff options
context:
space:
mode:
Diffstat (limited to 'pkg/ring0')
-rw-r--r--  pkg/ring0/BUILD                                    2
-rw-r--r--  pkg/ring0/defs_amd64.go                            4
-rw-r--r--  pkg/ring0/defs_arm64.go                            4
-rw-r--r--  pkg/ring0/gen_offsets/BUILD                        2
-rw-r--r--  pkg/ring0/kernel_amd64.go                         35
-rw-r--r--  pkg/ring0/kernel_arm64.go                          8
-rw-r--r--  pkg/ring0/lib_amd64.go                             6
-rw-r--r--  pkg/ring0/lib_amd64.s                             12
-rw-r--r--  pkg/ring0/lib_arm64.go                             6
-rw-r--r--  pkg/ring0/lib_arm64.s                              8
-rw-r--r--  pkg/ring0/pagetables/BUILD                         4
-rw-r--r--  pkg/ring0/pagetables/allocator_unsafe.go          10
-rw-r--r--  pkg/ring0/pagetables/pagetables.go                18
-rw-r--r--  pkg/ring0/pagetables/pagetables_aarch64.go         6
-rw-r--r--  pkg/ring0/pagetables/pagetables_amd64_test.go     34
-rw-r--r--  pkg/ring0/pagetables/pagetables_arm64_test.go     42
-rw-r--r--  pkg/ring0/pagetables/pagetables_test.go           35
-rw-r--r--  pkg/ring0/pagetables/pagetables_x86.go             6
18 files changed, 148 insertions, 94 deletions
diff --git a/pkg/ring0/BUILD b/pkg/ring0/BUILD
index 885958456..fda6ba601 100644
--- a/pkg/ring0/BUILD
+++ b/pkg/ring0/BUILD
@@ -77,10 +77,10 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/cpuid",
+ "//pkg/hostarch",
"//pkg/ring0/pagetables",
"//pkg/safecopy",
"//pkg/sentry/arch",
"//pkg/sentry/arch/fpu",
- "//pkg/usermem",
],
)
diff --git a/pkg/ring0/defs_amd64.go b/pkg/ring0/defs_amd64.go
index ceddf719d..76776c65c 100644
--- a/pkg/ring0/defs_amd64.go
+++ b/pkg/ring0/defs_amd64.go
@@ -17,7 +17,7 @@
package ring0
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
var (
@@ -25,7 +25,7 @@ var (
UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
// MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
diff --git a/pkg/ring0/defs_arm64.go b/pkg/ring0/defs_arm64.go
index c372b02bb..0125690d2 100644
--- a/pkg/ring0/defs_arm64.go
+++ b/pkg/ring0/defs_arm64.go
@@ -17,7 +17,7 @@
package ring0
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
var (
@@ -25,7 +25,7 @@ var (
UserspaceSize = uintptr(1) << (VirtualAddressBits())
// MaximumUserAddress is the largest possible user address.
- MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+ MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
// KernelStartAddress is the starting kernel address.
KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
diff --git a/pkg/ring0/gen_offsets/BUILD b/pkg/ring0/gen_offsets/BUILD
index f421e1687..9ea8f9a4f 100644
--- a/pkg/ring0/gen_offsets/BUILD
+++ b/pkg/ring0/gen_offsets/BUILD
@@ -33,9 +33,9 @@ go_binary(
],
deps = [
"//pkg/cpuid",
+ "//pkg/hostarch",
"//pkg/ring0/pagetables",
"//pkg/sentry/arch",
"//pkg/sentry/arch/fpu",
- "//pkg/usermem",
],
)
diff --git a/pkg/ring0/kernel_amd64.go b/pkg/ring0/kernel_amd64.go
index 33c259757..41dfd0bf9 100644
--- a/pkg/ring0/kernel_amd64.go
+++ b/pkg/ring0/kernel_amd64.go
@@ -20,7 +20,7 @@ import (
"encoding/binary"
"reflect"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// init initializes architecture-specific state.
@@ -34,7 +34,7 @@ func (k *Kernel) init(maxCPUs int) {
entries = make([]kernelEntry, maxCPUs+padding-1)
totalSize := entrySize * uintptr(maxCPUs+padding-1)
addr := reflect.ValueOf(&entries[0]).Pointer()
- if addr&(usermem.PageSize-1) == 0 && totalSize >= usermem.PageSize {
+ if addr&(hostarch.PageSize-1) == 0 && totalSize >= hostarch.PageSize {
// The runtime forces power-of-2 alignment for allocations, and we are therefore
// safe once the first address is aligned and the chunk is at least a full page.
break
@@ -44,10 +44,10 @@ func (k *Kernel) init(maxCPUs int) {
k.cpuEntries = entries
k.globalIDT = &idt64{}
- if reflect.TypeOf(idt64{}).Size() != usermem.PageSize {
+ if reflect.TypeOf(idt64{}).Size() != hostarch.PageSize {
panic("Size of globalIDT should be PageSize")
}
- if reflect.ValueOf(k.globalIDT).Pointer()&(usermem.PageSize-1) != 0 {
+ if reflect.ValueOf(k.globalIDT).Pointer()&(hostarch.PageSize-1) != 0 {
panic("Allocated globalIDT should be page aligned")
}
@@ -71,13 +71,13 @@ func (k *Kernel) EntryRegions() map[uintptr]uintptr {
addr := reflect.ValueOf(&k.cpuEntries[0]).Pointer()
size := reflect.TypeOf(kernelEntry{}).Size() * uintptr(len(k.cpuEntries))
- end, _ := usermem.Addr(addr + size).RoundUp()
- regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+ end, _ := hostarch.Addr(addr + size).RoundUp()
+ regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
addr = reflect.ValueOf(k.globalIDT).Pointer()
size = reflect.TypeOf(idt64{}).Size()
- end, _ = usermem.Addr(addr + size).RoundUp()
- regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+ end, _ = hostarch.Addr(addr + size).RoundUp()
+ regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
return regions
}
@@ -250,6 +250,7 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {
}
SaveFloatingPoint(switchOpts.FloatingPointState.BytePointer()) // escapes: no. Copy out floating point.
WriteFS(uintptr(c.registers.Fs_base)) // escapes: no. Restore kernel FS.
+ RestoreKernelFPState() // escapes: no. Restore kernel MXCSR.
return
}
@@ -321,3 +322,21 @@ func SetCPUIDFaulting(on bool) bool {
func ReadCR2() uintptr {
return readCR2()
}
+
+// kernelMXCSR is the value of the mxcsr register in the Sentry.
+//
+// The MXCSR control configuration is initialized once and never changed. Look
+// at src/cmd/compile/abi-internal.md in the golang sources for more details.
+var kernelMXCSR uint32
+
+// RestoreKernelFPState restores the Sentry floating point state.
+//
+//go:nosplit
+func RestoreKernelFPState() {
+ // Restore the MXCSR control configuration.
+ ldmxcsr(&kernelMXCSR)
+}
+
+func init() {
+ stmxcsr(&kernelMXCSR)
+}
diff --git a/pkg/ring0/kernel_arm64.go b/pkg/ring0/kernel_arm64.go
index 7975e5f92..21db910a2 100644
--- a/pkg/ring0/kernel_arm64.go
+++ b/pkg/ring0/kernel_arm64.go
@@ -65,7 +65,7 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {
storeEl0Fpstate(switchOpts.FloatingPointState.BytePointer())
if switchOpts.Flush {
- FlushTlbByASID(uintptr(switchOpts.UserASID))
+ LocalFlushTlbByASID(uintptr(switchOpts.UserASID))
}
regs := switchOpts.Registers
@@ -89,3 +89,9 @@ func (c *CPU) SwitchToUser(switchOpts SwitchOpts) (vector Vector) {
return
}
+
+// RestoreKernelFPState restores the Sentry floating point state.
+//
+//go:nosplit
+func RestoreKernelFPState() {
+}
diff --git a/pkg/ring0/lib_amd64.go b/pkg/ring0/lib_amd64.go
index 0ec5c3bc5..3e6bb9663 100644
--- a/pkg/ring0/lib_amd64.go
+++ b/pkg/ring0/lib_amd64.go
@@ -61,6 +61,12 @@ func wrgsbase(addr uintptr)
// wrgsmsr writes to the GS_BASE MSR.
func wrgsmsr(addr uintptr)
+// stmxcsr reads the MXCSR control and status register.
+func stmxcsr(addr *uint32)
+
+// ldmxcsr writes to the MXCSR control and status register.
+func ldmxcsr(addr *uint32)
+
// readCR2 reads the current CR2 value.
func readCR2() uintptr
diff --git a/pkg/ring0/lib_amd64.s b/pkg/ring0/lib_amd64.s
index 2fe83568a..70a43e79e 100644
--- a/pkg/ring0/lib_amd64.s
+++ b/pkg/ring0/lib_amd64.s
@@ -198,3 +198,15 @@ TEXT ·rdmsr(SB),NOSPLIT,$0-16
MOVL AX, ret+8(FP)
MOVL DX, ret+12(FP)
RET
+
+// stmxcsr reads the MXCSR control and status register.
+TEXT ·stmxcsr(SB),NOSPLIT,$0-8
+ MOVQ addr+0(FP), SI
+ STMXCSR (SI)
+ RET
+
+// ldmxcsr writes to the MXCSR control and status register.
+TEXT ·ldmxcsr(SB),NOSPLIT,$0-8
+ MOVQ addr+0(FP), SI
+ LDMXCSR (SI)
+ RET
diff --git a/pkg/ring0/lib_arm64.go b/pkg/ring0/lib_arm64.go
index edf24eda3..5eabd4296 100644
--- a/pkg/ring0/lib_arm64.go
+++ b/pkg/ring0/lib_arm64.go
@@ -31,6 +31,9 @@ func FlushTlbByVA(addr uintptr)
// FlushTlbByASID invalidates tlb by ASID/Inner-Shareable.
func FlushTlbByASID(asid uintptr)
+// LocalFlushTlbByASID invalidates tlb by ASID.
+func LocalFlushTlbByASID(asid uintptr)
+
// FlushTlbAll invalidates all tlb.
func FlushTlbAll()
@@ -62,9 +65,10 @@ func LoadFloatingPoint(*byte)
// SaveFloatingPoint saves floating point state.
func SaveFloatingPoint(*byte)
+// FPSIMDDisableTrap disables fpsimd.
func FPSIMDDisableTrap()
-// DisableVFP disables fpsimd.
+// FPSIMDEnableTrap enables fpsimd.
func FPSIMDEnableTrap()
// Init sets function pointers based on architectural features.
diff --git a/pkg/ring0/lib_arm64.s b/pkg/ring0/lib_arm64.s
index e39b32841..69ebaf519 100644
--- a/pkg/ring0/lib_arm64.s
+++ b/pkg/ring0/lib_arm64.s
@@ -32,6 +32,14 @@ TEXT ·FlushTlbByASID(SB),NOSPLIT,$0-8
DSB $11 // dsb(ish)
RET
+TEXT ·LocalFlushTlbByASID(SB),NOSPLIT,$0-8
+ MOVD asid+0(FP), R1
+ LSL $TLBI_ASID_SHIFT, R1, R1
+ DSB $10 // dsb(ishst)
+ WORD $0xd5088741 // tlbi aside1, x1
+ DSB $11 // dsb(ish)
+ RET
+
TEXT ·LocalFlushTlbAll(SB),NOSPLIT,$0
DSB $6 // dsb(nshst)
WORD $0xd508871f // __tlbi(vmalle1)
diff --git a/pkg/ring0/pagetables/BUILD b/pkg/ring0/pagetables/BUILD
index 65a978cbb..f855f4d42 100644
--- a/pkg/ring0/pagetables/BUILD
+++ b/pkg/ring0/pagetables/BUILD
@@ -68,8 +68,8 @@ go_library(
"//pkg/sentry/platform/kvm:__subpackages__",
],
deps = [
+ "//pkg/hostarch",
"//pkg/sync",
- "//pkg/usermem",
],
)
@@ -84,5 +84,5 @@ go_test(
":walker_check_arm64",
],
library = ":pagetables",
- deps = ["//pkg/usermem"],
+ deps = ["//pkg/hostarch"],
)
diff --git a/pkg/ring0/pagetables/allocator_unsafe.go b/pkg/ring0/pagetables/allocator_unsafe.go
index d08bfdeb3..191d0942b 100644
--- a/pkg/ring0/pagetables/allocator_unsafe.go
+++ b/pkg/ring0/pagetables/allocator_unsafe.go
@@ -17,23 +17,23 @@ package pagetables
import (
"unsafe"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// newAlignedPTEs returns a set of aligned PTEs.
func newAlignedPTEs() *PTEs {
ptes := new(PTEs)
- offset := physicalFor(ptes) & (usermem.PageSize - 1)
+ offset := physicalFor(ptes) & (hostarch.PageSize - 1)
if offset == 0 {
// Already aligned.
return ptes
}
// Need to force an aligned allocation.
- unaligned := make([]byte, (2*usermem.PageSize)-1)
- offset = uintptr(unsafe.Pointer(&unaligned[0])) & (usermem.PageSize - 1)
+ unaligned := make([]byte, (2*hostarch.PageSize)-1)
+ offset = uintptr(unsafe.Pointer(&unaligned[0])) & (hostarch.PageSize - 1)
if offset != 0 {
- offset = usermem.PageSize - offset
+ offset = hostarch.PageSize - offset
}
return (*PTEs)(unsafe.Pointer(&unaligned[offset]))
}
diff --git a/pkg/ring0/pagetables/pagetables.go b/pkg/ring0/pagetables/pagetables.go
index 8c0a6aa82..3f17fba49 100644
--- a/pkg/ring0/pagetables/pagetables.go
+++ b/pkg/ring0/pagetables/pagetables.go
@@ -21,7 +21,7 @@
package pagetables
import (
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// PageTables is a set of page tables.
@@ -142,7 +142,7 @@ func (*mapVisitor) requiresSplit() bool { return true }
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
+func (p *PageTables) Map(addr hostarch.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
if p.readOnlyShared {
panic("Should not modify read-only shared pagetables.")
}
@@ -198,7 +198,7 @@ func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) Unmap(addr hostarch.Addr, length uintptr) bool {
if p.readOnlyShared {
panic("Should not modify read-only shared pagetables.")
}
@@ -249,7 +249,7 @@ func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) IsEmpty(addr hostarch.Addr, length uintptr) bool {
w := emptyWalker{
pageTables: p,
}
@@ -298,9 +298,9 @@ func (*lookupVisitor) requiresSplit() bool { return false }
//
// +checkescape:hard,stack
//go:nosplit
-func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.Addr, physical, size uintptr, opts MapOpts) {
- mask := uintptr(usermem.PageSize - 1)
- addr &^= usermem.Addr(mask)
+func (p *PageTables) Lookup(addr hostarch.Addr, findFirst bool) (virtual hostarch.Addr, physical, size uintptr, opts MapOpts) {
+ mask := uintptr(hostarch.PageSize - 1)
+ addr &^= hostarch.Addr(mask)
w := lookupWalker{
pageTables: p,
visitor: lookupVisitor{
@@ -308,12 +308,12 @@ func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.
findFirst: findFirst,
},
}
- end := ^usermem.Addr(0) &^ usermem.Addr(mask)
+ end := ^hostarch.Addr(0) &^ hostarch.Addr(mask)
if !findFirst {
end = addr + 1
}
w.iterateRange(uintptr(addr), uintptr(end))
- return usermem.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
+ return hostarch.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
}
// MarkReadOnlyShared marks the pagetables read-only and can be shared.
diff --git a/pkg/ring0/pagetables/pagetables_aarch64.go b/pkg/ring0/pagetables/pagetables_aarch64.go
index 163a3aea3..86eb00a4f 100644
--- a/pkg/ring0/pagetables/pagetables_aarch64.go
+++ b/pkg/ring0/pagetables/pagetables_aarch64.go
@@ -19,7 +19,7 @@ package pagetables
import (
"sync/atomic"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// archPageTables is architecture-specific data.
@@ -85,7 +85,7 @@ const (
// MapOpts are x86 options.
type MapOpts struct {
// AccessType defines permissions.
- AccessType usermem.AccessType
+ AccessType hostarch.AccessType
// Global indicates the page is globally accessible.
Global bool
@@ -120,7 +120,7 @@ func (p *PTE) Opts() MapOpts {
v := atomic.LoadUintptr((*uintptr)(p))
return MapOpts{
- AccessType: usermem.AccessType{
+ AccessType: hostarch.AccessType{
Read: true,
Write: v&readOnly == 0,
Execute: v&xn == 0,
diff --git a/pkg/ring0/pagetables/pagetables_amd64_test.go b/pkg/ring0/pagetables/pagetables_amd64_test.go
index 54e8e554f..a13c616ae 100644
--- a/pkg/ring0/pagetables/pagetables_amd64_test.go
+++ b/pkg/ring0/pagetables/pagetables_amd64_test.go
@@ -19,19 +19,19 @@ package pagetables
import (
"testing"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
func Test2MAnd4K(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a small page and a huge page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: usermem.Read}, pmdSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
+ pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite}},
- {0x00007f0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: usermem.Read}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
+ {0x00007f0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read}},
})
}
@@ -39,12 +39,12 @@ func Test1GAnd4K(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a small page and a super page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: usermem.Read}, pudSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
+ pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite}},
- {0x00007f0000000000, pudSize, pudSize * 47, MapOpts{AccessType: usermem.Read}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
+ {0x00007f0000000000, pudSize, pudSize * 47, MapOpts{AccessType: hostarch.Read}},
})
}
@@ -52,12 +52,12 @@ func TestSplit1GPage(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a super page and knock out the middle.
- pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: usermem.Read}, pudSize*42)
- pt.Unmap(usermem.Addr(0x00007f0000000000+pteSize), pudSize-(2*pteSize))
+ pt.Map(0x00007f0000000000, pudSize, MapOpts{AccessType: hostarch.Read}, pudSize*42)
+ pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pudSize-(2*pteSize))
checkMappings(t, pt, []mapping{
- {0x00007f0000000000, pteSize, pudSize * 42, MapOpts{AccessType: usermem.Read}},
- {0x00007f0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: usermem.Read}},
+ {0x00007f0000000000, pteSize, pudSize * 42, MapOpts{AccessType: hostarch.Read}},
+ {0x00007f0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: hostarch.Read}},
})
}
@@ -65,11 +65,11 @@ func TestSplit2MPage(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a huge page and knock out the middle.
- pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: usermem.Read}, pmdSize*42)
- pt.Unmap(usermem.Addr(0x00007f0000000000+pteSize), pmdSize-(2*pteSize))
+ pt.Map(0x00007f0000000000, pmdSize, MapOpts{AccessType: hostarch.Read}, pmdSize*42)
+ pt.Unmap(hostarch.Addr(0x00007f0000000000+pteSize), pmdSize-(2*pteSize))
checkMappings(t, pt, []mapping{
- {0x00007f0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: usermem.Read}},
- {0x00007f0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: usermem.Read}},
+ {0x00007f0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: hostarch.Read}},
+ {0x00007f0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: hostarch.Read}},
})
}
diff --git a/pkg/ring0/pagetables/pagetables_arm64_test.go b/pkg/ring0/pagetables/pagetables_arm64_test.go
index 2f73d424f..2514b9ac5 100644
--- a/pkg/ring0/pagetables/pagetables_arm64_test.go
+++ b/pkg/ring0/pagetables/pagetables_arm64_test.go
@@ -19,24 +19,24 @@ package pagetables
import (
"testing"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
func Test2MAnd4K(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a small page and a huge page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite, User: true}, pteSize*42)
- pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: usermem.Read, User: true}, pmdSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: true}, pteSize*42)
+ pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: true}, pmdSize*47)
- pt.Map(0xffff000000400000, pteSize, MapOpts{AccessType: usermem.ReadWrite, User: false}, pteSize*42)
- pt.Map(0xffffff0000000000, pmdSize, MapOpts{AccessType: usermem.Read, User: false}, pmdSize*47)
+ pt.Map(0xffff000000400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: false}, pteSize*42)
+ pt.Map(0xffffff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: false}, pmdSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite, User: true}},
- {0x0000ff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: usermem.Read, User: true}},
- {0xffff000000400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite, User: false}},
- {0xffffff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: usermem.Read, User: false}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: true}},
+ {0x0000ff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read, User: true}},
+ {0xffff000000400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: false}},
+ {0xffffff0000000000, pmdSize, pmdSize * 47, MapOpts{AccessType: hostarch.Read, User: false}},
})
}
@@ -44,12 +44,12 @@ func Test1GAnd4K(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a small page and a super page.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite, User: true}, pteSize*42)
- pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: usermem.Read, User: true}, pudSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite, User: true}, pteSize*42)
+ pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: hostarch.Read, User: true}, pudSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite, User: true}},
- {0x0000ff0000000000, pudSize, pudSize * 47, MapOpts{AccessType: usermem.Read, User: true}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite, User: true}},
+ {0x0000ff0000000000, pudSize, pudSize * 47, MapOpts{AccessType: hostarch.Read, User: true}},
})
}
@@ -57,12 +57,12 @@ func TestSplit1GPage(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a super page and knock out the middle.
- pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: usermem.Read, User: true}, pudSize*42)
- pt.Unmap(usermem.Addr(0x0000ff0000000000+pteSize), pudSize-(2*pteSize))
+ pt.Map(0x0000ff0000000000, pudSize, MapOpts{AccessType: hostarch.Read, User: true}, pudSize*42)
+ pt.Unmap(hostarch.Addr(0x0000ff0000000000+pteSize), pudSize-(2*pteSize))
checkMappings(t, pt, []mapping{
- {0x0000ff0000000000, pteSize, pudSize * 42, MapOpts{AccessType: usermem.Read, User: true}},
- {0x0000ff0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: usermem.Read, User: true}},
+ {0x0000ff0000000000, pteSize, pudSize * 42, MapOpts{AccessType: hostarch.Read, User: true}},
+ {0x0000ff0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, MapOpts{AccessType: hostarch.Read, User: true}},
})
}
@@ -70,11 +70,11 @@ func TestSplit2MPage(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map a huge page and knock out the middle.
- pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: usermem.Read, User: true}, pmdSize*42)
- pt.Unmap(usermem.Addr(0x0000ff0000000000+pteSize), pmdSize-(2*pteSize))
+ pt.Map(0x0000ff0000000000, pmdSize, MapOpts{AccessType: hostarch.Read, User: true}, pmdSize*42)
+ pt.Unmap(hostarch.Addr(0x0000ff0000000000+pteSize), pmdSize-(2*pteSize))
checkMappings(t, pt, []mapping{
- {0x0000ff0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: usermem.Read, User: true}},
- {0x0000ff0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: usermem.Read, User: true}},
+ {0x0000ff0000000000, pteSize, pmdSize * 42, MapOpts{AccessType: hostarch.Read, User: true}},
+ {0x0000ff0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, MapOpts{AccessType: hostarch.Read, User: true}},
})
}
diff --git a/pkg/ring0/pagetables/pagetables_test.go b/pkg/ring0/pagetables/pagetables_test.go
index 772f4fc5e..df93dcb6a 100644
--- a/pkg/ring0/pagetables/pagetables_test.go
+++ b/pkg/ring0/pagetables/pagetables_test.go
@@ -15,9 +15,8 @@
package pagetables
import (
+ "gvisor.dev/gvisor/pkg/hostarch"
"testing"
-
- "gvisor.dev/gvisor/pkg/usermem"
)
type mapping struct {
@@ -90,7 +89,7 @@ func TestUnmap(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map and unmap one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
pt.Unmap(0x400000, pteSize)
checkMappings(t, pt, nil)
@@ -100,10 +99,10 @@ func TestReadOnly(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.Read}, pteSize*42)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*42)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.Read}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.Read}},
})
}
@@ -111,10 +110,10 @@ func TestReadWrite(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map one entry.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
})
}
@@ -122,12 +121,12 @@ func TestSerialEntries(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map two sequential entries.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
- pt.Map(0x401000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
+ pt.Map(0x401000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite}},
- {0x401000, pteSize, pteSize * 47, MapOpts{AccessType: usermem.ReadWrite}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
+ {0x401000, pteSize, pteSize * 47, MapOpts{AccessType: hostarch.ReadWrite}},
})
}
@@ -135,11 +134,11 @@ func TestSpanningEntries(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Span a pgd with two pages.
- pt.Map(0x00007efffffff000, 2*pteSize, MapOpts{AccessType: usermem.Read}, pteSize*42)
+ pt.Map(0x00007efffffff000, 2*pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*42)
checkMappings(t, pt, []mapping{
- {0x00007efffffff000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.Read}},
- {0x00007f0000000000, pteSize, pteSize * 43, MapOpts{AccessType: usermem.Read}},
+ {0x00007efffffff000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.Read}},
+ {0x00007f0000000000, pteSize, pteSize * 43, MapOpts{AccessType: hostarch.Read}},
})
}
@@ -147,11 +146,11 @@ func TestSparseEntries(t *testing.T) {
pt := New(NewRuntimeAllocator())
// Map two entries in different pgds.
- pt.Map(0x400000, pteSize, MapOpts{AccessType: usermem.ReadWrite}, pteSize*42)
- pt.Map(0x00007f0000000000, pteSize, MapOpts{AccessType: usermem.Read}, pteSize*47)
+ pt.Map(0x400000, pteSize, MapOpts{AccessType: hostarch.ReadWrite}, pteSize*42)
+ pt.Map(0x00007f0000000000, pteSize, MapOpts{AccessType: hostarch.Read}, pteSize*47)
checkMappings(t, pt, []mapping{
- {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: usermem.ReadWrite}},
- {0x00007f0000000000, pteSize, pteSize * 47, MapOpts{AccessType: usermem.Read}},
+ {0x400000, pteSize, pteSize * 42, MapOpts{AccessType: hostarch.ReadWrite}},
+ {0x00007f0000000000, pteSize, pteSize * 47, MapOpts{AccessType: hostarch.Read}},
})
}
diff --git a/pkg/ring0/pagetables/pagetables_x86.go b/pkg/ring0/pagetables/pagetables_x86.go
index 32edd2f0a..e43698173 100644
--- a/pkg/ring0/pagetables/pagetables_x86.go
+++ b/pkg/ring0/pagetables/pagetables_x86.go
@@ -19,7 +19,7 @@ package pagetables
import (
"sync/atomic"
- "gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/hostarch"
)
// archPageTables is architecture-specific data.
@@ -63,7 +63,7 @@ const (
// MapOpts are x86 options.
type MapOpts struct {
// AccessType defines permissions.
- AccessType usermem.AccessType
+ AccessType hostarch.AccessType
// Global indicates the page is globally accessible.
Global bool
@@ -97,7 +97,7 @@ func (p *PTE) Valid() bool {
func (p *PTE) Opts() MapOpts {
v := atomic.LoadUintptr((*uintptr)(p))
return MapOpts{
- AccessType: usermem.AccessType{
+ AccessType: hostarch.AccessType{
Read: v&present != 0,
Write: v&writable != 0,
Execute: v&executeDisable == 0,