author    gVisor bot <gvisor-bot@google.com>    2021-03-29 20:35:44 +0000
committer gVisor bot <gvisor-bot@google.com>    2021-03-29 20:35:44 +0000
commit    08cc017c088017546ed712cce700bf4374c864c0 (patch)
tree      af024e69d8855f4f867ef435ced35532b368a981 /pkg/ring0
parent    6a422755602daeaef4be60969c1acddc8b7b3041 (diff)
parent    8a2f7e716dcc62f04d2808e8ade34941c94fc956 (diff)
Merge release-20210322.0-29-g8a2f7e716 (automated)
Diffstat (limited to 'pkg/ring0')
-rw-r--r--  pkg/ring0/defs_impl_amd64.go                |  4
-rw-r--r--  pkg/ring0/defs_impl_arm64.go                |  4
-rw-r--r--  pkg/ring0/kernel_amd64.go                   | 16
-rw-r--r--  pkg/ring0/pagetables/allocator_unsafe.go    | 10
-rw-r--r--  pkg/ring0/pagetables/pagetables.go          | 18
-rw-r--r--  pkg/ring0/pagetables/pagetables_aarch64.go  |  6
-rw-r--r--  pkg/ring0/pagetables/pagetables_x86.go      |  6
7 files changed, 32 insertions, 32 deletions
diff --git a/pkg/ring0/defs_impl_amd64.go b/pkg/ring0/defs_impl_amd64.go
index 411ea9126..cd8f735c0 100644
--- a/pkg/ring0/defs_impl_amd64.go
+++ b/pkg/ring0/defs_impl_amd64.go
@@ -7,10 +7,10 @@ package ring0
 import (
 	"fmt"
 	"gvisor.dev/gvisor/pkg/cpuid"
+	"gvisor.dev/gvisor/pkg/hostarch"
 	"gvisor.dev/gvisor/pkg/ring0/pagetables"
 	"gvisor.dev/gvisor/pkg/sentry/arch"
 	"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
-	"gvisor.dev/gvisor/pkg/usermem"
 	"io"
 	"reflect"
 )
@@ -112,7 +112,7 @@ var (
 	UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1)
 
 	// MaximumUserAddress is the largest possible user address.
-	MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+	MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
 
 	// KernelStartAddress is the starting kernel address.
 	KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
diff --git a/pkg/ring0/defs_impl_arm64.go b/pkg/ring0/defs_impl_arm64.go
index 578c6b822..90f6af9fc 100644
--- a/pkg/ring0/defs_impl_arm64.go
+++ b/pkg/ring0/defs_impl_arm64.go
@@ -6,10 +6,10 @@ package ring0
 import (
 	"fmt"
+	"gvisor.dev/gvisor/pkg/hostarch"
 	"gvisor.dev/gvisor/pkg/ring0/pagetables"
 	"gvisor.dev/gvisor/pkg/sentry/arch"
 	"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
-	"gvisor.dev/gvisor/pkg/usermem"
 	"io"
 	"reflect"
 )
@@ -216,7 +216,7 @@ var (
 	UserspaceSize = uintptr(1) << (VirtualAddressBits())
 
 	// MaximumUserAddress is the largest possible user address.
-	MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1)
+	MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(hostarch.PageSize-1)
 
 	// KernelStartAddress is the starting kernel address.
 	KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1)
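Note: in both defs files above, MaximumUserAddress is derived by clearing the low page-offset bits of the last user byte address. A minimal, self-contained sketch of that arithmetic, assuming 48 virtual address bits (a common amd64 configuration) and 4 KiB pages; in the real code these values come from VirtualAddressBits() and hostarch.PageSize:

    package main

    import "fmt"

    const pageSize = 4096 // assumed 4 KiB pages, standing in for hostarch.PageSize

    func main() {
        // On amd64, user space occupies the lower half of a 48-bit address space.
        userspaceSize := uintptr(1) << (48 - 1)

        // Clearing the low 12 bits rounds the last user byte address down to
        // the start of its page, as in (UserspaceSize - 1) & ^uintptr(PageSize-1).
        maximumUserAddress := (userspaceSize - 1) &^ uintptr(pageSize-1)

        fmt.Printf("%#x\n", userspaceSize-1)    // 0x7fffffffffff
        fmt.Printf("%#x\n", maximumUserAddress) // 0x7ffffffff000
    }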
diff --git a/pkg/ring0/kernel_amd64.go b/pkg/ring0/kernel_amd64.go
index 33c259757..92d2330cb 100644
--- a/pkg/ring0/kernel_amd64.go
+++ b/pkg/ring0/kernel_amd64.go
@@ -20,7 +20,7 @@ import (
 	"encoding/binary"
 	"reflect"
 
-	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/hostarch"
 )
 
 // init initializes architecture-specific state.
@@ -34,7 +34,7 @@ func (k *Kernel) init(maxCPUs int) {
 		entries = make([]kernelEntry, maxCPUs+padding-1)
 		totalSize := entrySize * uintptr(maxCPUs+padding-1)
 		addr := reflect.ValueOf(&entries[0]).Pointer()
-		if addr&(usermem.PageSize-1) == 0 && totalSize >= usermem.PageSize {
+		if addr&(hostarch.PageSize-1) == 0 && totalSize >= hostarch.PageSize {
 			// The runtime forces power-of-2 alignment for allocations, and we are therefore
 			// safe once the first address is aligned and the chunk is at least a full page.
 			break
@@ -44,10 +44,10 @@ func (k *Kernel) init(maxCPUs int) {
 	k.cpuEntries = entries
 
 	k.globalIDT = &idt64{}
-	if reflect.TypeOf(idt64{}).Size() != usermem.PageSize {
+	if reflect.TypeOf(idt64{}).Size() != hostarch.PageSize {
 		panic("Size of globalIDT should be PageSize")
 	}
-	if reflect.ValueOf(k.globalIDT).Pointer()&(usermem.PageSize-1) != 0 {
+	if reflect.ValueOf(k.globalIDT).Pointer()&(hostarch.PageSize-1) != 0 {
 		panic("Allocated globalIDT should be page aligned")
 	}
 
@@ -71,13 +71,13 @@ func (k *Kernel) EntryRegions() map[uintptr]uintptr {
 	addr := reflect.ValueOf(&k.cpuEntries[0]).Pointer()
 	size := reflect.TypeOf(kernelEntry{}).Size() * uintptr(len(k.cpuEntries))
-	end, _ := usermem.Addr(addr + size).RoundUp()
-	regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+	end, _ := hostarch.Addr(addr + size).RoundUp()
+	regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
 
 	addr = reflect.ValueOf(k.globalIDT).Pointer()
 	size = reflect.TypeOf(idt64{}).Size()
-	end, _ = usermem.Addr(addr + size).RoundUp()
-	regions[uintptr(usermem.Addr(addr).RoundDown())] = uintptr(end)
+	end, _ = hostarch.Addr(addr + size).RoundUp()
+	regions[uintptr(hostarch.Addr(addr).RoundDown())] = uintptr(end)
 
 	return regions
 }
diff --git a/pkg/ring0/pagetables/allocator_unsafe.go b/pkg/ring0/pagetables/allocator_unsafe.go
index d08bfdeb3..191d0942b 100644
--- a/pkg/ring0/pagetables/allocator_unsafe.go
+++ b/pkg/ring0/pagetables/allocator_unsafe.go
@@ -17,23 +17,23 @@ package pagetables
 import (
 	"unsafe"
 
-	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/hostarch"
 )
 
 // newAlignedPTEs returns a set of aligned PTEs.
 func newAlignedPTEs() *PTEs {
 	ptes := new(PTEs)
-	offset := physicalFor(ptes) & (usermem.PageSize - 1)
+	offset := physicalFor(ptes) & (hostarch.PageSize - 1)
 	if offset == 0 {
 		// Already aligned.
 		return ptes
 	}
 
 	// Need to force an aligned allocation.
-	unaligned := make([]byte, (2*usermem.PageSize)-1)
-	offset = uintptr(unsafe.Pointer(&unaligned[0])) & (usermem.PageSize - 1)
+	unaligned := make([]byte, (2*hostarch.PageSize)-1)
+	offset = uintptr(unsafe.Pointer(&unaligned[0])) & (hostarch.PageSize - 1)
 	if offset != 0 {
-		offset = usermem.PageSize - offset
+		offset = hostarch.PageSize - offset
 	}
 	return (*PTEs)(unsafe.Pointer(&unaligned[offset]))
 }
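Note: newAlignedPTEs above uses a standard trick for forcing alignment: a buffer of 2*PageSize-1 bytes must contain a page-aligned address with a full page after it, so over-allocating and offsetting always succeeds. A self-contained sketch of the technique (illustrative only; the real function returns a *PTEs and computes the offset via physicalFor):

    package main

    import (
        "fmt"
        "unsafe"
    )

    const pageSize = 4096 // assumed 4 KiB pages

    // alignedPage returns a page-aligned pointer into a fresh buffer.
    func alignedPage() unsafe.Pointer {
        unaligned := make([]byte, 2*pageSize-1)
        offset := uintptr(unsafe.Pointer(&unaligned[0])) & (pageSize - 1)
        if offset != 0 {
            // Advance to the next page boundary.
            offset = pageSize - offset
        }
        return unsafe.Pointer(&unaligned[offset])
    }

    func main() {
        p := alignedPage()
        // The low bits of a page-aligned address are always zero.
        fmt.Printf("%#x (offset in page: %d)\n", uintptr(p), uintptr(p)&(pageSize-1))
    }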
diff --git a/pkg/ring0/pagetables/pagetables.go b/pkg/ring0/pagetables/pagetables.go
index 8c0a6aa82..3f17fba49 100644
--- a/pkg/ring0/pagetables/pagetables.go
+++ b/pkg/ring0/pagetables/pagetables.go
@@ -21,7 +21,7 @@ package pagetables
 import (
-	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/hostarch"
 )
 
 // PageTables is a set of page tables.
@@ -142,7 +142,7 @@ func (*mapVisitor) requiresSplit() bool { return true }
 //
 // +checkescape:hard,stack
 //go:nosplit
-func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
+func (p *PageTables) Map(addr hostarch.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
 	if p.readOnlyShared {
 		panic("Should not modify read-only shared pagetables.")
 	}
@@ -198,7 +198,7 @@ func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
 //
 // +checkescape:hard,stack
 //go:nosplit
-func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) Unmap(addr hostarch.Addr, length uintptr) bool {
 	if p.readOnlyShared {
 		panic("Should not modify read-only shared pagetables.")
 	}
@@ -249,7 +249,7 @@ func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) bool {
 //
 // +checkescape:hard,stack
 //go:nosplit
-func (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {
+func (p *PageTables) IsEmpty(addr hostarch.Addr, length uintptr) bool {
 	w := emptyWalker{
 		pageTables: p,
 	}
@@ -298,9 +298,9 @@ func (*lookupVisitor) requiresSplit() bool { return false }
 //
 // +checkescape:hard,stack
 //go:nosplit
-func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.Addr, physical, size uintptr, opts MapOpts) {
-	mask := uintptr(usermem.PageSize - 1)
-	addr &^= usermem.Addr(mask)
+func (p *PageTables) Lookup(addr hostarch.Addr, findFirst bool) (virtual hostarch.Addr, physical, size uintptr, opts MapOpts) {
+	mask := uintptr(hostarch.PageSize - 1)
+	addr &^= hostarch.Addr(mask)
 	w := lookupWalker{
 		pageTables: p,
 		visitor: lookupVisitor{
@@ -308,12 +308,12 @@ func (p *PageTables) Lookup(addr usermem.Addr, findFirst bool) (virtual usermem.
 			findFirst: findFirst,
 		},
 	}
-	end := ^usermem.Addr(0) &^ usermem.Addr(mask)
+	end := ^hostarch.Addr(0) &^ hostarch.Addr(mask)
 	if !findFirst {
 		end = addr + 1
 	}
 	w.iterateRange(uintptr(addr), uintptr(end))
-	return usermem.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
+	return hostarch.Addr(w.visitor.target), w.visitor.physical, w.visitor.size, w.visitor.opts
 }
 
 // MarkReadOnlyShared marks the pagetables read-only and can be shared.
diff --git a/pkg/ring0/pagetables/pagetables_aarch64.go b/pkg/ring0/pagetables/pagetables_aarch64.go
index 163a3aea3..86eb00a4f 100644
--- a/pkg/ring0/pagetables/pagetables_aarch64.go
+++ b/pkg/ring0/pagetables/pagetables_aarch64.go
@@ -19,7 +19,7 @@ package pagetables
 import (
 	"sync/atomic"
 
-	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/hostarch"
 )
 
 // archPageTables is architecture-specific data.
@@ -85,7 +85,7 @@ const (
 // MapOpts are x86 options.
 type MapOpts struct {
 	// AccessType defines permissions.
-	AccessType usermem.AccessType
+	AccessType hostarch.AccessType
 
 	// Global indicates the page is globally accessible.
 	Global bool
@@ -120,7 +120,7 @@ func (p *PTE) Opts() MapOpts {
 	v := atomic.LoadUintptr((*uintptr)(p))
 
 	return MapOpts{
-		AccessType: usermem.AccessType{
+		AccessType: hostarch.AccessType{
 			Read:    true,
 			Write:   v&readOnly == 0,
 			Execute: v&xn == 0,
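Note: Lookup above truncates its argument to a page boundary with Go's &^ (AND NOT) operator before walking the tables, and uses the same mask to compute the default end of the walk. A small sketch of that rounding, using a local addr type as a stand-in for hostarch.Addr:

    package main

    import "fmt"

    type addr uintptr // stand-in for hostarch.Addr

    const pageSize = 4096 // assumed 4 KiB pages

    func main() {
        mask := uintptr(pageSize - 1)

        a := addr(0x7f00deadbeef)
        a &^= addr(mask) // clear the page-offset bits: round down
        fmt.Printf("%#x\n", uintptr(a)) // 0x7f00deadb000

        // Default walk end: the highest page-aligned address, mirroring
        // end := ^hostarch.Addr(0) &^ hostarch.Addr(mask).
        end := ^addr(0) &^ addr(mask)
        fmt.Printf("%#x\n", uintptr(end)) // 0xfffffffffffff000 on 64-bit
    }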
diff --git a/pkg/ring0/pagetables/pagetables_x86.go b/pkg/ring0/pagetables/pagetables_x86.go
index 32edd2f0a..e43698173 100644
--- a/pkg/ring0/pagetables/pagetables_x86.go
+++ b/pkg/ring0/pagetables/pagetables_x86.go
@@ -19,7 +19,7 @@ package pagetables
 import (
 	"sync/atomic"
 
-	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/hostarch"
 )
 
 // archPageTables is architecture-specific data.
@@ -63,7 +63,7 @@ const (
 // MapOpts are x86 options.
 type MapOpts struct {
 	// AccessType defines permissions.
-	AccessType usermem.AccessType
+	AccessType hostarch.AccessType
 
 	// Global indicates the page is globally accessible.
 	Global bool
@@ -97,7 +97,7 @@ func (p *PTE) Valid() bool {
 func (p *PTE) Opts() MapOpts {
 	v := atomic.LoadUintptr((*uintptr)(p))
 	return MapOpts{
-		AccessType: usermem.AccessType{
+		AccessType: hostarch.AccessType{
 			Read:    v&present != 0,
 			Write:   v&writable != 0,
 			Execute: v&executeDisable == 0,
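Note: the x86 Opts above rebuilds hostarch.AccessType from hardware PTE bits. A hedged sketch of that decoding using the standard x86-64 bit positions (bit 0 present, bit 1 writable, bit 63 execute-disable; the actual constants are defined elsewhere in pagetables_x86.go), for a 64-bit build:

    package main

    import "fmt"

    const (
        present        = uintptr(1) << 0  // page is mapped
        writable       = uintptr(1) << 1  // page is writable
        executeDisable = uintptr(1) << 63 // NX: page is not executable
    )

    // accessType mirrors the shape of hostarch.AccessType.
    type accessType struct {
        Read, Write, Execute bool
    }

    // decode extracts permissions from a raw PTE value, as Opts does.
    func decode(v uintptr) accessType {
        return accessType{
            Read:    v&present != 0,
            Write:   v&writable != 0,
            Execute: v&executeDisable == 0, // NX clear means executable
        }
    }

    func main() {
        pte := present | writable | executeDisable // read/write, no execute
        fmt.Printf("%+v\n", decode(pte))           // {Read:true Write:true Execute:false}
    }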