Diffstat (limited to 'pkg/sentry/platform/ring0/pagetables/pagetables.go')
 pkg/sentry/platform/ring0/pagetables/pagetables.go | 192 ++++++++++++------
 1 file changed, 156 insertions(+), 36 deletions(-)
diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables.go b/pkg/sentry/platform/ring0/pagetables/pagetables.go
index 6963ba62d..ff5787f89 100644
--- a/pkg/sentry/platform/ring0/pagetables/pagetables.go
+++ b/pkg/sentry/platform/ring0/pagetables/pagetables.go
@@ -13,6 +13,11 @@
// limitations under the License.
// Package pagetables provides a generic implementation of pagetables.
+//
+// The core functions must be safe to call from a nosplit context. Furthermore,
+// this pagetables implementation goes to lengths to ensure that all functions
+// are free from runtime allocation. Calls to NewPTEs/FreePTEs may be made
+// during walks, but these can be cached elsewhere if required.
package pagetables
import (
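A note on the allocator contract spelled out in the new package comment: because NewPTEs/FreePTEs may be called while a walk is in progress, a caller that needs fully allocation-free walks has to cache entries ahead of time. A minimal sketch of such a wrapper follows, assuming the Allocator interface shape used by this package (NewPTEs, PhysicalFor, LookupPTEs, FreePTEs); the pool size and the Refill method are hypothetical.

// cachingAllocator is a sketch of an Allocator wrapper that pre-populates
// a small fixed pool, so that NewPTEs calls made during a walk avoid the
// Go runtime allocator. Refill must run in a context where allocation is
// permitted, before any nosplit walk begins.
type cachingAllocator struct {
	base Allocator
	pool [16]*PTEs // Arbitrary pool size for this sketch.
	n    int       // Number of cached sets in pool.
}

// Refill tops up the pool; not safe to call from a nosplit context.
func (c *cachingAllocator) Refill() {
	for c.n < len(c.pool) {
		c.pool[c.n] = c.base.NewPTEs()
		c.n++
	}
}

// NewPTEs returns a cached set when one is available.
//
//go:nosplit
func (c *cachingAllocator) NewPTEs() *PTEs {
	if c.n > 0 {
		c.n--
		return c.pool[c.n]
	}
	return c.base.NewPTEs() // May allocate; Refill should prevent reaching this.
}

// FreePTEs recycles the set into the pool, falling back to the base allocator.
//
//go:nosplit
func (c *cachingAllocator) FreePTEs(ptes *PTEs) {
	if c.n < len(c.pool) {
		c.pool[c.n] = ptes
		c.n++
		return
	}
	c.base.FreePTEs(ptes)
}

//go:nosplit
func (c *cachingAllocator) PhysicalFor(ptes *PTEs) uintptr { return c.base.PhysicalFor(ptes) }

//go:nosplit
func (c *cachingAllocator) LookupPTEs(physical uintptr) *PTEs { return c.base.LookupPTEs(physical) }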
@@ -38,64 +43,179 @@ type PageTables struct {
// New returns new PageTables.
func New(a Allocator) *PageTables {
- p := &PageTables{Allocator: a}
+ p := new(PageTables)
+ p.Init(a)
+ return p
+}
+
+// Init initializes a set of PageTables.
+//
+//go:nosplit
+func (p *PageTables) Init(allocator Allocator) {
+ p.Allocator = allocator
p.root = p.Allocator.NewPTEs()
p.rootPhysical = p.Allocator.PhysicalFor(p.root)
- return p
}
+// mapVisitor is the visitor used by Map.
+type mapVisitor struct {
+ target uintptr // Input.
+ physical uintptr // Input.
+ opts MapOpts // Input.
+ prev bool // Output.
+}
+
+// visit installs the mapping for a single entry.
+//
+//go:nosplit
+func (v *mapVisitor) visit(start uintptr, pte *PTE, align uintptr) {
+ p := v.physical + (start - uintptr(v.target))
+ if pte.Valid() && (pte.Address() != p || pte.Opts() != v.opts) {
+ v.prev = true
+ }
+ if p&align != 0 {
+ // We will install entries at a smaller granularity if we don't
+ // install a valid entry here; however, we must zap any existing
+ // entry to ensure this happens.
+ pte.Clear()
+ return
+ }
+ pte.Set(p, v.opts)
+}
+
+//go:nosplit
+func (*mapVisitor) requiresAlloc() bool { return true }
+
+//go:nosplit
+func (*mapVisitor) requiresSplit() bool { return true }
+
// Map installs a mapping with the given physical address.
//
// True is returned iff there was a previous mapping in the range.
//
-// Precondition: addr & length must be aligned, their sum must not overflow.
+// Precondition: addr & length must be page-aligned, and their sum must not overflow.
+//
+//go:nosplit
func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
if !opts.AccessType.Any() {
return p.Unmap(addr, length)
}
- prev := false
- end, ok := addr.AddLength(uint64(length))
- if !ok {
- panic("pagetables.Map: overflow")
+ w := mapWalker{
+ pageTables: p,
+ visitor: mapVisitor{
+ target: uintptr(addr),
+ physical: physical,
+ opts: opts,
+ },
}
- p.iterateRange(uintptr(addr), uintptr(end), true, func(s, e uintptr, pte *PTE, align uintptr) {
- p := physical + (s - uintptr(addr))
- prev = prev || (pte.Valid() && (p != pte.Address() || opts != pte.Opts()))
- if p&align != 0 {
- // We will install entries at a smaller granulaity if
- // we don't install a valid entry here, however we must
- // zap any existing entry to ensure this happens.
- pte.Clear()
- return
- }
- pte.Set(p, opts)
- })
- return prev
+ w.iterateRange(uintptr(addr), uintptr(addr)+length)
+ return w.visitor.prev
+}
+
+// unmapVisitor is used for unmap.
+type unmapVisitor struct {
+ count int
+}
+
+//go:nosplit
+func (*unmapVisitor) requiresAlloc() bool { return false }
+
+//go:nosplit
+func (*unmapVisitor) requiresSplit() bool { return true }
+
+// visit unmaps the given entry.
+//
+//go:nosplit
+func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) {
+ pte.Clear()
+ v.count++
}
// Unmap unmaps the given range.
//
// True is returned iff there was a previous mapping in the range.
+//
+// Precondition: addr & length must be page-aligned.
+//
+//go:nosplit
func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {
- count := 0
- p.iterateRange(uintptr(addr), uintptr(addr)+length, false, func(s, e uintptr, pte *PTE, align uintptr) {
- pte.Clear()
- count++
- })
- return count > 0
+ w := unmapWalker{
+ pageTables: p,
+ visitor: unmapVisitor{
+ count: 0,
+ },
+ }
+ w.iterateRange(uintptr(addr), uintptr(addr)+length)
+ return w.visitor.count > 0
}
+// emptyVisitor is used for emptiness checks.
+type emptyVisitor struct {
+ count int
+}
+
+//go:nosplit
+func (*emptyVisitor) requiresAlloc() bool { return false }
+
+//go:nosplit
+func (*emptyVisitor) requiresSplit() bool { return false }
+
+// visit counts the given entry.
+//
+//go:nosplit
+func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) {
+ v.count++
+}
+
+// IsEmpty checks if the given range is empty.
+//
+// Precondition: addr & length must be page-aligned.
+//
+//go:nosplit
+func (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {
+ w := emptyWalker{
+ pageTables: p,
+ }
+ w.iterateRange(uintptr(addr), uintptr(addr)+length)
+ return w.visitor.count == 0
+}
+
+// lookupVisitor is used for lookup.
+type lookupVisitor struct {
+ target uintptr // Input.
+ physical uintptr // Output.
+ opts MapOpts // Output.
+}
+
+// visit records the physical address and options for a valid entry.
+//
+//go:nosplit
+func (v *lookupVisitor) visit(start uintptr, pte *PTE, align uintptr) {
+ if !pte.Valid() {
+ return
+ }
+ v.physical = pte.Address() + (start - uintptr(v.target))
+ v.opts = pte.Opts()
+}
+
+//go:nosplit
+func (*lookupVisitor) requiresAlloc() bool { return false }
+
+//go:nosplit
+func (*lookupVisitor) requiresSplit() bool { return false }
+
// Lookup returns the physical address for the given virtual address.
+//
+//go:nosplit
func (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, opts MapOpts) {
mask := uintptr(usermem.PageSize - 1)
- off := uintptr(addr) & mask
- addr = addr &^ usermem.Addr(mask)
- p.iterateRange(uintptr(addr), uintptr(addr+usermem.PageSize), false, func(s, e uintptr, pte *PTE, align uintptr) {
- if !pte.Valid() {
- return
- }
- physical = pte.Address() + (s - uintptr(addr)) + off
- opts = pte.Opts()
- })
- return
+ offset := uintptr(addr) & mask
+ w := lookupWalker{
+ pageTables: p,
+ visitor: lookupVisitor{
+ target: uintptr(addr &^ usermem.Addr(mask)),
+ },
+ }
+ w.iterateRange(uintptr(addr), uintptr(addr)+1)
+ return w.visitor.physical + offset, w.visitor.opts
}
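The four visitors above share one implied shape. The walkers (mapWalker, unmapWalker, emptyWalker, lookupWalker) appear to be specialized per visitor type rather than written against a Go interface, which keeps the visit calls devirtualized in nosplit paths; expressed as an interface purely for illustration, the contract reads:

// visitor is an illustrative restatement of the contract the generated
// walkers expect; it is not an actual type in this package.
type visitor interface {
	// visit is called for each entry in the walked range. align is an
	// alignment mask for the entry size at this level (see the p&align
	// test in mapVisitor.visit).
	visit(start uintptr, pte *PTE, align uintptr)

	// requiresAlloc indicates whether the walk may need to allocate
	// intermediate page table pages (true only for mapVisitor above).
	requiresAlloc() bool

	// requiresSplit indicates whether large entries straddling the range
	// must be split rather than skipped (true for map and unmap).
	requiresSplit() bool
}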
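At call sites, Map composes with an allocator as below. This is a sketch: NewRuntimeAllocator and the exact MapOpts fields (AccessType, User) are assumed from the surrounding package and may differ.

// Sketch: install one read/write user page.
pt := New(NewRuntimeAllocator()) // Assumed allocator constructor.

vaddr := usermem.Addr(0x400000) // Page-aligned virtual address.
paddr := uintptr(0x1234000)     // Page-aligned physical address.
opts := MapOpts{AccessType: usermem.ReadWrite, User: true}

prev := pt.Map(vaddr, usermem.PageSize, opts, paddr)
_ = prev // True iff some mapping already existed in the range.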
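Unmap and IsEmpty pair naturally with the sketch above:

// Sketch: tear the page down and confirm the range is empty again.
if pt.Unmap(vaddr, usermem.PageSize) {
	// At least one previously-valid entry was cleared.
}
if !pt.IsEmpty(vaddr, usermem.PageSize) {
	panic("range still mapped") // Illustrative check only.
}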
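Finally, note the offset handling in Lookup: the address is masked down to its page before the walk, and the intra-page offset is added back to the result, so looking up vaddr+0x123 yields paddr+0x123. When nothing is mapped, physical carries only that offset and opts stays the zero value, so callers should test opts first. Continuing the sketch:

// Sketch: Lookup preserves the byte offset within the page.
physical, opts := pt.Lookup(vaddr + 0x123)
if opts.AccessType.Any() { // Zero-value opts means no mapping was found.
	_ = physical // paddr + 0x123 for the mapping installed above.
}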