| author | Adin Scannell <ascannell@google.com> | 2020-06-05 17:24:12 -0700 |
| --- | --- | --- |
| committer | gVisor bot <gvisor-bot@google.com> | 2020-06-05 17:25:28 -0700 |
| commit | 527d08f6afdea1e142c76b8abb2266525a98c2ea (patch) | |
| tree | 9bdd320cc5479192a46f0ad44ab00ec83a8b00b1 /pkg/sentry/platform/ring0/pagetables | |
| parent | 8d8dce418f7e4053f80b035ff257743b431859d9 (diff) | |
Add +checkescape annotations to kvm/ring0.
This analysis also catches a potential bug: a stack split in mapPhysical. That split could have led to a guest exit during mapping (although the now-unnecessary retryInGuest loop would have handled it).
PiperOrigin-RevId: 315025106
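For readers unfamiliar with the annotation being added, here is a minimal sketch of the pattern (the `sum` function below is hypothetical, not part of this commit). As I read it, `+checkescape:hard,stack` asks gVisor's build-time checkescape analysis to reject the function if its code can escape to the heap or trigger a stack split, which pairs naturally with `//go:nosplit`, since such functions may run in contexts (like guest mode) where neither is safe:

```go
package example

// sum is a hypothetical hot-path helper, shown only to illustrate the
// annotation style this commit adds. The checkescape analysis verifies at
// build time that nothing in the function escapes to the heap ("hard") and
// that it cannot trigger a stack split ("stack"), either of which would be
// unsafe in code that runs while in guest mode.
//
// +checkescape:hard,stack
//
//go:nosplit
func sum(buf *[64]byte) (s int) {
	// Ranging over a pointer to an array allocates nothing and calls no
	// functions, so it passes both checks.
	for _, b := range buf {
		s += int(b)
	}
	return s
}
```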
Diffstat (limited to 'pkg/sentry/platform/ring0/pagetables')

| -rw-r--r-- | pkg/sentry/platform/ring0/pagetables/allocator.go | 11 |
| -rw-r--r-- | pkg/sentry/platform/ring0/pagetables/pagetables.go | 8 |

2 files changed, 16 insertions(+), 3 deletions(-)
```diff
diff --git a/pkg/sentry/platform/ring0/pagetables/allocator.go b/pkg/sentry/platform/ring0/pagetables/allocator.go
index 23fd5c352..8d75b7599 100644
--- a/pkg/sentry/platform/ring0/pagetables/allocator.go
+++ b/pkg/sentry/platform/ring0/pagetables/allocator.go
@@ -53,9 +53,14 @@ type RuntimeAllocator struct {
 
 // NewRuntimeAllocator returns an allocator that uses runtime allocation.
 func NewRuntimeAllocator() *RuntimeAllocator {
-	return &RuntimeAllocator{
-		used: make(map[*PTEs]struct{}),
-	}
+	r := new(RuntimeAllocator)
+	r.Init()
+	return r
+}
+
+// Init initializes a RuntimeAllocator.
+func (r *RuntimeAllocator) Init() {
+	r.used = make(map[*PTEs]struct{})
 }
 
 // Recycle returns freed pages to the pool.
diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables.go b/pkg/sentry/platform/ring0/pagetables/pagetables.go
index 87e88e97d..7f18ac296 100644
--- a/pkg/sentry/platform/ring0/pagetables/pagetables.go
+++ b/pkg/sentry/platform/ring0/pagetables/pagetables.go
@@ -86,6 +86,8 @@ func (*mapVisitor) requiresSplit() bool { return true }
 //
 // Precondition: addr & length must be page-aligned, their sum must not overflow.
 //
+// +checkescape:hard,stack
+//
 //go:nosplit
 func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
 	if !opts.AccessType.Any() {
@@ -128,6 +130,8 @@ func (v *unmapVisitor) visit(start uintptr, pte *PTE, align uintptr) {
 //
 // Precondition: addr & length must be page-aligned.
 //
+// +checkescape:hard,stack
+//
 //go:nosplit
 func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool {
 	w := unmapWalker{
@@ -162,6 +166,8 @@ func (v *emptyVisitor) visit(start uintptr, pte *PTE, align uintptr) {
 //
 // Precondition: addr & length must be page-aligned.
 //
+// +checkescape:hard,stack
+//
 //go:nosplit
 func (p *PageTables) IsEmpty(addr usermem.Addr, length uintptr) bool {
 	w := emptyWalker{
@@ -197,6 +203,8 @@ func (*lookupVisitor) requiresSplit() bool { return false }
 
 // Lookup returns the physical address for the given virtual address.
 //
+// +checkescape:hard,stack
+//
 //go:nosplit
 func (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, opts MapOpts) {
 	mask := uintptr(usermem.PageSize - 1)
```
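The allocator change above exists so that a `RuntimeAllocator` can be initialized in place rather than only through `NewRuntimeAllocator`. A minimal usage sketch of why that matters, assuming an embedding struct of our own invention (`machine` here is hypothetical, not from this commit):

```go
package example

import "gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"

// machine is a hypothetical container type. With Init available, the
// allocator can be embedded by value and initialized in place, avoiding
// the separate heap allocation that NewRuntimeAllocator implies.
type machine struct {
	allocator pagetables.RuntimeAllocator
}

func newMachine() *machine {
	m := new(machine)
	m.allocator.Init() // equivalent to assigning NewRuntimeAllocator's result
	return m
}
```

Keeping `NewRuntimeAllocator` as a thin wrapper over `Init` preserves the existing API while allowing callers that already own the memory to skip the extra allocation.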