author | Adin Scannell <ascannell@google.com> | 2018-05-30 15:13:36 -0700
---|---|---
committer | Shentubot <shentubot@google.com> | 2018-05-30 15:14:44 -0700
commit | c59475599dbcc226e1ef516f40b581d6f2f3be75 |
tree | 26eec98c27286aecb2ec91ee1f2c3484677c59d9 /pkg/sentry/platform/ring0/pagetables/pagetables.go |
parent | 812e83d3bbb99d4fa1ece4712a1ac85e84fe6ec3 |
Change ring0 & page tables arguments to structs.
This is a refactor of ring0 and ring0/pagetables that changes from
individual arguments to opts structures. This should involve no
functional changes, but sets the stage for subsequent changes.
PiperOrigin-RevId: 198627556
Change-Id: Id4460340f6a73f0c793cd879324398139cd58ae9
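The new opts structure itself is defined outside this file, so it does not appear in the diff below. As a rough, non-authoritative sketch of the shape implied by the change: the `AccessType` field is confirmed by the `opts.AccessType.Any()` call in the diff, while the `User` field is an assumption mirroring the removed `user bool` argument.

```go
// Sketch only: the real MapOpts definition is not part of this diff.
// AccessType is implied by the opts.AccessType.Any() call below; the User
// field is assumed here as the replacement for the old `user bool` argument.
type MapOpts struct {
	// AccessType holds the read/write/execute permissions for the mapping.
	AccessType usermem.AccessType

	// User marks the mapping as user-accessible (previously the separate
	// `user bool` parameter to Map and PTE.Set).
	User bool
}
```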
Diffstat (limited to 'pkg/sentry/platform/ring0/pagetables/pagetables.go')
-rw-r--r-- | pkg/sentry/platform/ring0/pagetables/pagetables.go | 18 |
1 file changed, 7 insertions(+), 11 deletions(-)
```diff
diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables.go b/pkg/sentry/platform/ring0/pagetables/pagetables.go
index ee7f27601..2df6792f7 100644
--- a/pkg/sentry/platform/ring0/pagetables/pagetables.go
+++ b/pkg/sentry/platform/ring0/pagetables/pagetables.go
@@ -117,8 +117,8 @@ func (p *PageTables) getPageTable(n *Node, index int) *Node {
 // True is returned iff there was a previous mapping in the range.
 //
 // Precondition: addr & length must be aligned, their sum must not overflow.
-func (p *PageTables) Map(addr usermem.Addr, length uintptr, user bool, at usermem.AccessType, physical uintptr) bool {
-	if at == usermem.NoAccess {
+func (p *PageTables) Map(addr usermem.Addr, length uintptr, opts MapOpts, physical uintptr) bool {
+	if !opts.AccessType.Any() {
 		return p.Unmap(addr, length)
 	}
 	prev := false
@@ -129,7 +129,7 @@ func (p *PageTables) Map(addr usermem.Addr, length uintptr, user bool, at userme
 	}
 	p.iterateRange(uintptr(addr), uintptr(end), true, func(s, e uintptr, pte *PTE, align uintptr) {
 		p := physical + (s - uintptr(addr))
-		prev = prev || (pte.Valid() && (p != pte.Address() || at.Write != pte.Writeable() || at.Execute != pte.Executable()))
+		prev = prev || (pte.Valid() && (p != pte.Address() || opts != pte.Opts()))
 		if p&align != 0 {
 			// We will install entries at a smaller granulaity if
 			// we don't install a valid entry here, however we must
@@ -137,7 +137,7 @@ func (p *PageTables) Map(addr usermem.Addr, length uintptr, user bool, at userme
 			pte.Clear()
 			return
 		}
-		pte.Set(p, at.Write, at.Execute, user)
+		pte.Set(p, opts)
 	})
 	p.mu.Unlock()
 	return prev
@@ -167,7 +167,7 @@ func (p *PageTables) Release() {
 }
 
 // Lookup returns the physical address for the given virtual address.
-func (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, accessType usermem.AccessType) {
+func (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, opts MapOpts) {
 	mask := uintptr(usermem.PageSize - 1)
 	off := uintptr(addr) & mask
 	addr = addr &^ usermem.Addr(mask)
@@ -176,13 +176,9 @@ func (p *PageTables) Lookup(addr usermem.Addr) (physical use
 			return
 		}
 		physical = pte.Address() + (s - uintptr(addr)) + off
-		accessType = usermem.AccessType{
-			Read:    true,
-			Write:   pte.Writeable(),
-			Execute: pte.Executable(),
-		}
+		opts = pte.Opts()
 	})
-	return physical, accessType
+	return
 }
 
 // allocNode allocates a new page.
```
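For illustration, a hedged sketch of the call-site impact, assuming the MapOpts fields sketched above; `pt`, `addr`, `length`, and `physical` are hypothetical variables, and `usermem.ReadWrite` stands in for whatever permissions a caller actually needs.

```go
// Before the change, a caller passed the user flag and access type as
// separate arguments (hypothetical call site):
//
//   pt.Map(addr, length, true /* user */, usermem.ReadWrite, physical)
//
// After the change, the same information travels in one opts struct.
pt.Map(addr, length, MapOpts{
	AccessType: usermem.ReadWrite,
	User:       true, // assumed field, mirroring the old `user bool` argument
}, physical)

// Lookup now returns the stored MapOpts instead of a usermem.AccessType.
physical, opts := pt.Lookup(addr)
if !opts.AccessType.Any() {
	// No valid mapping exists at addr.
}
```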