commit     2421006426445a1827422c2dbdd6fc6a47087147
author     Jamie Liu <jamieliu@google.com>   2018-12-17 11:37:38 -0800
committer  Shentubot <shentubot@google.com>  2018-12-17 11:38:59 -0800
tree       49aa2bc113c208fc117aff8a036866a7260090e5 /pkg/sentry/mm/address_space.go
parent     54694086dfb02a6f8453f043a44ffd10bb5a7070
Implement mlock(), kind of.
Currently mlock() and friends do nothing whatsoever. However, mlocking
is directly application-visible in a number of ways; for example,
madvise(MADV_DONTNEED) and msync(MS_INVALIDATE) both fail on mlocked
regions. The sentry currently handles this inconsistently: MADV_DONTNEED
is too important not to work, so it is allowed, while MS_INVALIDATE is
rejected.
Change MM to track mlocked regions in a manner consistent with Linux.
It still will not actually pin pages into host physical memory, but:
- mlock() will now cause sentry memory management to precommit mlocked
pages.
- MADV_DONTNEED and MS_INVALIDATE will interact with mlocked pages as
described above (see the sketch below).
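The application-visible effect is easy to demonstrate against Linux itself. The following is a minimal sketch (not part of this change) using golang.org/x/sys/unix: it locks one anonymous page, then shows madvise(MADV_DONTNEED) failing with EINVAL and msync(MS_INVALIDATE) failing with EBUSY, per madvise(2) and msync(2). Under the pre-change sentry, where mlock() was a no-op, both calls would instead succeed.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Map a single anonymous page, then lock it as an application might.
	page, err := unix.Mmap(-1, 0, os.Getpagesize(),
		unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		fmt.Fprintln(os.Stderr, "mmap:", err)
		os.Exit(1)
	}
	if err := unix.Mlock(page); err != nil {
		// May fail with EPERM/ENOMEM under a small RLIMIT_MEMLOCK.
		fmt.Fprintln(os.Stderr, "mlock:", err)
		os.Exit(1)
	}
	// On Linux, both calls fail on an mlocked region: madvise(2) documents
	// EINVAL for MADV_DONTNEED on locked pages, and msync(2) documents
	// EBUSY for MS_INVALIDATE when a memory lock exists.
	fmt.Println("madvise(MADV_DONTNEED):", unix.Madvise(page, unix.MADV_DONTNEED))
	fmt.Println("msync(MS_INVALIDATE):", unix.Msync(page, unix.MS_INVALIDATE))
}
```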
PiperOrigin-RevId: 225861605
Change-Id: Iee187204979ac9a4d15d0e037c152c0902c8d0ee
Diffstat (limited to 'pkg/sentry/mm/address_space.go')

 -rw-r--r--  pkg/sentry/mm/address_space.go | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
```diff
diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go
index 7488f7c4a..e7aa24c69 100644
--- a/pkg/sentry/mm/address_space.go
+++ b/pkg/sentry/mm/address_space.go
@@ -149,7 +149,7 @@ func (mm *MemoryManager) Deactivate() {
 // for all addresses in ar should be precommitted.
 //
 // Preconditions: mm.activeMu must be locked. mm.as != nil. ar.Length() != 0.
-// ar must be page-aligned. pseg.Range().Contains(ar.Start).
+// ar must be page-aligned. pseg == mm.pmas.LowerBoundSegment(ar.Start).
 func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error {
 	// By default, map entire pmas at a time, under the assumption that there
 	// is no cost to mapping more of a pma than necessary.
@@ -173,7 +173,9 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, pre
 		}
 	}
 
-	for {
+	// Since this checks ar.End and not mapAR.End, we will never map a pma that
+	// is not required.
+	for pseg.Ok() && pseg.Start() < ar.End {
 		pma := pseg.ValuePtr()
 		pmaAR := pseg.Range()
 		pmaMapAR := pmaAR.Intersect(mapAR)
@@ -184,13 +186,9 @@ func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, pre
 		if err := pma.file.MapInto(mm.as, pmaMapAR.Start, pseg.fileRangeOf(pmaMapAR), perms, precommit); err != nil {
 			return err
 		}
-		// Since this checks ar.End and not mapAR.End, we will never map a pma
-		// that is not required.
-		if ar.End <= pmaAR.End {
-			return nil
-		}
 		pseg = pseg.NextSegment()
 	}
+	return nil
 }
 
 // unmapASLocked removes all AddressSpace mappings for addresses in ar.
```
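For context on the loop change: the old precondition required that pseg contain ar.Start, so the caller could not pass a range beginning in a gap between pmas. The new precondition only requires the lower-bound segment, and the rewritten guard (pseg.Ok() && pseg.Start() < ar.End) both tolerates a leading gap and terminates cleanly when the segment set is exhausted. The toy below uses hypothetical stand-in types, not gvisor's generated segment-set API, purely to illustrate the iteration pattern:

```go
package main

import "fmt"

// Hypothetical stand-ins for gvisor's generated segment-set API, only to
// illustrate the new precondition and loop guard in mapASLocked.
type segment struct{ start, end uint64 } // [start, end)

type set struct{ segs []segment } // sorted, non-overlapping

// lowerBound returns the index of the first segment that contains or begins
// after addr (roughly pmas.LowerBoundSegment), or len(segs) if none exists.
func (s *set) lowerBound(addr uint64) int {
	for i, seg := range s.segs {
		if seg.end > addr {
			return i
		}
	}
	return len(s.segs)
}

func main() {
	// pmas cover [0x2000,0x3000) and [0x4000,0x5000); the caller asks for
	// ar = [0x1000,0x5000), which begins in a gap.
	s := &set{segs: []segment{{0x2000, 0x3000}, {0x4000, 0x5000}}}
	arStart, arEnd := uint64(0x1000), uint64(0x5000)

	// The old precondition pseg.Range().Contains(ar.Start) cannot hold here:
	// no pma contains 0x1000. The new one only needs the lower bound, and
	// the guard below plays the role of pseg.Ok() && pseg.Start() < ar.End,
	// stopping after the last pma that overlaps ar.
	for i := s.lowerBound(arStart); i < len(s.segs) && s.segs[i].start < arEnd; i++ {
		fmt.Printf("map pma [%#x, %#x)\n", s.segs[i].start, s.segs[i].end)
	}
}
```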