author     Jamie Liu <jamieliu@google.com>      2021-01-25 11:56:16 -0800
committer  gVisor bot <gvisor-bot@google.com>   2021-01-25 11:58:18 -0800
commit     032d14a06611304b18db659513f6db1edbc7cf75 (patch)
tree       e447fe2c6d1630b9be4957deb485220006696a7e /pkg/sentry/mm
parent     0592dac851addb9b95c9c56b90587d809a894832 (diff)
Remove synchronous decommit for MADV_DONTNEED on private anonymous vmas.
PiperOrigin-RevId: 353697719
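
As the diff below shows, this change is to mm.MemoryManager.Decommit, the sentry's implementation of madvise(MADV_DONTNEED). For context, here is a minimal, hypothetical sketch (not part of this commit) of an application inside the sandbox reaching that path; it assumes golang.org/x/sys/unix on Linux, and all names in it are illustrative only.

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	func main() {
		// Map 1 MiB of private anonymous memory, the common case for
		// MADV_DONTNEED noted in the comment removed below.
		const size = 1 << 20
		buf, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE,
			unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
		if err != nil {
			panic(err)
		}
		defer unix.Munmap(buf)

		// Touch every page so the memory is actually committed.
		for i := 0; i < size; i += 4096 {
			buf[i] = 1
		}

		// Discard the contents: the backing pages may be freed, and later
		// reads see zeroes. In gVisor this call is what reaches Decommit.
		if err := unix.Madvise(buf, unix.MADV_DONTNEED); err != nil {
			panic(err)
		}
		fmt.Println("first byte after MADV_DONTNEED:", buf[0])
	}
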
Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/syscalls.go  27
1 file changed, 4 insertions(+), 23 deletions(-)
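
The comment removed in the first hunk below explains what the deleted fast path did: for uniquely-owned pages of private anonymous vmas, Decommit released the backing host memory synchronously through the sentry's MemoryFile instead of only invalidating the pmas. As a rough illustration of that mechanism (a sketch only, not gVisor's pgalloc code; the memfd, sizes, and punchHole helper are hypothetical), synchronously decommitting a range of a memory-backed file on Linux amounts to punching a hole in it:

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	// punchHole frees the host pages backing [off, off+length) of fd while
	// keeping the file's size unchanged, which is what "decommit" means here.
	func punchHole(fd int, off, length int64) error {
		return unix.Fallocate(fd, unix.FALLOC_FL_PUNCH_HOLE|unix.FALLOC_FL_KEEP_SIZE, off, length)
	}

	func main() {
		// A memory-backed file standing in for the sentry's memory file.
		fd, err := unix.MemfdCreate("decommit-example", 0)
		if err != nil {
			panic(err)
		}
		defer unix.Close(fd)

		const size = 1 << 20
		if err := unix.Ftruncate(fd, size); err != nil {
			panic(err)
		}

		// Release the first 64 KiB immediately, rather than waiting for the
		// range to be invalidated and reclaimed later.
		if err := punchHole(fd, 0, 64<<10); err != nil {
			panic(err)
		}
		fmt.Println("decommitted 64 KiB of", size, "bytes")
	}

With the fast path gone, every pma in the range now goes through the general Isolate-and-unmap case, so the immediate reduction in host memory usage described in the removed comment no longer happens at the madvise call itself.
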
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 675efdc7c..69e37330b 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -1055,18 +1055,11 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
 	mm.activeMu.Lock()
 	defer mm.activeMu.Unlock()
 
-	// Linux's mm/madvise.c:madvise_dontneed() => mm/memory.c:zap_page_range()
-	// is analogous to our mm.invalidateLocked(ar, true, true). We inline this
-	// here, with the special case that we synchronously decommit
-	// uniquely-owned (non-copy-on-write) pages for private anonymous vma,
-	// which is the common case for MADV_DONTNEED. Invalidating these pmas, and
-	// allowing them to be reallocated when touched again, increases pma
-	// fragmentation, which may significantly reduce performance for
-	// non-vectored I/O implementations. Also, decommitting synchronously
-	// ensures that Decommit immediately reduces host memory usage.
+	// This is invalidateLocked(invalidatePrivate=true, invalidateShared=true),
+	// with the additional wrinkle that we must refuse to invalidate pmas under
+	// mlocked vmas.
 	var didUnmapAS bool
 	pseg := mm.pmas.LowerBoundSegment(ar.Start)
-	mf := mm.mfp.MemoryFile()
 	for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
 		vma := vseg.ValuePtr()
 		if vma.mlockMode != memmap.MLockNone {
@@ -1081,20 +1074,8 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
 			}
 		}
 		for pseg.Ok() && pseg.Start() < vsegAR.End {
-			pma := pseg.ValuePtr()
-			if pma.private && !mm.isPMACopyOnWriteLocked(vseg, pseg) {
-				psegAR := pseg.Range().Intersect(ar)
-				if vsegAR.IsSupersetOf(psegAR) && vma.mappable == nil {
-					if err := mf.Decommit(pseg.fileRangeOf(psegAR)); err == nil {
-						pseg = pseg.NextSegment()
-						continue
-					}
-					// If an error occurs, fall through to the general
-					// invalidation case below.
-				}
-			}
 			pseg = mm.pmas.Isolate(pseg, vsegAR)
-			pma = pseg.ValuePtr()
+			pma := pseg.ValuePtr()
 			if !didUnmapAS {
 				// Unmap all of ar, not just pseg.Range(), to minimize host
 				// syscalls. AddressSpace mappings must be removed before