diff options
author | gVisor bot <gvisor-bot@google.com> | 2021-01-25 20:14:16 +0000 |
---|---|---|
committer | gVisor bot <gvisor-bot@google.com> | 2021-01-25 20:14:16 +0000 |
commit | 0089f50137a50484f47b4e90ccea7955552eaf47 (patch) | |
tree | 88076769635927298a935382103522601a14ce5c | |
parent | 478118b522b66a45b25070a0b13db5ca1d4d23f5 (diff) | |
parent | 032d14a06611304b18db659513f6db1edbc7cf75 (diff) |
Merge release-20210112.0-84-g032d14a06 (automated)
-rw-r--r-- | pkg/sentry/mm/syscalls.go | 27 |
1 file changed, 4 insertions, 23 deletions
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go index 675efdc7c..69e37330b 100644 --- a/pkg/sentry/mm/syscalls.go +++ b/pkg/sentry/mm/syscalls.go @@ -1055,18 +1055,11 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error { mm.activeMu.Lock() defer mm.activeMu.Unlock() - // Linux's mm/madvise.c:madvise_dontneed() => mm/memory.c:zap_page_range() - // is analogous to our mm.invalidateLocked(ar, true, true). We inline this - // here, with the special case that we synchronously decommit - // uniquely-owned (non-copy-on-write) pages for private anonymous vma, - // which is the common case for MADV_DONTNEED. Invalidating these pmas, and - // allowing them to be reallocated when touched again, increases pma - // fragmentation, which may significantly reduce performance for - // non-vectored I/O implementations. Also, decommitting synchronously - // ensures that Decommit immediately reduces host memory usage. + // This is invalidateLocked(invalidatePrivate=true, invalidateShared=true), + // with the additional wrinkle that we must refuse to invalidate pmas under + // mlocked vmas. var didUnmapAS bool pseg := mm.pmas.LowerBoundSegment(ar.Start) - mf := mm.mfp.MemoryFile() for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() { vma := vseg.ValuePtr() if vma.mlockMode != memmap.MLockNone { @@ -1081,20 +1074,8 @@ func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error { } } for pseg.Ok() && pseg.Start() < vsegAR.End { - pma := pseg.ValuePtr() - if pma.private && !mm.isPMACopyOnWriteLocked(vseg, pseg) { - psegAR := pseg.Range().Intersect(ar) - if vsegAR.IsSupersetOf(psegAR) && vma.mappable == nil { - if err := mf.Decommit(pseg.fileRangeOf(psegAR)); err == nil { - pseg = pseg.NextSegment() - continue - } - // If an error occurs, fall through to the general - // invalidation case below. 
- } - } pseg = mm.pmas.Isolate(pseg, vsegAR) - pma = pseg.ValuePtr() + pma := pseg.ValuePtr() if !didUnmapAS { // Unmap all of ar, not just pseg.Range(), to minimize host // syscalls. AddressSpace mappings must be removed before |