path: root/pkg/sentry/mm/vma.go
author: Reapor-Yurnero <reapor.yurnero@gmail.com> 2020-05-20 22:48:41 -0700
committer: gVisor bot <gvisor-bot@google.com> 2020-05-20 22:50:07 -0700
commit: 059879e14301660c9fce1e5e59bdfaef89fc4aaf (patch)
tree: c5fff00ceb659fc53f752ebce8c71d5514335541 /pkg/sentry/mm/vma.go
parent: 8298c5bd4d1f836ee4c531a7bf04acff05d7099b (diff)
Implement gap tracking in the segment set.
This change was derived from a change by: Reapor-Yurnero <reapor.yurnero@gmail.com>
And has been modified by: Adin Scannell <ascannell@google.com>
(The original change author is preserved for the commit.)

This change implements gap tracking in the segment set by adding additional information in each node, and using that information to speed up gap finding from a linear scan to an O(log(n)) walk of the tree. This gap tracking is optional, and defaults to off except for segment instances that set gapTracking equal to 1 in their const lists.

PiperOrigin-RevId: 312621607
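As a rough illustration of the technique the commit message describes (and not the gVisor segment set itself; every name below is invented for this sketch), the following self-contained Go program caches gap information in each node of a tree over a fixed number of address slots. Each node stores the longest free run in its subtree, plus the free prefix and suffix needed to combine children, so a search for a gap of a given size descends only into subtrees whose cached value is large enough instead of scanning every slot.

package main

import "fmt"

// gapTree is a toy fixed-size tree: node v covers a range of slots, and the
// cached pre/suf/best values are the "additional information in each node".
type gapTree struct {
	n    int
	size []int // size[v]: number of slots covered by node v
	pre  []int // pre[v]: free slots at the start of v's range
	suf  []int // suf[v]: free slots at the end of v's range
	best []int // best[v]: longest free run anywhere in v's range
}

func newGapTree(n int) *gapTree {
	t := &gapTree{n: n, size: make([]int, 4*n), pre: make([]int, 4*n), suf: make([]int, 4*n), best: make([]int, 4*n)}
	t.build(1, 0, n-1)
	return t
}

func (t *gapTree) build(v, lo, hi int) {
	t.size[v] = hi - lo + 1
	t.pre[v], t.suf[v], t.best[v] = t.size[v], t.size[v], t.size[v]
	if lo == hi {
		return
	}
	mid := (lo + hi) / 2
	t.build(2*v, lo, mid)
	t.build(2*v+1, mid+1, hi)
}

// markUsed marks a single slot as allocated and updates the cached gap
// information on the path back up to the root.
func (t *gapTree) markUsed(v, lo, hi, pos int) {
	if lo == hi {
		t.pre[v], t.suf[v], t.best[v] = 0, 0, 0
		return
	}
	mid := (lo + hi) / 2
	if pos <= mid {
		t.markUsed(2*v, lo, mid, pos)
	} else {
		t.markUsed(2*v+1, mid+1, hi, pos)
	}
	l, r := 2*v, 2*v+1
	t.pre[v] = t.pre[l]
	if t.pre[l] == t.size[l] { // left child entirely free: prefix spills into the right child
		t.pre[v] += t.pre[r]
	}
	t.suf[v] = t.suf[r]
	if t.suf[r] == t.size[r] { // right child entirely free: suffix spills into the left child
		t.suf[v] += t.suf[l]
	}
	t.best[v] = maxInt(maxInt(t.best[l], t.best[r]), t.suf[l]+t.pre[r])
}

// findGap returns the lowest slot starting a free run of at least length, or
// -1 if none exists. It descends only into subtrees whose cached best value
// is large enough: the O(log n) walk, rather than a linear scan of all slots.
func (t *gapTree) findGap(v, lo, hi, length int) int {
	if t.best[v] < length {
		return -1
	}
	if lo == hi {
		return lo
	}
	mid := (lo + hi) / 2
	l, r := 2*v, 2*v+1
	if t.best[l] >= length {
		return t.findGap(l, lo, mid, length)
	}
	if t.suf[l]+t.pre[r] >= length {
		return mid - t.suf[l] + 1 // gap straddling the two children
	}
	return t.findGap(r, mid+1, hi, length)
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	t := newGapTree(8)
	t.markUsed(1, 0, 7, 3)             // allocate slot 3, splitting the free space
	fmt.Println(t.findGap(1, 0, 7, 4)) // 4: lowest free run of at least 4 slots
	fmt.Println(t.findGap(1, 0, 7, 5)) // -1: no run of 5 free slots remains
}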
Diffstat (limited to 'pkg/sentry/mm/vma.go')
-rw-r--r--  pkg/sentry/mm/vma.go  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index 9a14e69e6..16d8207e9 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -195,7 +195,7 @@ func (mm *MemoryManager) applicationAddrRange() usermem.AddrRange {
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextGap() {
+ for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextLargeEnoughGap(usermem.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift up to match the alignment?
if offset := uint64(gr.Start) % alignment; offset != 0 {
@@ -214,7 +214,7 @@ func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bou
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
- for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevGap() {
+ for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevLargeEnoughGap(usermem.Addr(length)) {
if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
// Can we shift down to match the alignment?
start := gr.End - usermem.Addr(length)
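To see the effect of the first hunk outside the sentry, here is a simplified stand-alone version of the bottom-up search it modifies. The gap type and slice below are hypothetical stand-ins for iterating gaps in mm.vmas; with gap tracking enabled, NextLargeEnoughGap performs the "too small, skip ahead" step inside the tree walk instead of visiting each gap in turn, as the toy linear loop here does.

package main

import "fmt"

// gap is a hypothetical stand-in for an unmapped address range.
type gap struct{ start, end uint64 }

// findLowestAvailable mirrors the shape of findLowestAvailableLocked in the
// diff above: scan gaps from the bottom of the range, skip gaps that are too
// small, and shift the candidate start up to the requested alignment.
func findLowestAvailable(gaps []gap, length, alignment uint64) (uint64, bool) {
	for _, g := range gaps {
		if g.end-g.start < length {
			// NextLargeEnoughGap(length) skips these gaps during the tree
			// walk; this toy version visits and rejects them one by one.
			continue
		}
		start := g.start
		if offset := start % alignment; offset != 0 {
			// "Can we shift up to match the alignment?"
			start += alignment - offset
		}
		if start+length <= g.end {
			return start, true
		}
	}
	return 0, false
}

func main() {
	gaps := []gap{{0x1000, 0x2000}, {0x3000, 0x8000}}
	if addr, ok := findLowestAvailable(gaps, 0x2000, 0x1000); ok {
		fmt.Printf("placed at %#x\n", addr) // 0x3000: the first gap is too small
	}
}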