Diffstat (limited to 'pkg/sentry/platform/filemem/filemem.go')
-rw-r--r--  pkg/sentry/platform/filemem/filemem.go  65
1 file changed, 58 insertions(+), 7 deletions(-)
diff --git a/pkg/sentry/platform/filemem/filemem.go b/pkg/sentry/platform/filemem/filemem.go
index 45ef98eb0..6c8b95578 100644
--- a/pkg/sentry/platform/filemem/filemem.go
+++ b/pkg/sentry/platform/filemem/filemem.go
@@ -105,6 +105,12 @@ type FileMem struct {
usageSwapped uint64
usageLast time.Time
+ // minUnallocatedPage is the minimum page that may be unallocated.
+ // i.e., there are no unallocated pages below minUnallocatedPage.
+ //
+ // minUnallocatedPage is protected by mu.
+ minUnallocatedPage uint64
+
// fileSize is the size of the backing memory file in bytes. fileSize is
// always a power-of-two multiple of chunkSize.
//
@@ -119,6 +125,12 @@ type FileMem struct {
// is protected by mu.
reclaimable bool
+ // minReclaimablePage is the minimum page that may be reclaimable.
+ // i.e., all reclaimable pages are >= minReclaimablePage.
+ //
+ // minReclaimablePage is protected by mu.
+ minReclaimablePage uint64
+
// reclaimCond is signaled (with mu locked) when reclaimable or destroyed
// transitions from false to true.
reclaimCond sync.Cond
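
The two new fields are conservative hints rather than exact values: no unallocated page may lie below minUnallocatedPage, and no reclaimable page may lie below minReclaimablePage. A minimal sketch of those invariants over a toy page map follows; the page struct and checkHints helper are illustrative assumptions, not FileMem API.

    package main

    import "fmt"

    // page is a toy stand-in for FileMem's per-page state: allocated or not,
    // and, if allocated, how many references it has.
    type page struct {
        allocated bool
        refs      int
    }

    // checkHints verifies the two invariants the hints are meant to keep:
    // no unallocated page below minUnallocated, and no reclaimable page
    // (allocated with zero refs) below minReclaimable.
    func checkHints(pages map[uint64]page, minUnallocated, minReclaimable uint64) error {
        for off, p := range pages {
            if !p.allocated && off < minUnallocated {
                return fmt.Errorf("unallocated page %#x below hint %#x", off, minUnallocated)
            }
            if p.allocated && p.refs == 0 && off < minReclaimable {
                return fmt.Errorf("reclaimable page %#x below hint %#x", off, minReclaimable)
            }
        }
        return nil
    }

    func main() {
        pages := map[uint64]page{
            0x0000: {allocated: true, refs: 1},
            0x1000: {allocated: true, refs: 0}, // reclaimable
            0x2000: {allocated: false},         // unallocated
        }
        // Hints may be lower than necessary, never higher.
        fmt.Println(checkHints(pages, 0x2000, 0x1000)) // <nil>
    }
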
@@ -162,6 +174,9 @@ const (
chunkMask = chunkSize - 1
initialSize = chunkSize
+
+ // maxPage is the highest 64-bit page.
+ maxPage = math.MaxUint64 &^ (usermem.PageSize - 1)
)
// newFromFile creates a FileMem backed by the given file.
@@ -172,6 +187,9 @@ func newFromFile(file *os.File) (*FileMem, error) {
f := &FileMem{
fileSize: initialSize,
file: file,
+ // No pages are reclaimable. DecRef will always be able to
+ // decrease minReclaimablePage from this point.
+ minReclaimablePage: maxPage,
}
f.reclaimCond.L = &f.mu
f.mappings.Store(make([]uintptr, initialSize/chunkSize))
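
As a worked example of the sentinel (assuming 4 KiB pages rather than the real usermem.PageSize constant), maxPage is just the highest page-aligned uint64, so seeding minReclaimablePage with it encodes "nothing is reclaimable" while still letting the fr.Start < f.minReclaimablePage comparison in DecRef lower the hint the first time a page is freed.

    package main

    import (
        "fmt"
        "math"
    )

    const pageSize = 4096 // assumption; the real code uses usermem.PageSize

    // maxPage-style sentinel: the highest page-aligned 64-bit offset.
    const maxPage = math.MaxUint64 &^ (pageSize - 1)

    func main() {
        fmt.Printf("maxPage = %#x\n", uint64(maxPage)) // 0xfffffffffffff000

        // Any real page offset is below the sentinel, so the first page that
        // becomes reclaimable always lowers the hint.
        minReclaimablePage := uint64(maxPage)
        if freed := uint64(0x7000); freed < minReclaimablePage {
            minReclaimablePage = freed
        }
        fmt.Printf("after first free: %#x\n", minReclaimablePage) // 0x7000
    }
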
@@ -242,7 +260,7 @@ func (f *FileMem) Allocate(length uint64, kind usage.MemoryKind) (platform.FileR
alignment = usermem.HugePageSize
}
- start := findUnallocatedRange(&f.usage, length, alignment)
+ start, minUnallocatedPage := findUnallocatedRange(&f.usage, f.minUnallocatedPage, length, alignment)
end := start + length
// File offsets are int64s. Since length must be strictly positive, end
// cannot legitimately be 0.
@@ -281,17 +299,36 @@ func (f *FileMem) Allocate(length uint64, kind usage.MemoryKind) (platform.FileR
}) {
panic(fmt.Sprintf("allocating %v: failed to insert into f.usage:\n%v", fr, &f.usage))
}
+
+ if minUnallocatedPage < start {
+ f.minUnallocatedPage = minUnallocatedPage
+ } else {
+ // start was the first unallocated page. The next must be
+ // somewhere beyond end.
+ f.minUnallocatedPage = end
+ }
+
return fr, nil
}
-func findUnallocatedRange(usage *usageSet, length, alignment uint64) uint64 {
+// findUnallocatedRange returns the first unallocated range in usage with the
+// specified length and alignment, searching from page start; it also returns
+// the first single unallocated page at or after start.
+func findUnallocatedRange(usage *usageSet, start, length, alignment uint64) (uint64, uint64) {
+ // firstPage is only tracked until the first unallocated page is found.
+ firstPage := start
+ foundFirstPage := false
alignMask := alignment - 1
- var start uint64
- for seg := usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := usage.LowerBoundSegment(start); seg.Ok(); seg = seg.NextSegment() {
r := seg.Range()
+
+ if !foundFirstPage && r.Start > firstPage {
+ foundFirstPage = true
+ }
+
if start >= r.End {
// start was rounded up to an alignment boundary from the end
- // of a previous segment.
+ // of a previous segment and is now beyond r.End.
continue
}
// This segment represents allocated or reclaimable pages; only the
@@ -301,8 +338,11 @@ func findUnallocatedRange(usage *usageSet, length, alignment uint64) uint64 {
break
}
start = (r.End + alignMask) &^ alignMask
+ if !foundFirstPage {
+ firstPage = r.End
+ }
}
- return start
+ return start, firstPage
}
// fallocate(2) modes, defined in Linux's include/uapi/linux/falloc.h.
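
Taken together, Allocate now seeds the search with f.minUnallocatedPage and findUnallocatedRange reports back the first free page it actually observed. The standalone sketch below shows the same hint-guided first-fit idea over a plain sorted slice of allocated ranges; rng, findUnallocated, and the values in main are illustrative assumptions, not the segment-set types the real code uses.

    package main

    import "fmt"

    // rng is a half-open allocated range [start, end) of page-aligned offsets.
    type rng struct{ start, end uint64 }

    // findUnallocated sketches the hint-guided first-fit scan: begin at hint
    // rather than at offset 0, return the first aligned gap of at least length
    // bytes, and also return the first single free page seen, which becomes
    // the caller's new minUnallocatedPage hint.
    func findUnallocated(allocated []rng, hint, length, alignment uint64) (uint64, uint64) {
        alignMask := alignment - 1
        start := hint
        firstFree := hint
        foundFirstFree := false
        for _, r := range allocated {
            if r.end <= hint {
                // Entirely below the search start; the real code skips these
                // by seeking to the lower-bound segment.
                continue
            }
            if !foundFirstFree && r.start > firstFree {
                // There is a free page before this allocated range.
                foundFirstFree = true
            }
            if start >= r.end {
                // start was already aligned up past this range.
                continue
            }
            if r.start >= start+length {
                // The gap before this range fits the request.
                break
            }
            // Otherwise skip past this range and realign.
            start = (r.end + alignMask) &^ alignMask
            if !foundFirstFree {
                firstFree = r.end
            }
        }
        return start, firstFree
    }

    func main() {
        allocated := []rng{{0x0000, 0x4000}, {0x5000, 0x8000}} // sorted, non-overlapping
        start, firstFree := findUnallocated(allocated, 0, 0x2000, 0x1000)
        fmt.Printf("allocate at %#x, first free page at %#x\n", start, firstFree)
        // Prints: allocate at 0x8000, first free page at 0x4000 (the gap at
        // 0x4000 is only 0x1000 bytes, too small for the 0x2000-byte request).
    }
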
@@ -418,12 +458,15 @@ func (f *FileMem) findReclaimable() (platform.FileRange, bool) {
// Allocate returns the first usable range in offset order and is
// currently a linear scan, so reclaiming from the beginning of the
// file minimizes the expected latency of Allocate.
- for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := f.usage.LowerBoundSegment(f.minReclaimablePage); seg.Ok(); seg = seg.NextSegment() {
if seg.ValuePtr().refs == 0 {
+ f.minReclaimablePage = seg.End()
return seg.Range(), true
}
}
f.reclaimable = false
+ // No pages are reclaimable.
+ f.minReclaimablePage = maxPage
}
}
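
On the reclaim side, the scan now starts at f.minReclaimablePage, moves the hint past whatever it returns, and parks it at maxPage when nothing is left. A toy version over a slice of refcounted ranges, where seg, findReclaimable, and the 4 KiB page size are assumptions for illustration only:

    package main

    import (
        "fmt"
        "math"
    )

    const pageSize = 4096 // assumption; the real code uses usermem.PageSize

    // maxPage-style sentinel, as in the diff above.
    const maxPage = math.MaxUint64 &^ (pageSize - 1)

    // seg is a toy refcounted range; refs == 0 means reclaimable.
    type seg struct {
        start, end uint64
        refs       int64
    }

    // findReclaimable returns the first zero-ref range at or after *hint,
    // advancing *hint so the next scan skips everything already ruled out,
    // and parking it at the sentinel when nothing is reclaimable.
    func findReclaimable(segs []seg, hint *uint64) (seg, bool) {
        for _, s := range segs {
            if s.end <= *hint {
                continue // below the hint; already scanned
            }
            if s.refs == 0 {
                *hint = s.end // pages below s.end are now ruled out
                return s, true
            }
        }
        *hint = maxPage // nothing reclaimable until a later DecRef lowers the hint
        return seg{}, false
    }

    func main() {
        segs := []seg{{0x0000, 0x1000, 2}, {0x1000, 0x3000, 0}, {0x3000, 0x4000, 1}}
        hint := uint64(0)
        if s, ok := findReclaimable(segs, &hint); ok {
            fmt.Printf("reclaim [%#x, %#x), next hint %#x\n", s.start, s.end, hint)
        }
    }
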
@@ -450,6 +493,10 @@ func (f *FileMem) markReclaimed(fr platform.FileRange) {
// caller of markReclaimed may not have decommitted it, so we can only mark
// fr as reclaimed.
f.usage.Remove(f.usage.Isolate(seg, fr))
+ if fr.Start < f.minUnallocatedPage {
+ // We've deallocated at least one lower page.
+ f.minUnallocatedPage = fr.Start
+ }
}
// MapInto implements platform.File.MapInto.
@@ -533,6 +580,10 @@ func (f *FileMem) DecRef(fr platform.FileRange) {
f.usage.MergeAdjacent(fr)
if freed {
+ if fr.Start < f.minReclaimablePage {
+ // We've freed at least one lower page.
+ f.minReclaimablePage = fr.Start
+ }
f.reclaimable = true
f.reclaimCond.Signal()
}
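
The markReclaimed and DecRef halves of the change are the mirror image: anything that can create a lower unallocated or reclaimable page only ever lowers the matching hint, so the scans above can trust that everything below a hint has already been ruled out. A last small sketch of that one-directional update, where lowerHint is a hypothetical helper rather than FileMem code:

    package main

    import "fmt"

    // lowerHint is the DecRef/markReclaimed side of the pattern: when the page
    // at off may now be reclaimable (or unallocated), the hint can only move
    // down. Only the scans in Allocate and findReclaimable move a hint back up.
    func lowerHint(hint *uint64, off uint64) {
        if off < *hint {
            *hint = off
        }
    }

    func main() {
        minReclaimablePage := uint64(0x80000) // parked here by an earlier scan
        lowerHint(&minReclaimablePage, 0x3000) // a range starting at 0x3000 was freed
        lowerHint(&minReclaimablePage, 0x5000) // already covered; no change
        fmt.Printf("%#x\n", minReclaimablePage) // 0x3000
    }
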