author     Michael Pratt <mpratt@google.com>          2018-07-16 18:18:06 -0700
committer  Shentubot <shentubot@google.com>           2018-07-16 18:19:01 -0700
commit     14d06064d26b1cd9e2ccad08ebe997e704092eb8 (patch)
tree       ef22fe6e69d29871a85dc93642514ddc7fde9985 /pkg/sentry
parent     8f21c0bb2807888d812318def43c2405c9b13f5a (diff)
Start allocation and reclaim scans only where they may find a match
If usageSet is heavily fragmented, findUnallocatedRange and findReclaimable can
spend excessive cycles linearly scanning the set for unallocated/free pages.
Improve common cases by beginning the scan only at the first page that could
possibly contain an unallocated/free page. This metadata only guarantees that
there is no lower unallocated/free page, but a scan may still be required
(especially for multi-page allocations). That said, this heuristic can still
provide significant performance improvements for certain applications.

PiperOrigin-RevId: 204841833
Change-Id: Ic41ad33bf9537ecd673a6f5852ab353bf63ea1e6
Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/platform/filemem/filemem.go        65
-rw-r--r--  pkg/sentry/platform/filemem/filemem_test.go   106
2 files changed, 134 insertions, 37 deletions
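
As a rough illustration of the heuristic described in the commit message, the
self-contained Go sketch below maintains the same kind of "no free page below
this offset" hint. All names here (hintedAllocator, minFreePage, findFree) are
made up for the example, a sorted slice stands in for the segment set, and only
page alignment is handled; this is a sketch of the pattern, not the FileMem
implementation.

package main

import "fmt"

const pageSize = 4096

// fileRange is a page-aligned range of file offsets, [start, end).
type fileRange struct{ start, end uint64 }

// hintedAllocator is a toy stand-in for FileMem's usage set plus the new
// minUnallocatedPage field: no unallocated page exists below minFreePage.
type hintedAllocator struct {
    ranges      []fileRange // allocated ranges, sorted and non-overlapping
    minFreePage uint64
}

// findFree mirrors the shape of the patched findUnallocatedRange: it scans
// from the hint and returns both the start of the first gap that can hold
// length bytes and the lowest single unallocated page it passed over.
func (a *hintedAllocator) findFree(length uint64) (uint64, uint64) {
    start := a.minFreePage
    firstFree := a.minFreePage
    foundFirstFree := false
    for _, r := range a.ranges {
        if !foundFirstFree && r.start > firstFree {
            foundFirstFree = true // a gap begins at firstFree
        }
        if start >= r.end {
            continue // this range lies entirely below the candidate start
        }
        if r.start > start && r.start-start >= length {
            break // the gap before this range is large enough
        }
        start = r.end
        if !foundFirstFree {
            firstFree = r.end
        }
    }
    return start, firstFree
}

// allocate carves out the lowest adequately sized gap and updates the hint
// the way the patched Allocate does: remember a lower free page if the scan
// saw one, otherwise jump the hint past the new allocation.
func (a *hintedAllocator) allocate(length uint64) uint64 {
    start, firstFree := a.findFree(length)
    a.insert(fileRange{start, start + length})
    if firstFree < start {
        a.minFreePage = firstFree
    } else {
        a.minFreePage = start + length
    }
    return start
}

// free removes an exactly matching allocated range and, like the patched
// markReclaimed, only ever lowers the hint.
func (a *hintedAllocator) free(fr fileRange) {
    for i, r := range a.ranges {
        if r == fr {
            a.ranges = append(a.ranges[:i], a.ranges[i+1:]...)
            break
        }
    }
    if fr.start < a.minFreePage {
        a.minFreePage = fr.start
    }
}

// insert keeps ranges sorted by start; merging is unnecessary for the demo.
func (a *hintedAllocator) insert(fr fileRange) {
    i := 0
    for i < len(a.ranges) && a.ranges[i].start < fr.start {
        i++
    }
    a.ranges = append(a.ranges, fileRange{})
    copy(a.ranges[i+1:], a.ranges[i:])
    a.ranges[i] = fr
}

func main() {
    a := &hintedAllocator{}
    fmt.Println(a.allocate(pageSize))     // 0
    fmt.Println(a.allocate(pageSize))     // 4096; the hint is now 8192
    a.free(fileRange{0, pageSize})        // the hint drops back to 0
    fmt.Println(a.allocate(2 * pageSize)) // 8192: the one-page gap at 0 is too small
    fmt.Println(a.minFreePage)            // 0: the scan remembered the lower free page
}

The property this mirrors from the patch is that the hints are conservative
lower bounds: Allocate only moves minUnallocatedPage up when the scan proves
nothing lower is free, while markReclaimed and DecRef only ever move their
respective hints down.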
diff --git a/pkg/sentry/platform/filemem/filemem.go b/pkg/sentry/platform/filemem/filemem.go
index 45ef98eb0..6c8b95578 100644
--- a/pkg/sentry/platform/filemem/filemem.go
+++ b/pkg/sentry/platform/filemem/filemem.go
@@ -105,6 +105,12 @@ type FileMem struct {
usageSwapped uint64
usageLast time.Time
+ // minUnallocatedPage is the minimum page that may be unallocated.
+ // i.e., there are no unallocated pages below minUnallocatedPage.
+ //
+ // minUnallocatedPage is protected by mu.
+ minUnallocatedPage uint64
+
// fileSize is the size of the backing memory file in bytes. fileSize is
// always a power-of-two multiple of chunkSize.
//
@@ -119,6 +125,12 @@ type FileMem struct {
// is protected by mu.
reclaimable bool
+ // minReclaimablePage is the minimum page that may be reclaimable.
+ // i.e., all reclaimable pages are >= minReclaimablePage.
+ //
+ // minReclaimablePage is protected by mu.
+ minReclaimablePage uint64
+
// reclaimCond is signaled (with mu locked) when reclaimable or destroyed
// transitions from false to true.
reclaimCond sync.Cond
@@ -162,6 +174,9 @@ const (
chunkMask = chunkSize - 1
initialSize = chunkSize
+
+ // maxPage is the highest 64-bit page.
+ maxPage = math.MaxUint64 &^ (usermem.PageSize - 1)
)
// newFromFile creates a FileMem backed by the given file.
@@ -172,6 +187,9 @@ func newFromFile(file *os.File) (*FileMem, error) {
f := &FileMem{
fileSize: initialSize,
file: file,
+ // No pages are reclaimable. DecRef will always be able to
+ // decrease minReclaimablePage from this point.
+ minReclaimablePage: maxPage,
}
f.reclaimCond.L = &f.mu
f.mappings.Store(make([]uintptr, initialSize/chunkSize))
@@ -242,7 +260,7 @@ func (f *FileMem) Allocate(length uint64, kind usage.MemoryKind) (platform.FileR
alignment = usermem.HugePageSize
}
- start := findUnallocatedRange(&f.usage, length, alignment)
+ start, minUnallocatedPage := findUnallocatedRange(&f.usage, f.minUnallocatedPage, length, alignment)
end := start + length
// File offsets are int64s. Since length must be strictly positive, end
// cannot legitimately be 0.
@@ -281,17 +299,36 @@ func (f *FileMem) Allocate(length uint64, kind usage.MemoryKind) (platform.FileR
}) {
panic(fmt.Sprintf("allocating %v: failed to insert into f.usage:\n%v", fr, &f.usage))
}
+
+ if minUnallocatedPage < start {
+ f.minUnallocatedPage = minUnallocatedPage
+ } else {
+ // start was the first unallocated page. The next must be
+ // somewhere beyond end.
+ f.minUnallocatedPage = end
+ }
+
return fr, nil
}
-func findUnallocatedRange(usage *usageSet, length, alignment uint64) uint64 {
+// findUnallocatedRange returns the start of the first unallocated range in
+// usage with the specified length and alignment, searching from page start,
+// along with the lowest single unallocated page at or above start.
+func findUnallocatedRange(usage *usageSet, start, length, alignment uint64) (uint64, uint64) {
+ // firstPage is only tracked until the first unallocated page is found.
+ firstPage := start
+ foundFirstPage := false
alignMask := alignment - 1
- var start uint64
- for seg := usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := usage.LowerBoundSegment(start); seg.Ok(); seg = seg.NextSegment() {
r := seg.Range()
+
+ if !foundFirstPage && r.Start > firstPage {
+ foundFirstPage = true
+ }
+
if start >= r.End {
// start was rounded up to an alignment boundary from the end
- // of a previous segment.
+ // of a previous segment and is now beyond r.End.
continue
}
// This segment represents allocated or reclaimable pages; only the
@@ -301,8 +338,11 @@ func findUnallocatedRange(usage *usageSet, length, alignment uint64) uint64 {
break
}
start = (r.End + alignMask) &^ alignMask
+ if !foundFirstPage {
+ firstPage = r.End
+ }
}
- return start
+ return start, firstPage
}
// fallocate(2) modes, defined in Linux's include/uapi/linux/falloc.h.
@@ -418,12 +458,15 @@ func (f *FileMem) findReclaimable() (platform.FileRange, bool) {
// Allocate returns the first usable range in offset order and is
// currently a linear scan, so reclaiming from the beginning of the
// file minimizes the expected latency of Allocate.
- for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := f.usage.LowerBoundSegment(f.minReclaimablePage); seg.Ok(); seg = seg.NextSegment() {
if seg.ValuePtr().refs == 0 {
+ f.minReclaimablePage = seg.End()
return seg.Range(), true
}
}
f.reclaimable = false
+ // No pages are reclaimable.
+ f.minReclaimablePage = maxPage
}
}
@@ -450,6 +493,10 @@ func (f *FileMem) markReclaimed(fr platform.FileRange) {
// caller of markReclaimed may not have decommitted it, so we can only mark
// fr as reclaimed.
f.usage.Remove(f.usage.Isolate(seg, fr))
+ if fr.Start < f.minUnallocatedPage {
+ // We've deallocated at least one lower page.
+ f.minUnallocatedPage = fr.Start
+ }
}
// MapInto implements platform.File.MapInto.
@@ -533,6 +580,10 @@ func (f *FileMem) DecRef(fr platform.FileRange) {
f.usage.MergeAdjacent(fr)
if freed {
+ if fr.Start < f.minReclaimablePage {
+ // We've freed at least one lower page.
+ f.minReclaimablePage = fr.Start
+ }
f.reclaimable = true
f.reclaimCond.Signal()
}
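
The reclaim-side hint follows the same pattern. Below is a similarly minimal,
self-contained sketch (again with made-up names: reclaimer, decRef, and
findReclaimable here operate on a plain slice, not on FileMem's segment set)
of how minReclaimablePage behaves across DecRef and findReclaimable.

package main

import "fmt"

const pageSize = 4096

// maxPage plays the role of the patch's maxPage sentinel: "no reclaimable
// pages at all".
const maxPage = ^uint64(0) &^ (pageSize - 1)

// seg is a toy stand-in for a usage segment: a page range plus a reference
// count; refs == 0 means the pages are reclaimable.
type seg struct {
    start, end uint64
    refs       int
}

type reclaimer struct {
    segs           []seg  // sorted by start, non-overlapping
    minReclaimable uint64 // no reclaimable (refs == 0) page lies below this
}

// decRef drops the reference count of segment i and, like the patched
// DecRef, only ever lowers the hint when a lower range becomes reclaimable.
func (r *reclaimer) decRef(i int) {
    r.segs[i].refs--
    if r.segs[i].refs == 0 && r.segs[i].start < r.minReclaimable {
        r.minReclaimable = r.segs[i].start
    }
}

// findReclaimable scans from the hint for the next refs == 0 range and
// advances the hint past it; if nothing is found, the hint is reset to
// maxPage so later scans do no work until decRef lowers it again.
func (r *reclaimer) findReclaimable() (start, end uint64, ok bool) {
    for i := range r.segs {
        s := r.segs[i]
        if s.end <= r.minReclaimable {
            continue // the hint lets the scan skip everything down here
        }
        if s.refs == 0 {
            r.minReclaimable = s.end
            return s.start, s.end, true
        }
    }
    r.minReclaimable = maxPage
    return 0, 0, false
}

func main() {
    r := &reclaimer{
        segs: []seg{
            {start: 0, end: pageSize, refs: 1},
            {start: pageSize, end: 2 * pageSize, refs: 1},
            {start: 2 * pageSize, end: 3 * pageSize, refs: 1},
        },
        minReclaimable: maxPage, // as in newFromFile: nothing reclaimable yet
    }
    fmt.Println(r.findReclaimable()) // 0 0 false
    r.decRef(2)                      // [2*pageSize, 3*pageSize) becomes reclaimable
    fmt.Println(r.findReclaimable()) // 8192 12288 true
    fmt.Println(r.minReclaimable)    // 12288: the next scan skips everything below
}

Resetting the hint to maxPage when a full scan finds nothing is what lets the
next DecRef of any page pull it back down, which is also why the patch has
newFromFile start minReclaimablePage at maxPage.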
diff --git a/pkg/sentry/platform/filemem/filemem_test.go b/pkg/sentry/platform/filemem/filemem_test.go
index 46ffcf116..4b165dc48 100644
--- a/pkg/sentry/platform/filemem/filemem_test.go
+++ b/pkg/sentry/platform/filemem/filemem_test.go
@@ -27,18 +27,22 @@ const (
func TestFindUnallocatedRange(t *testing.T) {
for _, test := range []struct {
- desc string
- usage *usageSegmentDataSlices
- length uint64
- alignment uint64
- start uint64
+ desc string
+ usage *usageSegmentDataSlices
+ start uint64
+ length uint64
+ alignment uint64
+ unallocated uint64
+ minUnallocated uint64
}{
{
- desc: "Initial allocation succeeds",
- usage: &usageSegmentDataSlices{},
- length: page,
- alignment: page,
- start: 0,
+ desc: "Initial allocation succeeds",
+ usage: &usageSegmentDataSlices{},
+ start: 0,
+ length: page,
+ alignment: page,
+ unallocated: 0,
+ minUnallocated: 0,
},
{
desc: "Allocation begins at start of file",
@@ -47,9 +51,11 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{2 * page},
Values: []usageInfo{{refs: 1}},
},
- length: page,
- alignment: page,
- start: 0,
+ start: 0,
+ length: page,
+ alignment: page,
+ unallocated: 0,
+ minUnallocated: 0,
},
{
desc: "In-use frames are not allocatable",
@@ -58,9 +64,11 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{page, 2 * page},
Values: []usageInfo{{refs: 1}, {refs: 2}},
},
- length: page,
- alignment: page,
- start: 2 * page,
+ start: 0,
+ length: page,
+ alignment: page,
+ unallocated: 2 * page,
+ minUnallocated: 2 * page,
},
{
desc: "Reclaimable frames are not allocatable",
@@ -69,9 +77,11 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{page, 2 * page, 3 * page},
Values: []usageInfo{{refs: 1}, {refs: 0}, {refs: 1}},
},
- length: page,
- alignment: page,
- start: 3 * page,
+ start: 0,
+ length: page,
+ alignment: page,
+ unallocated: 3 * page,
+ minUnallocated: 3 * page,
},
{
desc: "Gaps between in-use frames are allocatable",
@@ -80,9 +90,11 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{page, 3 * page},
Values: []usageInfo{{refs: 1}, {refs: 1}},
},
- length: page,
- alignment: page,
- start: page,
+ start: 0,
+ length: page,
+ alignment: page,
+ unallocated: page,
+ minUnallocated: page,
},
{
desc: "Inadequately-sized gaps are rejected",
@@ -91,9 +103,11 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{page, 3 * page},
Values: []usageInfo{{refs: 1}, {refs: 1}},
},
- length: 2 * page,
- alignment: page,
- start: 3 * page,
+ start: 0,
+ length: 2 * page,
+ alignment: page,
+ unallocated: 3 * page,
+ minUnallocated: page,
},
{
desc: "Hugepage alignment is honored",
@@ -104,9 +118,37 @@ func TestFindUnallocatedRange(t *testing.T) {
End: []uint64{page, hugepage + 2*page},
Values: []usageInfo{{refs: 1}, {refs: 1}},
},
- length: hugepage,
- alignment: hugepage,
- start: 2 * hugepage,
+ start: 0,
+ length: hugepage,
+ alignment: hugepage,
+ unallocated: 2 * hugepage,
+ minUnallocated: page,
+ },
+ {
+ desc: "Pages before start ignored",
+ usage: &usageSegmentDataSlices{
+ Start: []uint64{page, 3 * page},
+ End: []uint64{2 * page, 4 * page},
+ Values: []usageInfo{{refs: 1}, {refs: 2}},
+ },
+ start: page,
+ length: page,
+ alignment: page,
+ unallocated: 2 * page,
+ minUnallocated: 2 * page,
+ },
+ {
+ desc: "start may be in the middle of segment",
+ usage: &usageSegmentDataSlices{
+ Start: []uint64{0, 3 * page},
+ End: []uint64{2 * page, 4 * page},
+ Values: []usageInfo{{refs: 1}, {refs: 2}},
+ },
+ start: page,
+ length: page,
+ alignment: page,
+ unallocated: 2 * page,
+ minUnallocated: 2 * page,
},
} {
t.Run(test.desc, func(t *testing.T) {
@@ -114,8 +156,12 @@ func TestFindUnallocatedRange(t *testing.T) {
if err := usage.ImportSortedSlices(test.usage); err != nil {
t.Fatalf("Failed to initialize usage from %v: %v", test.usage, err)
}
- if got, want := findUnallocatedRange(&usage, test.length, test.alignment), test.start; got != want {
- t.Errorf("findUnallocatedRange(%v, %d, %d): got %d, wanted %d", test.usage, test.length, test.alignment, got, want)
+ unallocated, minUnallocated := findUnallocatedRange(&usage, test.start, test.length, test.alignment)
+ if unallocated != test.unallocated {
+ t.Errorf("findUnallocatedRange(%v, %x, %x, %x): got unallocated %x, wanted %x", test.usage, test.start, test.length, test.alignment, unallocated, test.unallocated)
+ }
+ if minUnallocated != test.minUnallocated {
+ t.Errorf("findUnallocatedRange(%v, %x, %x, %x): got minUnallocated %x, wanted %x", test.usage, test.start, test.length, test.alignment, minUnallocated, test.minUnallocated)
}
})
}