Diffstat (limited to 'pkg/sentry/fs')
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/anon/anon_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/dev/dev_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/dev/net_tun.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/dirent_list.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/event_list.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fs_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/dirty_set_impl.go | 377
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/file_range_set_impl.go | 377
-rw-r--r--  pkg/sentry/fs/fsutil/frame_ref_set.go | 40
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/frame_ref_set_impl.go | 377
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go | 6
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/fsutil_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/gofer/fifo.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/gofer/gofer_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/host.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/host_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/host_unsafe_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/util_amd64_unsafe.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/host/util_arm64_unsafe.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/lock/lock_range.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/lock/lock_set.go | 377
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/lock/lock_state_autogen.go | 2
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/proc/device/device_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/proc/proc_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/ramfs/ramfs_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/sys/sys_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/timerfd/timerfd_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/tty/tty_state_autogen.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/user/user.go | 0
-rw-r--r-- [-rwxr-xr-x]  pkg/sentry/fs/user/user_state_autogen.go | 0
36 files changed, 1540 insertions, 16 deletions
diff --git a/pkg/sentry/fs/anon/anon_state_autogen.go b/pkg/sentry/fs/anon/anon_state_autogen.go
index b2b1a466e..b2b1a466e 100755..100644
--- a/pkg/sentry/fs/anon/anon_state_autogen.go
+++ b/pkg/sentry/fs/anon/anon_state_autogen.go
diff --git a/pkg/sentry/fs/dev/dev_state_autogen.go b/pkg/sentry/fs/dev/dev_state_autogen.go
index 272f02672..272f02672 100755..100644
--- a/pkg/sentry/fs/dev/dev_state_autogen.go
+++ b/pkg/sentry/fs/dev/dev_state_autogen.go
diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go
index dc7ad075a..dc7ad075a 100755..100644
--- a/pkg/sentry/fs/dev/net_tun.go
+++ b/pkg/sentry/fs/dev/net_tun.go
diff --git a/pkg/sentry/fs/dirent_list.go b/pkg/sentry/fs/dirent_list.go
index ecbbd7883..ecbbd7883 100755..100644
--- a/pkg/sentry/fs/dirent_list.go
+++ b/pkg/sentry/fs/dirent_list.go
diff --git a/pkg/sentry/fs/event_list.go b/pkg/sentry/fs/event_list.go
index 167fdf906..167fdf906 100755..100644
--- a/pkg/sentry/fs/event_list.go
+++ b/pkg/sentry/fs/event_list.go
diff --git a/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go b/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go
index 9ed7a3d41..9ed7a3d41 100755..100644
--- a/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go
+++ b/pkg/sentry/fs/fdpipe/fdpipe_state_autogen.go
diff --git a/pkg/sentry/fs/fs_state_autogen.go b/pkg/sentry/fs/fs_state_autogen.go
index 502a90d71..502a90d71 100755..100644
--- a/pkg/sentry/fs/fs_state_autogen.go
+++ b/pkg/sentry/fs/fs_state_autogen.go
diff --git a/pkg/sentry/fs/fsutil/dirty_set_impl.go b/pkg/sentry/fs/fsutil/dirty_set_impl.go
index 2510b81b3..8d462c412 100755..100644
--- a/pkg/sentry/fs/fsutil/dirty_set_impl.go
+++ b/pkg/sentry/fs/fsutil/dirty_set_impl.go
@@ -9,6 +9,34 @@ import (
"fmt"
)
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const DirtytrackGaps = 0
+
+var _ = uint8(DirtytrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type DirtydynamicGap [DirtytrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *DirtydynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *DirtydynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
const (
// minDegree is the minimum degree of an internal node in a Set B-tree.
//
@@ -267,8 +295,12 @@ func (s *DirtySet) Insert(gap DirtyGapIterator, r __generics_imported0.MappableR
}
if prev.Ok() && prev.End() == r.Start {
if mval, ok := (dirtySetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := DirtytrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
prev.SetEndUnchecked(r.End)
prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
if next.Ok() && next.Start() == r.End {
val = mval
if mval, ok := (dirtySetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
@@ -282,11 +314,16 @@ func (s *DirtySet) Insert(gap DirtyGapIterator, r __generics_imported0.MappableR
}
if next.Ok() && next.Start() == r.End {
if mval, ok := (dirtySetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := DirtytrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
next.SetStartUnchecked(r.Start)
next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return next
}
}
+
return s.InsertWithoutMergingUnchecked(gap, r, val)
}
@@ -313,11 +350,15 @@ func (s *DirtySet) InsertWithoutMerging(gap DirtyGapIterator, r __generics_impor
// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
func (s *DirtySet) InsertWithoutMergingUnchecked(gap DirtyGapIterator, r __generics_imported0.MappableRange, val DirtyInfo) DirtyIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := DirtytrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
gap.node.keys[gap.index] = r
gap.node.values[gap.index] = val
gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return DirtyIterator{gap.node, gap.index}
}
@@ -332,12 +373,20 @@ func (s *DirtySet) Remove(seg DirtyIterator) DirtyGapIterator {
seg.SetRangeUnchecked(victim.Range())
seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if DirtytrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
return s.Remove(victim).NextGap()
}
copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
dirtySetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
seg.node.nrSegments--
+ if DirtytrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
return seg.node.rebalanceAfterRemove(DirtyGapIterator{seg.node, seg.index})
}
@@ -387,6 +436,7 @@ func (s *DirtySet) MergeUnchecked(first, second DirtyIterator) DirtyIterator {
first.SetEndUnchecked(second.End())
first.SetValue(mval)
+
return s.Remove(second).PrevSegment()
}
}
@@ -562,6 +612,12 @@ type Dirtynode struct {
// than "isLeaf" because false must be the correct value for an empty root.
hasChildren bool
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap DirtydynamicGap
+
// Nodes store keys and values in separate arrays to maximize locality in
// the common case (scanning keys for lookup).
keys [DirtymaxDegree - 1]__generics_imported0.MappableRange
@@ -607,12 +663,12 @@ func (n *Dirtynode) nextSibling() *Dirtynode {
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *Dirtynode) rebalanceBeforeInsert(gap DirtyGapIterator) DirtyGapIterator {
- if n.parent != nil {
- gap = n.parent.rebalanceBeforeInsert(gap)
- }
if n.nrSegments < DirtymaxDegree-1 {
return gap
}
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
if n.parent == nil {
left := &Dirtynode{
@@ -648,6 +704,11 @@ func (n *Dirtynode) rebalanceBeforeInsert(gap DirtyGapIterator) DirtyGapIterator
n.hasChildren = true
n.children[0] = left
n.children[1] = right
+
+ if DirtytrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
if gap.node != n {
return gap
}
@@ -685,6 +746,11 @@ func (n *Dirtynode) rebalanceBeforeInsert(gap DirtyGapIterator) DirtyGapIterator
}
n.nrSegments = DirtyminDegree - 1
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
if gap.node != n {
return gap
}
@@ -730,6 +796,11 @@ func (n *Dirtynode) rebalanceAfterRemove(gap DirtyGapIterator) DirtyGapIterator
}
n.nrSegments++
sibling.nrSegments--
+
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling && gap.index == sibling.nrSegments {
return DirtyGapIterator{n, 0}
}
@@ -758,6 +829,11 @@ func (n *Dirtynode) rebalanceAfterRemove(gap DirtyGapIterator) DirtyGapIterator
}
n.nrSegments++
sibling.nrSegments--
+
+ if DirtytrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling {
if gap.index == 0 {
return DirtyGapIterator{n, n.nrSegments}
@@ -790,6 +866,7 @@ func (n *Dirtynode) rebalanceAfterRemove(gap DirtyGapIterator) DirtyGapIterator
p.children[0] = nil
p.children[1] = nil
}
+
if gap.node == left {
return DirtyGapIterator{p, gap.index}
}
@@ -836,10 +913,146 @@ func (n *Dirtynode) rebalanceAfterRemove(gap DirtyGapIterator) DirtyGapIterator
p.children[p.nrSegments] = nil
p.nrSegments--
+ if DirtytrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
n = p
}
}
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// necessary update.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *Dirtynode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node solely with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *Dirtynode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculate the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *Dirtynode) calculateMaxGapLeaf() uint64 {
+ max := DirtyGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (DirtyGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+// and calculate the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *Dirtynode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *Dirtynode) searchFirstLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if n.maxGap.Get() < minSize {
+ return DirtyGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := DirtyGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *Dirtynode) searchLastLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if n.maxGap.Get() < minSize {
+ return DirtyGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := DirtyGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
// A Iterator is conceptually one of:
//
// - A pointer to a segment in a set; or
@@ -1145,6 +1358,114 @@ func (gap DirtyGapIterator) NextGap() DirtyGapIterator {
return seg.NextGap()
}
+// NextLargeEnoughGap returns the iterated gap's first next gap with larger
+// length than minSize. If not found, return a terminal gap iterator (does NOT
+// include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap DirtyGapIterator) NextLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if DirtytrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap DirtyGapIterator) nextLargeEnoughGapHelper(minSize uint64) DirtyGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return DirtyGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or
+// equal length than minSize. If not found, return a terminal gap iterator
+// (does NOT include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap DirtyGapIterator) PrevLargeEnoughGap(minSize uint64) DirtyGapIterator {
+ if DirtytrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap DirtyGapIterator) prevLargeEnoughGapHelper(minSize uint64) DirtyGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return DirtyGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
@@ -1211,7 +1532,15 @@ func (n *Dirtynode) writeDebugString(buf *bytes.Buffer, prefix string) {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
}
buf.WriteString(prefix)
- buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ if n.hasChildren {
+ if DirtytrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
}
if child := n.children[n.nrSegments]; child != nil {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
@@ -1263,6 +1592,46 @@ func (s *DirtySet) ImportSortedSlices(sds *DirtySegmentDataSlices) error {
}
return nil
}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *DirtySet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.MappableRange, DirtyInfo) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *DirtySet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
func (s *DirtySet) saveRoot() *DirtySegmentDataSlices {
return s.ExportSortedSlices()
}
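
Editorial note: the gap-tracking machinery added above is dormant in this file, since DirtytrackGaps is 0; the new iterator methods only become usable for sets generated from this template with trackGaps = 1. The sketch below is not part of the patch; the Example* names are hypothetical stand-ins for such a trackGaps-enabled instantiation.

// Editorial sketch, assuming a hypothetical ExampleSet generated with
// trackGaps = 1. Calling NextLargeEnoughGap on the Dirty set above would
// panic, because DirtytrackGaps is 0.
func findFreeExtent(s *ExampleSet, minSize uint64) (ExampleGapIterator, bool) {
	gap := s.FirstGap()
	if gap.Ok() && gap.Range().Length() >= minSize {
		return gap, true // the very first gap is already large enough
	}
	// NextLargeEnoughGap skips whole subtrees whose cached maxGap is below
	// minSize, avoiding a linear walk over every gap in the set.
	if next := gap.NextLargeEnoughGap(minSize); next.Ok() {
		return next, true
	}
	return ExampleGapIterator{}, false
}
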
diff --git a/pkg/sentry/fs/fsutil/file_range_set_impl.go b/pkg/sentry/fs/fsutil/file_range_set_impl.go
index 01e7a2401..e5b6d1041 100755..100644
--- a/pkg/sentry/fs/fsutil/file_range_set_impl.go
+++ b/pkg/sentry/fs/fsutil/file_range_set_impl.go
@@ -9,6 +9,34 @@ import (
"fmt"
)
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const FileRangetrackGaps = 0
+
+var _ = uint8(FileRangetrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type FileRangedynamicGap [FileRangetrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FileRangedynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FileRangedynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
const (
// minDegree is the minimum degree of an internal node in a Set B-tree.
//
@@ -267,8 +295,12 @@ func (s *FileRangeSet) Insert(gap FileRangeGapIterator, r __generics_imported0.M
}
if prev.Ok() && prev.End() == r.Start {
if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
prev.SetEndUnchecked(r.End)
prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
if next.Ok() && next.Start() == r.End {
val = mval
if mval, ok := (FileRangeSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
@@ -282,11 +314,16 @@ func (s *FileRangeSet) Insert(gap FileRangeGapIterator, r __generics_imported0.M
}
if next.Ok() && next.Start() == r.End {
if mval, ok := (FileRangeSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := FileRangetrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
next.SetStartUnchecked(r.Start)
next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return next
}
}
+
return s.InsertWithoutMergingUnchecked(gap, r, val)
}
@@ -313,11 +350,15 @@ func (s *FileRangeSet) InsertWithoutMerging(gap FileRangeGapIterator, r __generi
// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
func (s *FileRangeSet) InsertWithoutMergingUnchecked(gap FileRangeGapIterator, r __generics_imported0.MappableRange, val uint64) FileRangeIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := FileRangetrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
gap.node.keys[gap.index] = r
gap.node.values[gap.index] = val
gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return FileRangeIterator{gap.node, gap.index}
}
@@ -332,12 +373,20 @@ func (s *FileRangeSet) Remove(seg FileRangeIterator) FileRangeGapIterator {
seg.SetRangeUnchecked(victim.Range())
seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if FileRangetrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
return s.Remove(victim).NextGap()
}
copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
FileRangeSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
seg.node.nrSegments--
+ if FileRangetrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
return seg.node.rebalanceAfterRemove(FileRangeGapIterator{seg.node, seg.index})
}
@@ -387,6 +436,7 @@ func (s *FileRangeSet) MergeUnchecked(first, second FileRangeIterator) FileRange
first.SetEndUnchecked(second.End())
first.SetValue(mval)
+
return s.Remove(second).PrevSegment()
}
}
@@ -562,6 +612,12 @@ type FileRangenode struct {
// than "isLeaf" because false must be the correct value for an empty root.
hasChildren bool
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap FileRangedynamicGap
+
// Nodes store keys and values in separate arrays to maximize locality in
// the common case (scanning keys for lookup).
keys [FileRangemaxDegree - 1]__generics_imported0.MappableRange
@@ -607,12 +663,12 @@ func (n *FileRangenode) nextSibling() *FileRangenode {
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *FileRangenode) rebalanceBeforeInsert(gap FileRangeGapIterator) FileRangeGapIterator {
- if n.parent != nil {
- gap = n.parent.rebalanceBeforeInsert(gap)
- }
if n.nrSegments < FileRangemaxDegree-1 {
return gap
}
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
if n.parent == nil {
left := &FileRangenode{
@@ -648,6 +704,11 @@ func (n *FileRangenode) rebalanceBeforeInsert(gap FileRangeGapIterator) FileRang
n.hasChildren = true
n.children[0] = left
n.children[1] = right
+
+ if FileRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
if gap.node != n {
return gap
}
@@ -685,6 +746,11 @@ func (n *FileRangenode) rebalanceBeforeInsert(gap FileRangeGapIterator) FileRang
}
n.nrSegments = FileRangeminDegree - 1
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
if gap.node != n {
return gap
}
@@ -730,6 +796,11 @@ func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRange
}
n.nrSegments++
sibling.nrSegments--
+
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling && gap.index == sibling.nrSegments {
return FileRangeGapIterator{n, 0}
}
@@ -758,6 +829,11 @@ func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRange
}
n.nrSegments++
sibling.nrSegments--
+
+ if FileRangetrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling {
if gap.index == 0 {
return FileRangeGapIterator{n, n.nrSegments}
@@ -790,6 +866,7 @@ func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRange
p.children[0] = nil
p.children[1] = nil
}
+
if gap.node == left {
return FileRangeGapIterator{p, gap.index}
}
@@ -836,10 +913,146 @@ func (n *FileRangenode) rebalanceAfterRemove(gap FileRangeGapIterator) FileRange
p.children[p.nrSegments] = nil
p.nrSegments--
+ if FileRangetrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
n = p
}
}
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// necessary update.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *FileRangenode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node solely with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *FileRangenode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculate the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *FileRangenode) calculateMaxGapLeaf() uint64 {
+ max := FileRangeGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (FileRangeGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+// and calculate the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *FileRangenode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *FileRangenode) searchFirstLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FileRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := FileRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *FileRangenode) searchLastLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FileRangeGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := FileRangeGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
// A Iterator is conceptually one of:
//
// - A pointer to a segment in a set; or
@@ -1145,6 +1358,114 @@ func (gap FileRangeGapIterator) NextGap() FileRangeGapIterator {
return seg.NextGap()
}
+// NextLargeEnoughGap returns the iterated gap's first next gap with larger
+// length than minSize. If not found, return a terminal gap iterator (does NOT
+// include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap FileRangeGapIterator) NextLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if FileRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap FileRangeGapIterator) nextLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FileRangeGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or
+// equal length than minSize. If not found, return a terminal gap iterator
+// (does NOT include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap FileRangeGapIterator) PrevLargeEnoughGap(minSize uint64) FileRangeGapIterator {
+ if FileRangetrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap FileRangeGapIterator) prevLargeEnoughGapHelper(minSize uint64) FileRangeGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FileRangeGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
@@ -1211,7 +1532,15 @@ func (n *FileRangenode) writeDebugString(buf *bytes.Buffer, prefix string) {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
}
buf.WriteString(prefix)
- buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ if n.hasChildren {
+ if FileRangetrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
}
if child := n.children[n.nrSegments]; child != nil {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
@@ -1263,6 +1592,46 @@ func (s *FileRangeSet) ImportSortedSlices(sds *FileRangeSegmentDataSlices) error
}
return nil
}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *FileRangeSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.MappableRange, uint64) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *FileRangeSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
func (s *FileRangeSet) saveRoot() *FileRangeSegmentDataSlices {
return s.ExportSortedSlices()
}
diff --git a/pkg/sentry/fs/fsutil/frame_ref_set.go b/pkg/sentry/fs/fsutil/frame_ref_set.go
index 6564fd0c6..dd6f5aba6 100644
--- a/pkg/sentry/fs/fsutil/frame_ref_set.go
+++ b/pkg/sentry/fs/fsutil/frame_ref_set.go
@@ -18,6 +18,7 @@ import (
"math"
"gvisor.dev/gvisor/pkg/sentry/platform"
+ "gvisor.dev/gvisor/pkg/sentry/usage"
)
// FrameRefSetFunctions implements segment.Functions for FrameRefSet.
@@ -49,3 +50,42 @@ func (FrameRefSetFunctions) Merge(_ platform.FileRange, val1 uint64, _ platform.
func (FrameRefSetFunctions) Split(_ platform.FileRange, val uint64, _ uint64) (uint64, uint64) {
return val, val
}
+
+// IncRefAndAccount adds a reference on the range fr. All newly inserted segments
+// are accounted as host page cache memory mappings.
+func (refs *FrameRefSet) IncRefAndAccount(fr platform.FileRange) {
+ seg, gap := refs.Find(fr.Start)
+ for {
+ switch {
+ case seg.Ok() && seg.Start() < fr.End:
+ seg = refs.Isolate(seg, fr)
+ seg.SetValue(seg.Value() + 1)
+ seg, gap = seg.NextNonEmpty()
+ case gap.Ok() && gap.Start() < fr.End:
+ newRange := gap.Range().Intersect(fr)
+ usage.MemoryAccounting.Inc(newRange.Length(), usage.Mapped)
+ seg, gap = refs.InsertWithoutMerging(gap, newRange, 1).NextNonEmpty()
+ default:
+ refs.MergeAdjacent(fr)
+ return
+ }
+ }
+}
+
+// DecRefAndAccount removes a reference on the range fr and untracks segments
+// that are removed from memory accounting.
+func (refs *FrameRefSet) DecRefAndAccount(fr platform.FileRange) {
+ seg := refs.FindSegment(fr.Start)
+
+ for seg.Ok() && seg.Start() < fr.End {
+ seg = refs.Isolate(seg, fr)
+ if old := seg.Value(); old == 1 {
+ usage.MemoryAccounting.Dec(seg.Range().Length(), usage.Mapped)
+ seg = refs.Remove(seg).NextSegment()
+ } else {
+ seg.SetValue(old - 1)
+ seg = seg.NextSegment()
+ }
+ }
+ refs.MergeAdjacent(fr)
+}
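
Editorial note: IncRefAndAccount and DecRefAndAccount wrap the set's plain reference counting with usage.Mapped accounting. A hypothetical caller from another package might look like the sketch below; only the two methods themselves come from the patch, and the function names are illustrative.

// Editorial sketch of a hypothetical caller of the new accounting helpers.
package example

import (
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/platform"
)

// pinPages takes a reference on fr; ranges not previously referenced are
// charged to usage.Mapped inside IncRefAndAccount.
func pinPages(refs *fsutil.FrameRefSet, fr platform.FileRange) {
	refs.IncRefAndAccount(fr)
}

// unpinPages drops a reference on fr; once a segment's count reaches zero,
// DecRefAndAccount removes its usage.Mapped charge and the segment itself.
func unpinPages(refs *fsutil.FrameRefSet, fr platform.FileRange) {
	refs.DecRefAndAccount(fr)
}
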
diff --git a/pkg/sentry/fs/fsutil/frame_ref_set_impl.go b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
index 88695dbd1..413b037d1 100755..100644
--- a/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
+++ b/pkg/sentry/fs/fsutil/frame_ref_set_impl.go
@@ -9,6 +9,34 @@ import (
"fmt"
)
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const FrameReftrackGaps = 0
+
+var _ = uint8(FrameReftrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type FrameRefdynamicGap [FrameReftrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FrameRefdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *FrameRefdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
const (
// minDegree is the minimum degree of an internal node in a Set B-tree.
//
@@ -267,8 +295,12 @@ func (s *FrameRefSet) Insert(gap FrameRefGapIterator, r __generics_imported0.Fil
}
if prev.Ok() && prev.End() == r.Start {
if mval, ok := (FrameRefSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := FrameReftrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
prev.SetEndUnchecked(r.End)
prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
if next.Ok() && next.Start() == r.End {
val = mval
if mval, ok := (FrameRefSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
@@ -282,11 +314,16 @@ func (s *FrameRefSet) Insert(gap FrameRefGapIterator, r __generics_imported0.Fil
}
if next.Ok() && next.Start() == r.End {
if mval, ok := (FrameRefSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := FrameReftrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
next.SetStartUnchecked(r.Start)
next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return next
}
}
+
return s.InsertWithoutMergingUnchecked(gap, r, val)
}
@@ -313,11 +350,15 @@ func (s *FrameRefSet) InsertWithoutMerging(gap FrameRefGapIterator, r __generics
// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
func (s *FrameRefSet) InsertWithoutMergingUnchecked(gap FrameRefGapIterator, r __generics_imported0.FileRange, val uint64) FrameRefIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := FrameReftrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
gap.node.keys[gap.index] = r
gap.node.values[gap.index] = val
gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return FrameRefIterator{gap.node, gap.index}
}
@@ -332,12 +373,20 @@ func (s *FrameRefSet) Remove(seg FrameRefIterator) FrameRefGapIterator {
seg.SetRangeUnchecked(victim.Range())
seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if FrameReftrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
return s.Remove(victim).NextGap()
}
copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
FrameRefSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
seg.node.nrSegments--
+ if FrameReftrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
return seg.node.rebalanceAfterRemove(FrameRefGapIterator{seg.node, seg.index})
}
@@ -387,6 +436,7 @@ func (s *FrameRefSet) MergeUnchecked(first, second FrameRefIterator) FrameRefIte
first.SetEndUnchecked(second.End())
first.SetValue(mval)
+
return s.Remove(second).PrevSegment()
}
}
@@ -562,6 +612,12 @@ type FrameRefnode struct {
// than "isLeaf" because false must be the correct value for an empty root.
hasChildren bool
+ // The longest gap within this node. If the node is a leaf, it's simply the
+ // maximum gap among all the (nrSegments+1) gaps formed by its nrSegments keys
+ // including the 0th and nrSegments-th gap possibly shared with its upper-level
+ // nodes; if it's a non-leaf node, it's the max of all children's maxGap.
+ maxGap FrameRefdynamicGap
+
// Nodes store keys and values in separate arrays to maximize locality in
// the common case (scanning keys for lookup).
keys [FrameRefmaxDegree - 1]__generics_imported0.FileRange
@@ -607,12 +663,12 @@ func (n *FrameRefnode) nextSibling() *FrameRefnode {
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *FrameRefnode) rebalanceBeforeInsert(gap FrameRefGapIterator) FrameRefGapIterator {
- if n.parent != nil {
- gap = n.parent.rebalanceBeforeInsert(gap)
- }
if n.nrSegments < FrameRefmaxDegree-1 {
return gap
}
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
if n.parent == nil {
left := &FrameRefnode{
@@ -648,6 +704,11 @@ func (n *FrameRefnode) rebalanceBeforeInsert(gap FrameRefGapIterator) FrameRefGa
n.hasChildren = true
n.children[0] = left
n.children[1] = right
+
+ if FrameReftrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
if gap.node != n {
return gap
}
@@ -685,6 +746,11 @@ func (n *FrameRefnode) rebalanceBeforeInsert(gap FrameRefGapIterator) FrameRefGa
}
n.nrSegments = FrameRefminDegree - 1
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
if gap.node != n {
return gap
}
@@ -730,6 +796,11 @@ func (n *FrameRefnode) rebalanceAfterRemove(gap FrameRefGapIterator) FrameRefGap
}
n.nrSegments++
sibling.nrSegments--
+
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling && gap.index == sibling.nrSegments {
return FrameRefGapIterator{n, 0}
}
@@ -758,6 +829,11 @@ func (n *FrameRefnode) rebalanceAfterRemove(gap FrameRefGapIterator) FrameRefGap
}
n.nrSegments++
sibling.nrSegments--
+
+ if FrameReftrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling {
if gap.index == 0 {
return FrameRefGapIterator{n, n.nrSegments}
@@ -790,6 +866,7 @@ func (n *FrameRefnode) rebalanceAfterRemove(gap FrameRefGapIterator) FrameRefGap
p.children[0] = nil
p.children[1] = nil
}
+
if gap.node == left {
return FrameRefGapIterator{p, gap.index}
}
@@ -836,10 +913,146 @@ func (n *FrameRefnode) rebalanceAfterRemove(gap FrameRefGapIterator) FrameRefGap
p.children[p.nrSegments] = nil
p.nrSegments--
+ if FrameReftrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
n = p
}
}
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// necessary update.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *FrameRefnode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates maxGap of the calling node solely with no
+// propagation to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *FrameRefnode) updateMaxGapLocal() {
+ if !n.hasChildren {
+
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates the gaps within a leaf node and calculate the
+// max.
+//
+// Preconditions: n must be a leaf node.
+func (n *FrameRefnode) calculateMaxGapLeaf() uint64 {
+ max := FrameRefGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (FrameRefGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates children's maxGap within an internal node n
+// and calculate the max.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *FrameRefnode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *FrameRefnode) searchFirstLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FrameRefGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := FrameRefGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap having at least minSize length
+// in the subtree rooted by n. If not found, return a terminal gap iterator.
+func (n *FrameRefnode) searchLastLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if n.maxGap.Get() < minSize {
+ return FrameRefGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := FrameRefGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
// A Iterator is conceptually one of:
//
// - A pointer to a segment in a set; or
@@ -1145,6 +1358,114 @@ func (gap FrameRefGapIterator) NextGap() FrameRefGapIterator {
return seg.NextGap()
}
+// NextLargeEnoughGap returns the iterated gap's first next gap with larger
+// length than minSize. If not found, return a terminal gap iterator (does NOT
+// include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap FrameRefGapIterator) NextLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if FrameReftrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap FrameRefGapIterator) nextLargeEnoughGapHelper(minSize uint64) FrameRefGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FrameRefGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or
+// equal length than minSize. If not found, return a terminal gap iterator
+// (does NOT include this gap itself).
+//
+// Precondition: trackGaps must be 1.
+func (gap FrameRefGapIterator) PrevLargeEnoughGap(minSize uint64) FrameRefGapIterator {
+ if FrameReftrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to do the real recursions.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap FrameRefGapIterator) prevLargeEnoughGapHelper(minSize uint64) FrameRefGapIterator {
+
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return FrameRefGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
@@ -1211,7 +1532,15 @@ func (n *FrameRefnode) writeDebugString(buf *bytes.Buffer, prefix string) {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
}
buf.WriteString(prefix)
- buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ if n.hasChildren {
+ if FrameReftrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
}
if child := n.children[n.nrSegments]; child != nil {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
@@ -1263,6 +1592,46 @@ func (s *FrameRefSet) ImportSortedSlices(sds *FrameRefSegmentDataSlices) error {
}
return nil
}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *FrameRefSet) segmentTestCheck(expectedSegments int, segFunc func(int, __generics_imported0.FileRange, uint64) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *FrameRefSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
func (s *FrameRefSet) saveRoot() *FrameRefSegmentDataSlices {
return s.ExportSortedSlices()
}
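
Editorial note: each generated *dynamicGap type above is an array whose length is the trackGaps constant, so the maxGap field occupies no space in the node struct when gap tracking is disabled. The standalone snippet below is not part of the patch; it illustrates the underlying Go behaviour and why Get and Set carry the "trackGaps must be non-zero" precondition (indexing the slice of a zero-length array panics at runtime).

// Editorial illustration of the zero-length-array trick used by the
// generated dynamicGap types.
package main

import (
	"fmt"
	"unsafe"
)

type gapDisabled [0]uint64 // analogue of a dynamicGap with trackGaps = 0
type gapEnabled [1]uint64  // analogue of a dynamicGap with trackGaps = 1

func main() {
	fmt.Println(unsafe.Sizeof(gapDisabled{})) // 0 bytes: the field costs nothing
	fmt.Println(unsafe.Sizeof(gapEnabled{}))  // 8 bytes: room for the cached max gap

	var g gapEnabled
	g[:][0] = 42         // same slice-then-index pattern as dynamicGap.Set
	fmt.Println(g[:][0]) // 42, read back the way dynamicGap.Get does

	// With a gapDisabled value, the same expression would panic (index out of
	// range on an empty slice), hence the trackGaps != 0 precondition.
}
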
diff --git a/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go
index a0baca0c5..79d4610f0 100755..100644
--- a/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go
+++ b/pkg/sentry/fs/fsutil/fsutil_impl_state_autogen.go
@@ -25,6 +25,7 @@ func (x *Dirtynode) save(m state.Map) {
m.Save("parent", &x.parent)
m.Save("parentIndex", &x.parentIndex)
m.Save("hasChildren", &x.hasChildren)
+ m.Save("maxGap", &x.maxGap)
m.Save("keys", &x.keys)
m.Save("values", &x.values)
m.Save("children", &x.children)
@@ -36,6 +37,7 @@ func (x *Dirtynode) load(m state.Map) {
m.Load("parent", &x.parent)
m.Load("parentIndex", &x.parentIndex)
m.Load("hasChildren", &x.hasChildren)
+ m.Load("maxGap", &x.maxGap)
m.Load("keys", &x.keys)
m.Load("values", &x.values)
m.Load("children", &x.children)
@@ -75,6 +77,7 @@ func (x *FileRangenode) save(m state.Map) {
m.Save("parent", &x.parent)
m.Save("parentIndex", &x.parentIndex)
m.Save("hasChildren", &x.hasChildren)
+ m.Save("maxGap", &x.maxGap)
m.Save("keys", &x.keys)
m.Save("values", &x.values)
m.Save("children", &x.children)
@@ -86,6 +89,7 @@ func (x *FileRangenode) load(m state.Map) {
m.Load("parent", &x.parent)
m.Load("parentIndex", &x.parentIndex)
m.Load("hasChildren", &x.hasChildren)
+ m.Load("maxGap", &x.maxGap)
m.Load("keys", &x.keys)
m.Load("values", &x.values)
m.Load("children", &x.children)
@@ -125,6 +129,7 @@ func (x *FrameRefnode) save(m state.Map) {
m.Save("parent", &x.parent)
m.Save("parentIndex", &x.parentIndex)
m.Save("hasChildren", &x.hasChildren)
+ m.Save("maxGap", &x.maxGap)
m.Save("keys", &x.keys)
m.Save("values", &x.values)
m.Save("children", &x.children)
@@ -136,6 +141,7 @@ func (x *FrameRefnode) load(m state.Map) {
m.Load("parent", &x.parent)
m.Load("parentIndex", &x.parentIndex)
m.Load("hasChildren", &x.hasChildren)
+ m.Load("maxGap", &x.maxGap)
m.Load("keys", &x.keys)
m.Load("values", &x.values)
m.Load("children", &x.children)
diff --git a/pkg/sentry/fs/fsutil/fsutil_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_state_autogen.go
index 80b93ad25..80b93ad25 100755..100644
--- a/pkg/sentry/fs/fsutil/fsutil_state_autogen.go
+++ b/pkg/sentry/fs/fsutil/fsutil_state_autogen.go
diff --git a/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go b/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go
index 00b0994f6..00b0994f6 100755..100644
--- a/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go
+++ b/pkg/sentry/fs/fsutil/fsutil_unsafe_state_autogen.go
diff --git a/pkg/sentry/fs/gofer/fifo.go b/pkg/sentry/fs/gofer/fifo.go
index 456557058..456557058 100755..100644
--- a/pkg/sentry/fs/gofer/fifo.go
+++ b/pkg/sentry/fs/gofer/fifo.go
diff --git a/pkg/sentry/fs/gofer/gofer_state_autogen.go b/pkg/sentry/fs/gofer/gofer_state_autogen.go
index 7db9211b4..7db9211b4 100755..100644
--- a/pkg/sentry/fs/gofer/gofer_state_autogen.go
+++ b/pkg/sentry/fs/gofer/gofer_state_autogen.go
diff --git a/pkg/sentry/fs/host/host.go b/pkg/sentry/fs/host/host.go
index 081ba1dd8..081ba1dd8 100755..100644
--- a/pkg/sentry/fs/host/host.go
+++ b/pkg/sentry/fs/host/host.go
diff --git a/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go
index 488cbdfcf..488cbdfcf 100755..100644
--- a/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go
+++ b/pkg/sentry/fs/host/host_amd64_unsafe_state_autogen.go
diff --git a/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go
index 7371b44db..7371b44db 100755..100644
--- a/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go
+++ b/pkg/sentry/fs/host/host_arm64_unsafe_state_autogen.go
diff --git a/pkg/sentry/fs/host/host_state_autogen.go b/pkg/sentry/fs/host/host_state_autogen.go
index a6b97a154..a6b97a154 100755..100644
--- a/pkg/sentry/fs/host/host_state_autogen.go
+++ b/pkg/sentry/fs/host/host_state_autogen.go
diff --git a/pkg/sentry/fs/host/host_unsafe_state_autogen.go b/pkg/sentry/fs/host/host_unsafe_state_autogen.go
index b2d8c661f..b2d8c661f 100755..100644
--- a/pkg/sentry/fs/host/host_unsafe_state_autogen.go
+++ b/pkg/sentry/fs/host/host_unsafe_state_autogen.go
diff --git a/pkg/sentry/fs/host/util_amd64_unsafe.go b/pkg/sentry/fs/host/util_amd64_unsafe.go
index 66da6e9f5..66da6e9f5 100755..100644
--- a/pkg/sentry/fs/host/util_amd64_unsafe.go
+++ b/pkg/sentry/fs/host/util_amd64_unsafe.go
diff --git a/pkg/sentry/fs/host/util_arm64_unsafe.go b/pkg/sentry/fs/host/util_arm64_unsafe.go
index e8cb94aeb..e8cb94aeb 100755..100644
--- a/pkg/sentry/fs/host/util_arm64_unsafe.go
+++ b/pkg/sentry/fs/host/util_arm64_unsafe.go
diff --git a/pkg/sentry/fs/lock/lock_range.go b/pkg/sentry/fs/lock/lock_range.go
index 7a6f77640..7a6f77640 100755..100644
--- a/pkg/sentry/fs/lock/lock_range.go
+++ b/pkg/sentry/fs/lock/lock_range.go
diff --git a/pkg/sentry/fs/lock/lock_set.go b/pkg/sentry/fs/lock/lock_set.go
index 2343ca0b4..5356f5791 100755..100644
--- a/pkg/sentry/fs/lock/lock_set.go
+++ b/pkg/sentry/fs/lock/lock_set.go
@@ -5,6 +5,34 @@ import (
"fmt"
)
+// trackGaps is an optional parameter.
+//
+// If trackGaps is 1, the Set will track maximum gap size recursively,
+// enabling the GapIterator.{Prev,Next}LargeEnoughGap functions. In this
+// case, Key must be an unsigned integer.
+//
+// trackGaps must be 0 or 1.
+const LocktrackGaps = 0
+
+var _ = uint8(LocktrackGaps << 7) // Will fail if not zero or one.
+
+// dynamicGap is a type that disappears if trackGaps is 0.
+type LockdynamicGap [LocktrackGaps]uint64
+
+// Get returns the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *LockdynamicGap) Get() uint64 {
+ return d[:][0]
+}
+
+// Set sets the value of the gap.
+//
+// Precondition: trackGaps must be non-zero.
+func (d *LockdynamicGap) Set(v uint64) {
+ d[:][0] = v
+}
+
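The LockdynamicGap declaration above relies on Go's zero-length-array trick: with trackGaps set to 0 the maxGap field occupies no memory and every "if LocktrackGaps != 0" guard is constant-false, while d[:][0] still compiles because indexing the derived slice is only checked at run time. A self-contained sketch of the idea (illustrative names, not part of this diff):

package main

import (
	"fmt"
	"unsafe"
)

// trackGaps stands in for the template's compile-time parameter.
const trackGaps = 0 // set to 1 to enable gap tracking

// dynamicGap occupies zero bytes when trackGaps is 0.
type dynamicGap [trackGaps]uint64

// Get and Set index d[:] rather than d directly: indexing a zero-element
// array with a constant would be a compile-time error, whereas slice indexing
// is only checked at run time (and these methods are never called unless
// trackGaps is 1).
func (d *dynamicGap) Get() uint64  { return d[:][0] }
func (d *dynamicGap) Set(v uint64) { d[:][0] = v }

type node struct {
	nrSegments int
	maxGap     dynamicGap
}

func main() {
	var n node
	fmt.Println(unsafe.Sizeof(n.maxGap)) // 0 when trackGaps == 0, 8 when it is 1
	if trackGaps != 0 {
		n.maxGap.Set(42)
		fmt.Println(n.maxGap.Get())
	}
}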
const (
// minDegree is the minimum degree of an internal node in a Set B-tree.
//
@@ -263,8 +291,12 @@ func (s *LockSet) Insert(gap LockGapIterator, r LockRange, val Lock) LockIterato
}
if prev.Ok() && prev.End() == r.Start {
if mval, ok := (lockSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ shrinkMaxGap := LocktrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
prev.SetEndUnchecked(r.End)
prev.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
if next.Ok() && next.Start() == r.End {
val = mval
if mval, ok := (lockSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
@@ -278,11 +310,16 @@ func (s *LockSet) Insert(gap LockGapIterator, r LockRange, val Lock) LockIterato
}
if next.Ok() && next.Start() == r.End {
if mval, ok := (lockSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ shrinkMaxGap := LocktrackGaps != 0 && gap.Range().Length() == gap.node.maxGap.Get()
next.SetStartUnchecked(r.Start)
next.SetValue(mval)
+ if shrinkMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return next
}
}
+
return s.InsertWithoutMergingUnchecked(gap, r, val)
}
@@ -309,11 +346,15 @@ func (s *LockSet) InsertWithoutMerging(gap LockGapIterator, r LockRange, val Loc
// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
func (s *LockSet) InsertWithoutMergingUnchecked(gap LockGapIterator, r LockRange, val Lock) LockIterator {
gap = gap.node.rebalanceBeforeInsert(gap)
+ splitMaxGap := LocktrackGaps != 0 && (gap.node.nrSegments == 0 || gap.Range().Length() == gap.node.maxGap.Get())
copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
gap.node.keys[gap.index] = r
gap.node.values[gap.index] = val
gap.node.nrSegments++
+ if splitMaxGap {
+ gap.node.updateMaxGapLeaf()
+ }
return LockIterator{gap.node, gap.index}
}
@@ -328,12 +369,20 @@ func (s *LockSet) Remove(seg LockIterator) LockGapIterator {
seg.SetRangeUnchecked(victim.Range())
seg.SetValue(victim.Value())
+
+ nextAdjacentNode := seg.NextSegment().node
+ if LocktrackGaps != 0 {
+ nextAdjacentNode.updateMaxGapLeaf()
+ }
return s.Remove(victim).NextGap()
}
copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
lockSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
seg.node.nrSegments--
+ if LocktrackGaps != 0 {
+ seg.node.updateMaxGapLeaf()
+ }
return seg.node.rebalanceAfterRemove(LockGapIterator{seg.node, seg.index})
}
@@ -383,6 +432,7 @@ func (s *LockSet) MergeUnchecked(first, second LockIterator) LockIterator {
first.SetEndUnchecked(second.End())
first.SetValue(mval)
+
return s.Remove(second).PrevSegment()
}
}
@@ -558,6 +608,12 @@ type Locknode struct {
// than "isLeaf" because false must be the correct value for an empty root.
hasChildren bool
+ // maxGap is the length of the longest gap within the subtree rooted at this
+ // node. For a leaf, it is the maximum of the (nrSegments+1) gaps delimited by
+ // its keys, including the 0th and nrSegments-th gaps that may be shared with
+ // ancestor nodes; for an internal node, it is the max of its children's maxGap.
+ maxGap LockdynamicGap
+
// Nodes store keys and values in separate arrays to maximize locality in
// the common case (scanning keys for lookup).
keys [LockmaxDegree - 1]LockRange
@@ -603,12 +659,12 @@ func (n *Locknode) nextSibling() *Locknode {
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *Locknode) rebalanceBeforeInsert(gap LockGapIterator) LockGapIterator {
- if n.parent != nil {
- gap = n.parent.rebalanceBeforeInsert(gap)
- }
if n.nrSegments < LockmaxDegree-1 {
return gap
}
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
if n.parent == nil {
left := &Locknode{
@@ -644,6 +700,11 @@ func (n *Locknode) rebalanceBeforeInsert(gap LockGapIterator) LockGapIterator {
n.hasChildren = true
n.children[0] = left
n.children[1] = right
+
+ if LocktrackGaps != 0 {
+ left.updateMaxGapLocal()
+ right.updateMaxGapLocal()
+ }
if gap.node != n {
return gap
}
@@ -681,6 +742,11 @@ func (n *Locknode) rebalanceBeforeInsert(gap LockGapIterator) LockGapIterator {
}
n.nrSegments = LockminDegree - 1
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
+
if gap.node != n {
return gap
}
@@ -726,6 +792,11 @@ func (n *Locknode) rebalanceAfterRemove(gap LockGapIterator) LockGapIterator {
}
n.nrSegments++
sibling.nrSegments--
+
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling && gap.index == sibling.nrSegments {
return LockGapIterator{n, 0}
}
@@ -754,6 +825,11 @@ func (n *Locknode) rebalanceAfterRemove(gap LockGapIterator) LockGapIterator {
}
n.nrSegments++
sibling.nrSegments--
+
+ if LocktrackGaps != 0 {
+ n.updateMaxGapLocal()
+ sibling.updateMaxGapLocal()
+ }
if gap.node == sibling {
if gap.index == 0 {
return LockGapIterator{n, n.nrSegments}
@@ -786,6 +862,7 @@ func (n *Locknode) rebalanceAfterRemove(gap LockGapIterator) LockGapIterator {
p.children[0] = nil
p.children[1] = nil
}
+
if gap.node == left {
return LockGapIterator{p, gap.index}
}
@@ -832,10 +909,146 @@ func (n *Locknode) rebalanceAfterRemove(gap LockGapIterator) LockGapIterator {
p.children[p.nrSegments] = nil
p.nrSegments--
+ if LocktrackGaps != 0 {
+ left.updateMaxGapLocal()
+ }
+
n = p
}
}
+// updateMaxGapLeaf updates maxGap bottom-up from the calling leaf until no
+// further update is necessary.
+//
+// Preconditions: n must be a leaf node, trackGaps must be 1.
+func (n *Locknode) updateMaxGapLeaf() {
+ if n.hasChildren {
+ panic(fmt.Sprintf("updateMaxGapLeaf should always be called on leaf node: %v", n))
+ }
+ max := n.calculateMaxGapLeaf()
+ if max == n.maxGap.Get() {
+ // The leaf's maxGap is unchanged; nothing to update or propagate.
+ return
+ }
+ oldMax := n.maxGap.Get()
+ n.maxGap.Set(max)
+ if max > oldMax {
+ // The maxGap grew; propagate it upward until an ancestor already covers it.
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() >= max {
+
+ break
+ }
+
+ p.maxGap.Set(max)
+ }
+ return
+ }
+ // The maxGap shrank; recompute ancestors that depended on the old value.
+ for p := n.parent; p != nil; p = p.parent {
+ if p.maxGap.Get() > oldMax {
+
+ break
+ }
+
+ parentNewMax := p.calculateMaxGapInternal()
+ if p.maxGap.Get() == parentNewMax {
+
+ break
+ }
+
+ p.maxGap.Set(parentNewMax)
+ }
+}
+
+// updateMaxGapLocal updates only the calling node's maxGap; the change is not
+// propagated to ancestor nodes.
+//
+// Precondition: trackGaps must be 1.
+func (n *Locknode) updateMaxGapLocal() {
+ if !n.hasChildren {
+ // Leaf node: recompute maxGap from its own gaps.
+ n.maxGap.Set(n.calculateMaxGapLeaf())
+ } else {
+ // Internal node: recompute maxGap from its children's maxGap values.
+ n.maxGap.Set(n.calculateMaxGapInternal())
+ }
+}
+
+// calculateMaxGapLeaf iterates over the gaps within a leaf node and
+// calculates the maximum.
+//
+// Preconditions: n must be a leaf node.
+func (n *Locknode) calculateMaxGapLeaf() uint64 {
+ max := LockGapIterator{n, 0}.Range().Length()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := (LockGapIterator{n, i}).Range().Length(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// calculateMaxGapInternal iterates over the children's maxGap values within
+// an internal node n and calculates the maximum.
+//
+// Preconditions: n must be a non-leaf node.
+func (n *Locknode) calculateMaxGapInternal() uint64 {
+ max := n.children[0].maxGap.Get()
+ for i := 1; i <= n.nrSegments; i++ {
+ if current := n.children[i].maxGap.Get(); current > max {
+ max = current
+ }
+ }
+ return max
+}
+
+// searchFirstLargeEnoughGap returns the first gap of length at least minSize
+// in the subtree rooted at n, or a terminal gap iterator if no such gap exists.
+func (n *Locknode) searchFirstLargeEnoughGap(minSize uint64) LockGapIterator {
+ if n.maxGap.Get() < minSize {
+ return LockGapIterator{}
+ }
+ if n.hasChildren {
+ for i := 0; i <= n.nrSegments; i++ {
+ if largeEnoughGap := n.children[i].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := 0; i <= n.nrSegments; i++ {
+ currentGap := LockGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
+// searchLastLargeEnoughGap returns the last gap of length at least minSize
+// in the subtree rooted at n, or a terminal gap iterator if no such gap exists.
+func (n *Locknode) searchLastLargeEnoughGap(minSize uint64) LockGapIterator {
+ if n.maxGap.Get() < minSize {
+ return LockGapIterator{}
+ }
+ if n.hasChildren {
+ for i := n.nrSegments; i >= 0; i-- {
+ if largeEnoughGap := n.children[i].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ }
+ } else {
+ for i := n.nrSegments; i >= 0; i-- {
+ currentGap := LockGapIterator{n, i}
+ if currentGap.Range().Length() >= minSize {
+ return currentGap
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid maxGap in %v", n))
+}
+
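For contrast, without maxGap the only way to find a sufficiently large gap is a linear scan over every gap in the set; searchFirstLargeEnoughGap instead prunes whole subtrees whose maxGap is too small. A hypothetical reference helper over a set generated with trackGaps=1 (exampleSet and exampleGapIterator are placeholder names; LockSet itself is generated with LocktrackGaps = 0, so its gap-tracking methods would panic):

// firstLargeEnoughGapLinear is the O(number of gaps) behaviour that the
// maxGap-guided search avoids: it visits every gap in order.
func firstLargeEnoughGapLinear(s *exampleSet, minSize uint64) exampleGapIterator {
	for gap := s.FirstGap(); gap.Ok(); gap = gap.NextGap() {
		if gap.Range().Length() >= minSize {
			return gap
		}
	}
	// Terminal iterator: no gap in the set is large enough.
	return exampleGapIterator{}
}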
// A Iterator is conceptually one of:
//
// - A pointer to a segment in a set; or
@@ -1141,6 +1354,114 @@ func (gap LockGapIterator) NextGap() LockGapIterator {
return seg.NextGap()
}
+// NextLargeEnoughGap returns the first gap after the iterated gap whose
+// length is at least minSize, or a terminal gap iterator if no such gap
+// exists. The iterated gap itself is NOT considered.
+//
+// Precondition: trackGaps must be 1.
+func (gap LockGapIterator) NextLargeEnoughGap(minSize uint64) LockGapIterator {
+ if LocktrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == gap.node.nrSegments {
+ // Translate this internal node's trailing gap to its leaf-level equivalent.
+ gap.node = gap.NextSegment().node
+ gap.index = 0
+ return gap.nextLargeEnoughGapHelper(minSize)
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// nextLargeEnoughGapHelper is the helper function used by NextLargeEnoughGap
+// to perform the actual recursive search.
+//
+// Preconditions: gap is NOT the trailing gap of a non-leaf node.
+func (gap LockGapIterator) nextLargeEnoughGapHelper(minSize uint64) LockGapIterator {
+ // Climb while the subtree lacks a large-enough gap or this leaf's gaps are exhausted.
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return LockGapIterator{}
+ }
+
+ gap.index++
+ for gap.index <= gap.node.nrSegments {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index++
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == gap.node.nrSegments {
+ // Preserve the precondition: avoid the trailing gap of an internal node.
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.nextLargeEnoughGapHelper(minSize)
+}
+
+// PrevLargeEnoughGap returns the last gap before the iterated gap whose
+// length is at least minSize, or a terminal gap iterator if no such gap
+// exists. The iterated gap itself is NOT considered.
+//
+// Precondition: trackGaps must be 1.
+func (gap LockGapIterator) PrevLargeEnoughGap(minSize uint64) LockGapIterator {
+ if LocktrackGaps != 1 {
+ panic("set is not tracking gaps")
+ }
+ if gap.node != nil && gap.node.hasChildren && gap.index == 0 {
+ // Translate this internal node's leading gap to its leaf-level equivalent.
+ gap.node = gap.PrevSegment().node
+ gap.index = gap.node.nrSegments
+ return gap.prevLargeEnoughGapHelper(minSize)
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
+// prevLargeEnoughGapHelper is the helper function used by PrevLargeEnoughGap
+// to perform the actual recursive search.
+//
+// Preconditions: gap is NOT the first gap of a non-leaf node.
+func (gap LockGapIterator) prevLargeEnoughGapHelper(minSize uint64) LockGapIterator {
+ // Climb while the subtree lacks a large-enough gap or this leaf's gaps are exhausted.
+ for gap.node != nil &&
+ (gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+
+ if gap.node == nil {
+ return LockGapIterator{}
+ }
+
+ gap.index--
+ for gap.index >= 0 {
+ if gap.node.hasChildren {
+ if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
+ return largeEnoughGap
+ }
+ } else {
+ if gap.Range().Length() >= minSize {
+ return gap
+ }
+ }
+ gap.index--
+ }
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ if gap.node != nil && gap.index == 0 {
+ // Preserve the precondition: avoid the leading gap of an internal node.
+ gap.node, gap.index = gap.node.parent, gap.node.parentIndex
+ }
+ return gap.prevLargeEnoughGapHelper(minSize)
+}
+
// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
@@ -1207,7 +1528,15 @@ func (n *Locknode) writeDebugString(buf *bytes.Buffer, prefix string) {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
}
buf.WriteString(prefix)
- buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ if n.hasChildren {
+ if LocktrackGaps != 0 {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v, maxGap: %d\n", i, n.keys[i], n.values[i], n.maxGap.Get()))
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ } else {
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
}
if child := n.children[n.nrSegments]; child != nil {
child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
@@ -1259,6 +1588,46 @@ func (s *LockSet) ImportSortedSlices(sds *LockSegmentDataSlices) error {
}
return nil
}
+
+// segmentTestCheck returns an error if s is incorrectly sorted, does not
+// contain exactly expectedSegments segments, or contains a segment which
+// fails the passed check.
+//
+// This should be used only for testing, and has been added to this package for
+// templating convenience.
+func (s *LockSet) segmentTestCheck(expectedSegments int, segFunc func(int, LockRange, Lock) error) error {
+ havePrev := false
+ prev := uint64(0)
+ nrSegments := 0
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ next := seg.Start()
+ if havePrev && prev >= next {
+ return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments)
+ }
+ if segFunc != nil {
+ if err := segFunc(nrSegments, seg.Range(), seg.Value()); err != nil {
+ return err
+ }
+ }
+ prev = next
+ havePrev = true
+ nrSegments++
+ }
+ if nrSegments != expectedSegments {
+ return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments)
+ }
+ return nil
+}
+
+// countSegments counts the number of segments in the set.
+//
+// Similar to Check, this should only be used for testing.
+func (s *LockSet) countSegments() (segments int) {
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ segments++
+ }
+ return segments
+}
func (s *LockSet) saveRoot() *LockSegmentDataSlices {
return s.ExportSortedSlices()
}
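Together, FirstGap/NextLargeEnoughGap (and their Last/Prev counterparts) support first-fit style searches over a set's free gaps. A usage sketch against a hypothetical freeSet generated with trackGaps=1 (none of the sets in this change enable gap tracking, so calling these methods on LockSet or the fsutil sets would panic):

// findFreeRange returns the start of the first gap that can hold length key
// units. The current gap is checked explicitly because NextLargeEnoughGap
// deliberately excludes the gap it is called on.
func findFreeRange(free *freeSet, length uint64) (uint64, bool) {
	for gap := free.FirstGap(); gap.Ok(); gap = gap.NextLargeEnoughGap(length) {
		if gap.Range().Length() >= length {
			return gap.Start(), true
		}
	}
	return 0, false
}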
diff --git a/pkg/sentry/fs/lock/lock_state_autogen.go b/pkg/sentry/fs/lock/lock_state_autogen.go
index a5db62814..d5ea98366 100755..100644
--- a/pkg/sentry/fs/lock/lock_state_autogen.go
+++ b/pkg/sentry/fs/lock/lock_state_autogen.go
@@ -67,6 +67,7 @@ func (x *Locknode) save(m state.Map) {
m.Save("parent", &x.parent)
m.Save("parentIndex", &x.parentIndex)
m.Save("hasChildren", &x.hasChildren)
+ m.Save("maxGap", &x.maxGap)
m.Save("keys", &x.keys)
m.Save("values", &x.values)
m.Save("children", &x.children)
@@ -78,6 +79,7 @@ func (x *Locknode) load(m state.Map) {
m.Load("parent", &x.parent)
m.Load("parentIndex", &x.parentIndex)
m.Load("hasChildren", &x.hasChildren)
+ m.Load("maxGap", &x.maxGap)
m.Load("keys", &x.keys)
m.Load("values", &x.values)
m.Load("children", &x.children)
diff --git a/pkg/sentry/fs/proc/device/device_state_autogen.go b/pkg/sentry/fs/proc/device/device_state_autogen.go
index 4a5e3cc88..4a5e3cc88 100755..100644
--- a/pkg/sentry/fs/proc/device/device_state_autogen.go
+++ b/pkg/sentry/fs/proc/device/device_state_autogen.go
diff --git a/pkg/sentry/fs/proc/proc_state_autogen.go b/pkg/sentry/fs/proc/proc_state_autogen.go
index baf7cd42b..baf7cd42b 100755..100644
--- a/pkg/sentry/fs/proc/proc_state_autogen.go
+++ b/pkg/sentry/fs/proc/proc_state_autogen.go
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go b/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go
index cfd3a40b4..cfd3a40b4 100755..100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_state_autogen.go
diff --git a/pkg/sentry/fs/ramfs/ramfs_state_autogen.go b/pkg/sentry/fs/ramfs/ramfs_state_autogen.go
index 0a001e0b6..0a001e0b6 100755..100644
--- a/pkg/sentry/fs/ramfs/ramfs_state_autogen.go
+++ b/pkg/sentry/fs/ramfs/ramfs_state_autogen.go
diff --git a/pkg/sentry/fs/sys/sys_state_autogen.go b/pkg/sentry/fs/sys/sys_state_autogen.go
index 733c504b1..733c504b1 100755..100644
--- a/pkg/sentry/fs/sys/sys_state_autogen.go
+++ b/pkg/sentry/fs/sys/sys_state_autogen.go
diff --git a/pkg/sentry/fs/timerfd/timerfd_state_autogen.go b/pkg/sentry/fs/timerfd/timerfd_state_autogen.go
index 955cf1d38..955cf1d38 100755..100644
--- a/pkg/sentry/fs/timerfd/timerfd_state_autogen.go
+++ b/pkg/sentry/fs/timerfd/timerfd_state_autogen.go
diff --git a/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go b/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go
index e4d2584fd..e4d2584fd 100755..100644
--- a/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go
+++ b/pkg/sentry/fs/tmpfs/tmpfs_state_autogen.go
diff --git a/pkg/sentry/fs/tty/tty_state_autogen.go b/pkg/sentry/fs/tty/tty_state_autogen.go
index 25d601072..25d601072 100755..100644
--- a/pkg/sentry/fs/tty/tty_state_autogen.go
+++ b/pkg/sentry/fs/tty/tty_state_autogen.go
diff --git a/pkg/sentry/fs/user/user.go b/pkg/sentry/fs/user/user.go
index fe7f67c00..fe7f67c00 100755..100644
--- a/pkg/sentry/fs/user/user.go
+++ b/pkg/sentry/fs/user/user.go
diff --git a/pkg/sentry/fs/user/user_state_autogen.go b/pkg/sentry/fs/user/user_state_autogen.go
index 8083e036c..8083e036c 100755..100644
--- a/pkg/sentry/fs/user/user_state_autogen.go
+++ b/pkg/sentry/fs/user/user_state_autogen.go