path: root/pkg/state
Diffstat (limited to 'pkg/state')
-rw-r--r--  pkg/state/BUILD                                    78
-rwxr-xr-x  pkg/state/addr_range.go                            62
-rwxr-xr-x  pkg/state/addr_set.go                            1274
-rw-r--r--  pkg/state/object.proto                            140
-rwxr-xr-x  pkg/state/object_go_proto/object.pb.go           1195
-rw-r--r--  pkg/state/state_test.go                           720
-rw-r--r--  pkg/state/statefile/BUILD                          23
-rwxr-xr-x  pkg/state/statefile/statefile_state_autogen.go      4
-rw-r--r--  pkg/state/statefile/statefile_test.go             290
9 files changed, 2535 insertions, 1251 deletions
diff --git a/pkg/state/BUILD b/pkg/state/BUILD
deleted file mode 100644
index be93750bf..000000000
--- a/pkg/state/BUILD
+++ /dev/null
@@ -1,78 +0,0 @@
-load("//tools/go_stateify:defs.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "addr_range",
- out = "addr_range.go",
- package = "state",
- prefix = "addr",
- template = "//pkg/segment:generic_range",
- types = {
- "T": "uintptr",
- },
-)
-
-go_template_instance(
- name = "addr_set",
- out = "addr_set.go",
- consts = {
- "minDegree": "10",
- },
- imports = {
- "reflect": "reflect",
- },
- package = "state",
- prefix = "addr",
- template = "//pkg/segment:generic_set",
- types = {
- "Key": "uintptr",
- "Range": "addrRange",
- "Value": "reflect.Value",
- "Functions": "addrSetFunctions",
- },
-)
-
-go_library(
- name = "state",
- srcs = [
- "addr_range.go",
- "addr_set.go",
- "decode.go",
- "encode.go",
- "encode_unsafe.go",
- "map.go",
- "printer.go",
- "state.go",
- "stats.go",
- ],
- importpath = "gvisor.dev/gvisor/pkg/state",
- visibility = ["//:sandbox"],
- deps = [
- ":object_go_proto",
- "@com_github_golang_protobuf//proto:go_default_library",
- ],
-)
-
-proto_library(
- name = "object_proto",
- srcs = ["object.proto"],
- visibility = ["//:sandbox"],
-)
-
-go_proto_library(
- name = "object_go_proto",
- importpath = "gvisor.dev/gvisor/pkg/state/object_go_proto",
- proto = ":object_proto",
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "state_test",
- timeout = "long",
- srcs = ["state_test.go"],
- embed = [":state"],
-)
diff --git a/pkg/state/addr_range.go b/pkg/state/addr_range.go
new file mode 100755
index 000000000..45720c643
--- /dev/null
+++ b/pkg/state/addr_range.go
@@ -0,0 +1,62 @@
+package state
+
+// A Range represents a contiguous range of T.
+//
+// +stateify savable
+type addrRange struct {
+ // Start is the inclusive start of the range.
+ Start uintptr
+
+ // End is the exclusive end of the range.
+ End uintptr
+}
+
+// WellFormed returns true if r.Start <= r.End. All other methods on a Range
+// require that the Range is well-formed.
+func (r addrRange) WellFormed() bool {
+ return r.Start <= r.End
+}
+
+// Length returns the length of the range.
+func (r addrRange) Length() uintptr {
+ return r.End - r.Start
+}
+
+// Contains returns true if r contains x.
+func (r addrRange) Contains(x uintptr) bool {
+ return r.Start <= x && x < r.End
+}
+
+// Overlaps returns true if r and r2 overlap.
+func (r addrRange) Overlaps(r2 addrRange) bool {
+ return r.Start < r2.End && r2.Start < r.End
+}
+
+// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is
+// contained within r.
+func (r addrRange) IsSupersetOf(r2 addrRange) bool {
+ return r.Start <= r2.Start && r.End >= r2.End
+}
+
+// Intersect returns a range consisting of the intersection between r and r2.
+// If r and r2 do not overlap, Intersect returns a range with unspecified
+// bounds, but for which Length() == 0.
+func (r addrRange) Intersect(r2 addrRange) addrRange {
+ if r.Start < r2.Start {
+ r.Start = r2.Start
+ }
+ if r.End > r2.End {
+ r.End = r2.End
+ }
+ if r.End < r.Start {
+ r.End = r.Start
+ }
+ return r
+}
+
+// CanSplitAt returns true if it is legal to split a segment spanning the range
+// r at x; that is, splitting at x would produce two ranges, both of which have
+// non-zero length.
+func (r addrRange) CanSplitAt(x uintptr) bool {
+ return r.Contains(x) && r.Start < x
+}
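
The range type above is half-open: Start is inclusive and End is exclusive, which is what makes Contains, Intersect, and CanSplitAt behave as documented. Below is a minimal sketch (not part of the diff) exercising those methods from inside package state; the helper name exampleAddrRange is made up for illustration.

package state

import "fmt"

// exampleAddrRange demonstrates the half-open [Start, End) semantics of
// addrRange. Hypothetical helper; not part of the generated file above.
func exampleAddrRange() {
	r := addrRange{Start: 0x1000, End: 0x3000}
	other := addrRange{Start: 0x2000, End: 0x4000}

	fmt.Println(r.Length())           // 8192 (0x2000 bytes)
	fmt.Println(r.Contains(0x2fff))   // true
	fmt.Println(r.Contains(0x3000))   // false: End is exclusive
	fmt.Println(r.Overlaps(other))    // true
	fmt.Println(r.Intersect(other))   // {8192 12288}, i.e. [0x2000, 0x3000)
	fmt.Println(r.CanSplitAt(0x1000)) // false: splitting at Start would leave a zero-length piece
}
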
diff --git a/pkg/state/addr_set.go b/pkg/state/addr_set.go
new file mode 100755
index 000000000..5261aa488
--- /dev/null
+++ b/pkg/state/addr_set.go
@@ -0,0 +1,1274 @@
+package state
+
+import (
+ __generics_imported0 "reflect"
+)
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ // minDegree is the minimum degree of an internal node in a Set B-tree.
+ //
+ // - Any non-root node has at least minDegree-1 segments.
+ //
+ // - Any non-root internal (non-leaf) node has at least minDegree children.
+ //
+ // - The root node may have fewer than minDegree-1 segments, but it may
+ // only have 0 segments if the tree is empty.
+ //
+ // Our implementation requires minDegree >= 3. Higher values of minDegree
+ // usually improve performance, but increase memory usage for small sets.
+ addrminDegree = 10
+
+ addrmaxDegree = 2 * addrminDegree
+)
+
+// A Set is a mapping of segments with non-overlapping Range keys. The zero
+// value for a Set is an empty set. Set values are not safely movable nor
+// copyable. Set is thread-compatible.
+//
+// +stateify savable
+type addrSet struct {
+ root addrnode `state:".(*addrSegmentDataSlices)"`
+}
+
+// IsEmpty returns true if the set contains no segments.
+func (s *addrSet) IsEmpty() bool {
+ return s.root.nrSegments == 0
+}
+
+// IsEmptyRange returns true iff no segments in the set overlap the given
+// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be
+// more efficient.
+func (s *addrSet) IsEmptyRange(r addrRange) bool {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return true
+ }
+ _, gap := s.Find(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ return r.End <= gap.End()
+}
+
+// Span returns the total size of all segments in the set.
+func (s *addrSet) Span() uintptr {
+ var sz uintptr
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sz += seg.Range().Length()
+ }
+ return sz
+}
+
+// SpanRange returns the total size of the intersection of segments in the set
+// with the given range.
+func (s *addrSet) SpanRange(r addrRange) uintptr {
+ switch {
+ case r.Length() < 0:
+ panic(fmt.Sprintf("invalid range %v", r))
+ case r.Length() == 0:
+ return 0
+ }
+ var sz uintptr
+ for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() {
+ sz += seg.Range().Intersect(r).Length()
+ }
+ return sz
+}
+
+// FirstSegment returns the first segment in the set. If the set is empty,
+// FirstSegment returns a terminal iterator.
+func (s *addrSet) FirstSegment() addrIterator {
+ if s.root.nrSegments == 0 {
+ return addrIterator{}
+ }
+ return s.root.firstSegment()
+}
+
+// LastSegment returns the last segment in the set. If the set is empty,
+// LastSegment returns a terminal iterator.
+func (s *addrSet) LastSegment() addrIterator {
+ if s.root.nrSegments == 0 {
+ return addrIterator{}
+ }
+ return s.root.lastSegment()
+}
+
+// FirstGap returns the first gap in the set.
+func (s *addrSet) FirstGap() addrGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return addrGapIterator{n, 0}
+}
+
+// LastGap returns the last gap in the set.
+func (s *addrSet) LastGap() addrGapIterator {
+ n := &s.root
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return addrGapIterator{n, n.nrSegments}
+}
+
+// Find returns the segment or gap whose range contains the given key. If a
+// segment is found, the returned Iterator is non-terminal and the
+// returned GapIterator is terminal. Otherwise, the returned Iterator is
+// terminal and the returned GapIterator is non-terminal.
+func (s *addrSet) Find(key uintptr) (addrIterator, addrGapIterator) {
+ n := &s.root
+ for {
+
+ lower := 0
+ upper := n.nrSegments
+ for lower < upper {
+ i := lower + (upper-lower)/2
+ if r := n.keys[i]; key < r.End {
+ if key >= r.Start {
+ return addrIterator{n, i}, addrGapIterator{}
+ }
+ upper = i
+ } else {
+ lower = i + 1
+ }
+ }
+ i := lower
+ if !n.hasChildren {
+ return addrIterator{}, addrGapIterator{n, i}
+ }
+ n = n.children[i]
+ }
+}
+
+// FindSegment returns the segment whose range contains the given key. If no
+// such segment exists, FindSegment returns a terminal iterator.
+func (s *addrSet) FindSegment(key uintptr) addrIterator {
+ seg, _ := s.Find(key)
+ return seg
+}
+
+// LowerBoundSegment returns the segment with the lowest range that contains a
+// key greater than or equal to min. If no such segment exists,
+// LowerBoundSegment returns a terminal iterator.
+func (s *addrSet) LowerBoundSegment(min uintptr) addrIterator {
+ seg, gap := s.Find(min)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.NextSegment()
+}
+
+// UpperBoundSegment returns the segment with the highest range that contains a
+// key less than or equal to max. If no such segment exists, UpperBoundSegment
+// returns a terminal iterator.
+func (s *addrSet) UpperBoundSegment(max uintptr) addrIterator {
+ seg, gap := s.Find(max)
+ if seg.Ok() {
+ return seg
+ }
+ return gap.PrevSegment()
+}
+
+// FindGap returns the gap containing the given key. If no such gap exists
+// (i.e. the set contains a segment containing that key), FindGap returns a
+// terminal iterator.
+func (s *addrSet) FindGap(key uintptr) addrGapIterator {
+ _, gap := s.Find(key)
+ return gap
+}
+
+// LowerBoundGap returns the gap with the lowest range that is greater than or
+// equal to min.
+func (s *addrSet) LowerBoundGap(min uintptr) addrGapIterator {
+ seg, gap := s.Find(min)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.NextGap()
+}
+
+// UpperBoundGap returns the gap with the highest range that is less than or
+// equal to max.
+func (s *addrSet) UpperBoundGap(max uintptr) addrGapIterator {
+ seg, gap := s.Find(max)
+ if gap.Ok() {
+ return gap
+ }
+ return seg.PrevGap()
+}
+
+// Add inserts the given segment into the set and returns true. If the new
+// segment can be merged with adjacent segments, Add will do so. If the new
+// segment would overlap an existing segment, Add returns false. If Add
+// succeeds, all existing iterators are invalidated.
+func (s *addrSet) Add(r addrRange, val __generics_imported0.Value) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.Insert(gap, r, val)
+ return true
+}
+
+// AddWithoutMerging inserts the given segment into the set and returns true.
+// If it would overlap an existing segment, AddWithoutMerging does nothing and
+// returns false. If AddWithoutMerging succeeds, all existing iterators are
+// invalidated.
+func (s *addrSet) AddWithoutMerging(r addrRange, val __generics_imported0.Value) bool {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ gap := s.FindGap(r.Start)
+ if !gap.Ok() {
+ return false
+ }
+ if r.End > gap.End() {
+ return false
+ }
+ s.InsertWithoutMergingUnchecked(gap, r, val)
+ return true
+}
+
+// Insert inserts the given segment into the given gap. If the new segment can
+// be merged with adjacent segments, Insert will do so. Insert returns an
+// iterator to the segment containing the inserted value (which may have been
+// merged with other values). All existing iterators (including gap, but not
+// including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid, Insert panics.
+//
+// Insert is semantically equivalent to a InsertWithoutMerging followed by a
+// Merge, but may be more efficient. Note that there is no unchecked variant of
+// Insert since Insert must retrieve and inspect gap's predecessor and
+// successor segments regardless.
+func (s *addrSet) Insert(gap addrGapIterator, r addrRange, val __generics_imported0.Value) addrIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ prev, next := gap.PrevSegment(), gap.NextSegment()
+ if prev.Ok() && prev.End() > r.Start {
+ panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range()))
+ }
+ if next.Ok() && next.Start() < r.End {
+ panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range()))
+ }
+ if prev.Ok() && prev.End() == r.Start {
+ if mval, ok := (addrSetFunctions{}).Merge(prev.Range(), prev.Value(), r, val); ok {
+ prev.SetEndUnchecked(r.End)
+ prev.SetValue(mval)
+ if next.Ok() && next.Start() == r.End {
+ val = mval
+ if mval, ok := (addrSetFunctions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok {
+ prev.SetEndUnchecked(next.End())
+ prev.SetValue(mval)
+ return s.Remove(next).PrevSegment()
+ }
+ }
+ return prev
+ }
+ }
+ if next.Ok() && next.Start() == r.End {
+ if mval, ok := (addrSetFunctions{}).Merge(r, val, next.Range(), next.Value()); ok {
+ next.SetStartUnchecked(r.Start)
+ next.SetValue(mval)
+ return next
+ }
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMerging inserts the given segment into the given gap and
+// returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// If the gap cannot accommodate the segment, or if r is invalid,
+// InsertWithoutMerging panics.
+func (s *addrSet) InsertWithoutMerging(gap addrGapIterator, r addrRange, val __generics_imported0.Value) addrIterator {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if gr := gap.Range(); !gr.IsSupersetOf(r) {
+ panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr))
+ }
+ return s.InsertWithoutMergingUnchecked(gap, r, val)
+}
+
+// InsertWithoutMergingUnchecked inserts the given segment into the given gap
+// and returns an iterator to the inserted segment. All existing iterators
+// (including gap, but not including the returned iterator) are invalidated.
+//
+// Preconditions: r.Start >= gap.Start(); r.End <= gap.End().
+func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange, val __generics_imported0.Value) addrIterator {
+ gap = gap.node.rebalanceBeforeInsert(gap)
+ copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments])
+ copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments])
+ gap.node.keys[gap.index] = r
+ gap.node.values[gap.index] = val
+ gap.node.nrSegments++
+ return addrIterator{gap.node, gap.index}
+}
+
+// Remove removes the given segment and returns an iterator to the vacated gap.
+// All existing iterators (including seg, but not including the returned
+// iterator) are invalidated.
+func (s *addrSet) Remove(seg addrIterator) addrGapIterator {
+
+ if seg.node.hasChildren {
+
+ victim := seg.PrevSegment()
+
+ seg.SetRangeUnchecked(victim.Range())
+ seg.SetValue(victim.Value())
+ return s.Remove(victim).NextGap()
+ }
+ copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments])
+ copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments])
+ addrSetFunctions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1])
+ seg.node.nrSegments--
+ return seg.node.rebalanceAfterRemove(addrGapIterator{seg.node, seg.index})
+}
+
+// RemoveAll removes all segments from the set. All existing iterators are
+// invalidated.
+func (s *addrSet) RemoveAll() {
+ s.root = addrnode{}
+}
+
+// RemoveRange removes all segments in the given range. An iterator to the
+// newly formed gap is returned, and all existing iterators are invalidated.
+func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
+ seg, gap := s.Find(r.Start)
+ if seg.Ok() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
+ seg = s.Isolate(seg, r)
+ gap = s.Remove(seg)
+ }
+ return gap
+}
+
+// Merge attempts to merge two neighboring segments. If successful, Merge
+// returns an iterator to the merged segment, and all existing iterators are
+// invalidated. Otherwise, Merge returns a terminal iterator.
+//
+// If first is not the predecessor of second, Merge panics.
+func (s *addrSet) Merge(first, second addrIterator) addrIterator {
+ if first.NextSegment() != second {
+ panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range()))
+ }
+ return s.MergeUnchecked(first, second)
+}
+
+// MergeUnchecked attempts to merge two neighboring segments. If successful,
+// MergeUnchecked returns an iterator to the merged segment, and all existing
+// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal
+// iterator.
+//
+// Precondition: first is the predecessor of second: first.NextSegment() ==
+// second, first == second.PrevSegment().
+func (s *addrSet) MergeUnchecked(first, second addrIterator) addrIterator {
+ if first.End() == second.Start() {
+ if mval, ok := (addrSetFunctions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok {
+
+ first.SetEndUnchecked(second.End())
+ first.SetValue(mval)
+ return s.Remove(second).PrevSegment()
+ }
+ }
+ return addrIterator{}
+}
+
+// MergeAll attempts to merge all adjacent segments in the set. All existing
+// iterators are invalidated.
+func (s *addrSet) MergeAll() {
+ seg := s.FirstSegment()
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeRange attempts to merge all adjacent segments that contain a key in the
+// specific range. All existing iterators are invalidated.
+func (s *addrSet) MergeRange(r addrRange) {
+ seg := s.LowerBoundSegment(r.Start)
+ if !seg.Ok() {
+ return
+ }
+ next := seg.NextSegment()
+ for next.Ok() && next.Range().Start < r.End {
+ if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
+ seg, next = mseg, mseg.NextSegment()
+ } else {
+ seg, next = next, next.NextSegment()
+ }
+ }
+}
+
+// MergeAdjacent attempts to merge the segment containing r.Start with its
+// predecessor, and the segment containing r.End-1 with its successor.
+func (s *addrSet) MergeAdjacent(r addrRange) {
+ first := s.FindSegment(r.Start)
+ if first.Ok() {
+ if prev := first.PrevSegment(); prev.Ok() {
+ s.Merge(prev, first)
+ }
+ }
+ last := s.FindSegment(r.End - 1)
+ if last.Ok() {
+ if next := last.NextSegment(); next.Ok() {
+ s.Merge(last, next)
+ }
+ }
+}
+
+// Split splits the given segment at the given key and returns iterators to the
+// two resulting segments. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+//
+// If the segment cannot be split at split (because split is at the start or
+// end of the segment's range, so splitting would produce a segment with zero
+// length, or because split falls outside the segment's range altogether),
+// Split panics.
+func (s *addrSet) Split(seg addrIterator, split uintptr) (addrIterator, addrIterator) {
+ if !seg.Range().CanSplitAt(split) {
+ panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split))
+ }
+ return s.SplitUnchecked(seg, split)
+}
+
+// SplitUnchecked splits the given segment at the given key and returns
+// iterators to the two resulting segments. All existing iterators (including
+// seg, but not including the returned iterators) are invalidated.
+//
+// Preconditions: seg.Start() < key < seg.End().
+func (s *addrSet) SplitUnchecked(seg addrIterator, split uintptr) (addrIterator, addrIterator) {
+ val1, val2 := (addrSetFunctions{}).Split(seg.Range(), seg.Value(), split)
+ end2 := seg.End()
+ seg.SetEndUnchecked(split)
+ seg.SetValue(val1)
+ seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), addrRange{split, end2}, val2)
+
+ return seg2.PrevSegment(), seg2
+}
+
+// SplitAt splits the segment straddling split, if one exists. SplitAt returns
+// true if a segment was split and false otherwise. If SplitAt splits a
+// segment, all existing iterators are invalidated.
+func (s *addrSet) SplitAt(split uintptr) bool {
+ if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
+ s.SplitUnchecked(seg, split)
+ return true
+ }
+ return false
+}
+
+// Isolate ensures that the given segment's range does not escape r by
+// splitting at r.Start and r.End if necessary, and returns an updated iterator
+// to the bounded segment. All existing iterators (including seg, but not
+// including the returned iterators) are invalidated.
+func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
+ if seg.Range().CanSplitAt(r.Start) {
+ _, seg = s.SplitUnchecked(seg, r.Start)
+ }
+ if seg.Range().CanSplitAt(r.End) {
+ seg, _ = s.SplitUnchecked(seg, r.End)
+ }
+ return seg
+}
+
+// ApplyContiguous applies a function to a contiguous range of segments,
+// splitting if necessary. The function is applied until the first gap is
+// encountered, at which point the gap is returned. If the function is applied
+// across the entire range, a terminal gap is returned. All existing iterators
+// are invalidated.
+//
+// N.B. The Iterator must not be invalidated by the function.
+func (s *addrSet) ApplyContiguous(r addrRange, fn func(seg addrIterator)) addrGapIterator {
+ seg, gap := s.Find(r.Start)
+ if !seg.Ok() {
+ return gap
+ }
+ for {
+ seg = s.Isolate(seg, r)
+ fn(seg)
+ if seg.End() >= r.End {
+ return addrGapIterator{}
+ }
+ gap = seg.NextGap()
+ if !gap.IsEmpty() {
+ return gap
+ }
+ seg = gap.NextSegment()
+ if !seg.Ok() {
+
+ return addrGapIterator{}
+ }
+ }
+}
+
+// +stateify savable
+type addrnode struct {
+ // An internal binary tree node looks like:
+ //
+ // K
+ // / \
+ // Cl Cr
+ //
+ // where all keys in the subtree rooted by Cl (the left subtree) are less
+ // than K (the key of the parent node), and all keys in the subtree rooted
+ // by Cr (the right subtree) are greater than K.
+ //
+ // An internal B-tree node's indexes work out to look like:
+ //
+ // K0 K1 K2 ... Kn-1
+ // / \/ \/ \ ... / \
+ // C0 C1 C2 C3 ... Cn-1 Cn
+ //
+ // where n is nrSegments.
+ nrSegments int
+
+ // parent is a pointer to this node's parent. If this node is root, parent
+ // is nil.
+ parent *addrnode
+
+ // parentIndex is the index of this node in parent.children.
+ parentIndex int
+
+ // Flag for internal nodes that is technically redundant with "children[0]
+ // != nil", but is stored in the first cache line. "hasChildren" rather
+ // than "isLeaf" because false must be the correct value for an empty root.
+ hasChildren bool
+
+ // Nodes store keys and values in separate arrays to maximize locality in
+ // the common case (scanning keys for lookup).
+ keys [addrmaxDegree - 1]addrRange
+ values [addrmaxDegree - 1]__generics_imported0.Value
+ children [addrmaxDegree]*addrnode
+}
+
+// firstSegment returns the first segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *addrnode) firstSegment() addrIterator {
+ for n.hasChildren {
+ n = n.children[0]
+ }
+ return addrIterator{n, 0}
+}
+
+// lastSegment returns the last segment in the subtree rooted by n.
+//
+// Preconditions: n.nrSegments != 0.
+func (n *addrnode) lastSegment() addrIterator {
+ for n.hasChildren {
+ n = n.children[n.nrSegments]
+ }
+ return addrIterator{n, n.nrSegments - 1}
+}
+
+func (n *addrnode) prevSibling() *addrnode {
+ if n.parent == nil || n.parentIndex == 0 {
+ return nil
+ }
+ return n.parent.children[n.parentIndex-1]
+}
+
+func (n *addrnode) nextSibling() *addrnode {
+ if n.parent == nil || n.parentIndex == n.parent.nrSegments {
+ return nil
+ }
+ return n.parent.children[n.parentIndex+1]
+}
+
+// rebalanceBeforeInsert splits n and its ancestors if they are full, as
+// required for insertion, and returns an updated iterator to the position
+// represented by gap.
+func (n *addrnode) rebalanceBeforeInsert(gap addrGapIterator) addrGapIterator {
+ if n.parent != nil {
+ gap = n.parent.rebalanceBeforeInsert(gap)
+ }
+ if n.nrSegments < addrmaxDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ left := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n,
+ parentIndex: 0,
+ hasChildren: n.hasChildren,
+ }
+ right := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n,
+ parentIndex: 1,
+ hasChildren: n.hasChildren,
+ }
+ copy(left.keys[:addrminDegree-1], n.keys[:addrminDegree-1])
+ copy(left.values[:addrminDegree-1], n.values[:addrminDegree-1])
+ copy(right.keys[:addrminDegree-1], n.keys[addrminDegree:])
+ copy(right.values[:addrminDegree-1], n.values[addrminDegree:])
+ n.keys[0], n.values[0] = n.keys[addrminDegree-1], n.values[addrminDegree-1]
+ addrzeroValueSlice(n.values[1:])
+ if n.hasChildren {
+ copy(left.children[:addrminDegree], n.children[:addrminDegree])
+ copy(right.children[:addrminDegree], n.children[addrminDegree:])
+ addrzeroNodeSlice(n.children[2:])
+ for i := 0; i < addrminDegree; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ right.children[i].parent = right
+ right.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = 1
+ n.hasChildren = true
+ n.children[0] = left
+ n.children[1] = right
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < addrminDegree {
+ return addrGapIterator{left, gap.index}
+ }
+ return addrGapIterator{right, gap.index - addrminDegree}
+ }
+
+ copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
+ copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
+ n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[addrminDegree-1], n.values[addrminDegree-1]
+ copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
+ for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
+ n.parent.children[i].parentIndex = i
+ }
+ sibling := &addrnode{
+ nrSegments: addrminDegree - 1,
+ parent: n.parent,
+ parentIndex: n.parentIndex + 1,
+ hasChildren: n.hasChildren,
+ }
+ n.parent.children[n.parentIndex+1] = sibling
+ n.parent.nrSegments++
+ copy(sibling.keys[:addrminDegree-1], n.keys[addrminDegree:])
+ copy(sibling.values[:addrminDegree-1], n.values[addrminDegree:])
+ addrzeroValueSlice(n.values[addrminDegree-1:])
+ if n.hasChildren {
+ copy(sibling.children[:addrminDegree], n.children[addrminDegree:])
+ addrzeroNodeSlice(n.children[addrminDegree:])
+ for i := 0; i < addrminDegree; i++ {
+ sibling.children[i].parent = sibling
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments = addrminDegree - 1
+
+ if gap.node != n {
+ return gap
+ }
+ if gap.index < addrminDegree {
+ return gap
+ }
+ return addrGapIterator{sibling, gap.index - addrminDegree}
+}
+
+// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
+// (contain fewer segments than required by B-tree invariants), as required for
+// removal, and returns an updated iterator to the position represented by gap.
+//
+// Precondition: n is the only node in the tree that may currently violate a
+// B-tree invariant.
+func (n *addrnode) rebalanceAfterRemove(gap addrGapIterator) addrGapIterator {
+ for {
+ if n.nrSegments >= addrminDegree-1 {
+ return gap
+ }
+ if n.parent == nil {
+
+ return gap
+ }
+
+ if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= addrminDegree {
+ copy(n.keys[1:], n.keys[:n.nrSegments])
+ copy(n.values[1:], n.values[:n.nrSegments])
+ n.keys[0] = n.parent.keys[n.parentIndex-1]
+ n.values[0] = n.parent.values[n.parentIndex-1]
+ n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
+ n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
+ addrSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ copy(n.children[1:], n.children[:n.nrSegments+1])
+ n.children[0] = sibling.children[sibling.nrSegments]
+ sibling.children[sibling.nrSegments] = nil
+ n.children[0].parent = n
+ n.children[0].parentIndex = 0
+ for i := 1; i < n.nrSegments+2; i++ {
+ n.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+ if gap.node == sibling && gap.index == sibling.nrSegments {
+ return addrGapIterator{n, 0}
+ }
+ if gap.node == n {
+ return addrGapIterator{n, gap.index + 1}
+ }
+ return gap
+ }
+ if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= addrminDegree {
+ n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
+ n.values[n.nrSegments] = n.parent.values[n.parentIndex]
+ n.parent.keys[n.parentIndex] = sibling.keys[0]
+ n.parent.values[n.parentIndex] = sibling.values[0]
+ copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
+ copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
+ addrSetFunctions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
+ if n.hasChildren {
+ n.children[n.nrSegments+1] = sibling.children[0]
+ copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
+ sibling.children[sibling.nrSegments] = nil
+ n.children[n.nrSegments+1].parent = n
+ n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
+ for i := 0; i < sibling.nrSegments; i++ {
+ sibling.children[i].parentIndex = i
+ }
+ }
+ n.nrSegments++
+ sibling.nrSegments--
+ if gap.node == sibling {
+ if gap.index == 0 {
+ return addrGapIterator{n, n.nrSegments}
+ }
+ return addrGapIterator{sibling, gap.index - 1}
+ }
+ return gap
+ }
+
+ p := n.parent
+ if p.nrSegments == 1 {
+
+ left, right := p.children[0], p.children[1]
+ p.nrSegments = left.nrSegments + right.nrSegments + 1
+ p.hasChildren = left.hasChildren
+ p.keys[left.nrSegments] = p.keys[0]
+ p.values[left.nrSegments] = p.values[0]
+ copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
+ copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
+ copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
+ copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := 0; i < p.nrSegments+1; i++ {
+ p.children[i].parent = p
+ p.children[i].parentIndex = i
+ }
+ } else {
+ p.children[0] = nil
+ p.children[1] = nil
+ }
+ if gap.node == left {
+ return addrGapIterator{p, gap.index}
+ }
+ if gap.node == right {
+ return addrGapIterator{p, gap.index + left.nrSegments + 1}
+ }
+ return gap
+ }
+ // Merge n and either sibling, along with the segment separating the
+ // two, into whichever of the two nodes comes first. This is the
+ // reverse of the non-root splitting case in
+ // node.rebalanceBeforeInsert.
+ var left, right *addrnode
+ if n.parentIndex > 0 {
+ left = n.prevSibling()
+ right = n
+ } else {
+ left = n
+ right = n.nextSibling()
+ }
+
+ if gap.node == right {
+ gap = addrGapIterator{left, gap.index + left.nrSegments + 1}
+ }
+ left.keys[left.nrSegments] = p.keys[left.parentIndex]
+ left.values[left.nrSegments] = p.values[left.parentIndex]
+ copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
+ copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
+ if left.hasChildren {
+ copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
+ for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
+ left.children[i].parent = left
+ left.children[i].parentIndex = i
+ }
+ }
+ left.nrSegments += right.nrSegments + 1
+ copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
+ copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
+ addrSetFunctions{}.ClearValue(&p.values[p.nrSegments-1])
+ copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
+ for i := 0; i < p.nrSegments; i++ {
+ p.children[i].parentIndex = i
+ }
+ p.children[p.nrSegments] = nil
+ p.nrSegments--
+
+ n = p
+ }
+}
+
+// An Iterator is conceptually one of:
+//
+// - A pointer to a segment in a set; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Iterators are copyable values and are meaningfully equality-comparable. The
+// zero value of Iterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type addrIterator struct {
+ // node is the node containing the iterated segment. If the iterator is
+ // terminal, node is nil.
+ node *addrnode
+
+ // index is the index of the segment in node.keys/values.
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (seg addrIterator) Ok() bool {
+ return seg.node != nil
+}
+
+// Range returns the iterated segment's range key.
+func (seg addrIterator) Range() addrRange {
+ return seg.node.keys[seg.index]
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (seg addrIterator) Start() uintptr {
+ return seg.node.keys[seg.index].Start
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (seg addrIterator) End() uintptr {
+ return seg.node.keys[seg.index].End
+}
+
+// SetRangeUnchecked mutates the iterated segment's range key. This operation
+// does not invalidate any iterators.
+//
+// Preconditions:
+//
+// - r.Length() > 0.
+//
+// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
+// then r.End <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
+// r.Start >= seg.PrevSegment().End().
+func (seg addrIterator) SetRangeUnchecked(r addrRange) {
+ seg.node.keys[seg.index] = r
+}
+
+// SetRange mutates the iterated segment's range key. If the new range would
+// cause the iterated segment to overlap another segment, or if the new range
+// is invalid, SetRange panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetRange(r addrRange) {
+ if r.Length() <= 0 {
+ panic(fmt.Sprintf("invalid segment range %v", r))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
+ panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
+ }
+ seg.SetRangeUnchecked(r)
+}
+
+// SetStartUnchecked mutates the iterated segment's start. This operation does
+// not invalidate any iterators.
+//
+// Preconditions: The new start must be valid: start < seg.End(); if
+// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
+func (seg addrIterator) SetStartUnchecked(start uintptr) {
+ seg.node.keys[seg.index].Start = start
+}
+
+// SetStart mutates the iterated segment's start. If the new start value would
+// cause the iterated segment to overlap another segment, or would result in an
+// invalid range, SetStart panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetStart(start uintptr) {
+ if start >= seg.End() {
+ panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
+ }
+ if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
+ panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
+ }
+ seg.SetStartUnchecked(start)
+}
+
+// SetEndUnchecked mutates the iterated segment's end. This operation does not
+// invalidate any iterators.
+//
+// Preconditions: The new end must be valid: end > seg.Start(); if
+// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
+func (seg addrIterator) SetEndUnchecked(end uintptr) {
+ seg.node.keys[seg.index].End = end
+}
+
+// SetEnd mutates the iterated segment's end. If the new end value would cause
+// the iterated segment to overlap another segment, or would result in an
+// invalid range, SetEnd panics. This operation does not invalidate any
+// iterators.
+func (seg addrIterator) SetEnd(end uintptr) {
+ if end <= seg.Start() {
+ panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
+ }
+ if next := seg.NextSegment(); next.Ok() && end > next.Start() {
+ panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
+ }
+ seg.SetEndUnchecked(end)
+}
+
+// Value returns a copy of the iterated segment's value.
+func (seg addrIterator) Value() __generics_imported0.Value {
+ return seg.node.values[seg.index]
+}
+
+// ValuePtr returns a pointer to the iterated segment's value. The pointer is
+// invalidated if the iterator is invalidated. This operation does not
+// invalidate any iterators.
+func (seg addrIterator) ValuePtr() *__generics_imported0.Value {
+ return &seg.node.values[seg.index]
+}
+
+// SetValue mutates the iterated segment's value. This operation does not
+// invalidate any iterators.
+func (seg addrIterator) SetValue(val __generics_imported0.Value) {
+ seg.node.values[seg.index] = val
+}
+
+// PrevSegment returns the iterated segment's predecessor. If there is no
+// preceding segment, PrevSegment returns a terminal iterator.
+func (seg addrIterator) PrevSegment() addrIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index].lastSegment()
+ }
+ if seg.index > 0 {
+ return addrIterator{seg.node, seg.index - 1}
+ }
+ if seg.node.parent == nil {
+ return addrIterator{}
+ }
+ return addrsegmentBeforePosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// NextSegment returns the iterated segment's successor. If there is no
+// succeeding segment, NextSegment returns a terminal iterator.
+func (seg addrIterator) NextSegment() addrIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment()
+ }
+ if seg.index < seg.node.nrSegments-1 {
+ return addrIterator{seg.node, seg.index + 1}
+ }
+ if seg.node.parent == nil {
+ return addrIterator{}
+ }
+ return addrsegmentAfterPosition(seg.node.parent, seg.node.parentIndex)
+}
+
+// PrevGap returns the gap immediately before the iterated segment.
+func (seg addrIterator) PrevGap() addrGapIterator {
+ if seg.node.hasChildren {
+
+ return seg.node.children[seg.index].lastSegment().NextGap()
+ }
+ return addrGapIterator{seg.node, seg.index}
+}
+
+// NextGap returns the gap immediately after the iterated segment.
+func (seg addrIterator) NextGap() addrGapIterator {
+ if seg.node.hasChildren {
+ return seg.node.children[seg.index+1].firstSegment().PrevGap()
+ }
+ return addrGapIterator{seg.node, seg.index + 1}
+}
+
+// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
+// or the gap before the iterated segment otherwise. If seg.Start() ==
+// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
+// non-terminal.
+func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
+ gap := seg.PrevGap()
+ if gap.Range().Length() != 0 {
+ return addrIterator{}, gap
+ }
+ return gap.PrevSegment(), addrGapIterator{}
+}
+
+// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
+// the gap after the iterated segment otherwise. If seg.End() ==
+// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
+// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
+// non-terminal.
+func (seg addrIterator) NextNonEmpty() (addrIterator, addrGapIterator) {
+ gap := seg.NextGap()
+ if gap.Range().Length() != 0 {
+ return addrIterator{}, gap
+ }
+ return gap.NextSegment(), addrGapIterator{}
+}
+
+// A GapIterator is conceptually one of:
+//
+// - A pointer to a position between two segments, before the first segment, or
+// after the last segment in a set, called a *gap*; or
+//
+// - A terminal iterator, which is a sentinel indicating that the end of
+// iteration has been reached.
+//
+// Note that the gap between two adjacent segments exists (iterators to it are
+// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
+// for such gaps. An empty set contains a single gap, spanning the entire range
+// of the set's keys.
+//
+// GapIterators are copyable values and are meaningfully equality-comparable.
+// The zero value of GapIterator is a terminal iterator.
+//
+// Unless otherwise specified, any mutation of a set invalidates all existing
+// iterators into the set.
+type addrGapIterator struct {
+ // The representation of a GapIterator is identical to that of an Iterator,
+ // except that index corresponds to positions between segments in the same
+ // way as for node.children (see comment for node.nrSegments).
+ node *addrnode
+ index int
+}
+
+// Ok returns true if the iterator is not terminal. All other methods are only
+// valid for non-terminal iterators.
+func (gap addrGapIterator) Ok() bool {
+ return gap.node != nil
+}
+
+// Range returns the range spanned by the iterated gap.
+func (gap addrGapIterator) Range() addrRange {
+ return addrRange{gap.Start(), gap.End()}
+}
+
+// Start is equivalent to Range().Start, but should be preferred if only the
+// start of the range is needed.
+func (gap addrGapIterator) Start() uintptr {
+ if ps := gap.PrevSegment(); ps.Ok() {
+ return ps.End()
+ }
+ return addrSetFunctions{}.MinKey()
+}
+
+// End is equivalent to Range().End, but should be preferred if only the end of
+// the range is needed.
+func (gap addrGapIterator) End() uintptr {
+ if ns := gap.NextSegment(); ns.Ok() {
+ return ns.Start()
+ }
+ return addrSetFunctions{}.MaxKey()
+}
+
+// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
+// between two adjacent segments.)
+func (gap addrGapIterator) IsEmpty() bool {
+ return gap.Range().Length() == 0
+}
+
+// PrevSegment returns the segment immediately before the iterated gap. If no
+// such segment exists, PrevSegment returns a terminal iterator.
+func (gap addrGapIterator) PrevSegment() addrIterator {
+ return addrsegmentBeforePosition(gap.node, gap.index)
+}
+
+// NextSegment returns the segment immediately after the iterated gap. If no
+// such segment exists, NextSegment returns a terminal iterator.
+func (gap addrGapIterator) NextSegment() addrIterator {
+ return addrsegmentAfterPosition(gap.node, gap.index)
+}
+
+// PrevGap returns the iterated gap's predecessor. If no such gap exists,
+// PrevGap returns a terminal iterator.
+func (gap addrGapIterator) PrevGap() addrGapIterator {
+ seg := gap.PrevSegment()
+ if !seg.Ok() {
+ return addrGapIterator{}
+ }
+ return seg.PrevGap()
+}
+
+// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
+// returns a terminal iterator.
+func (gap addrGapIterator) NextGap() addrGapIterator {
+ seg := gap.NextSegment()
+ if !seg.Ok() {
+ return addrGapIterator{}
+ }
+ return seg.NextGap()
+}
+
+// segmentBeforePosition returns the predecessor segment of the position given
+// by n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentBeforePosition returns a terminal iterator.
+func addrsegmentBeforePosition(n *addrnode, i int) addrIterator {
+ for i == 0 {
+ if n.parent == nil {
+ return addrIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return addrIterator{n, i - 1}
+}
+
+// segmentAfterPosition returns the successor segment of the position given by
+// n.children[i], which may or may not contain a child. If no such segment
+// exists, segmentAfterPosition returns a terminal iterator.
+func addrsegmentAfterPosition(n *addrnode, i int) addrIterator {
+ for i == n.nrSegments {
+ if n.parent == nil {
+ return addrIterator{}
+ }
+ n, i = n.parent, n.parentIndex
+ }
+ return addrIterator{n, i}
+}
+
+func addrzeroValueSlice(slice []__generics_imported0.Value) {
+
+ for i := range slice {
+ addrSetFunctions{}.ClearValue(&slice[i])
+ }
+}
+
+func addrzeroNodeSlice(slice []*addrnode) {
+ for i := range slice {
+ slice[i] = nil
+ }
+}
+
+// String stringifies a Set for debugging.
+func (s *addrSet) String() string {
+ return s.root.String()
+}
+
+// String stringifies a node (and all of its children) for debugging.
+func (n *addrnode) String() string {
+ var buf bytes.Buffer
+ n.writeDebugString(&buf, "")
+ return buf.String()
+}
+
+func (n *addrnode) writeDebugString(buf *bytes.Buffer, prefix string) {
+ if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
+ }
+ for i := 0; i < n.nrSegments; i++ {
+ if child := n.children[i]; child != nil {
+ cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
+ if child.parent != n || child.parentIndex != i {
+ buf.WriteString(cprefix)
+ buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
+ }
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
+ }
+ if child := n.children[n.nrSegments]; child != nil {
+ child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
+ }
+}
+
+// SegmentDataSlices represents segments from a set as slices of start, end, and
+// values. SegmentDataSlices is primarily used as an intermediate representation
+// for save/restore and the layout here is optimized for that.
+//
+// +stateify savable
+type addrSegmentDataSlices struct {
+ Start []uintptr
+ End []uintptr
+ Values []__generics_imported0.Value
+}
+
+// ExportSortedSlices returns a copy of all segments in the given set, in ascending
+// key order.
+func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices {
+ var sds addrSegmentDataSlices
+ for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ sds.Start = append(sds.Start, seg.Start())
+ sds.End = append(sds.End, seg.End())
+ sds.Values = append(sds.Values, seg.Value())
+ }
+ sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
+ sds.End = sds.End[:len(sds.End):len(sds.End)]
+ sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
+ return &sds
+}
+
+// ImportSortedSlices initializes the given set from the given slices.
+//
+// Preconditions: s must be empty. sds must represent a valid set (the segments
+// in sds must have valid lengths that do not overlap). The segments in sds
+// must be sorted in ascending key order.
+func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error {
+ if !s.IsEmpty() {
+ return fmt.Errorf("cannot import into non-empty set %v", s)
+ }
+ gap := s.FirstGap()
+ for i := range sds.Start {
+ r := addrRange{sds.Start[i], sds.End[i]}
+ if !gap.Range().IsSupersetOf(r) {
+ return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
+ }
+ gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
+ }
+ return nil
+}
+func (s *addrSet) saveRoot() *addrSegmentDataSlices {
+ return s.ExportSortedSlices()
+}
+
+func (s *addrSet) loadRoot(sds *addrSegmentDataSlices) {
+ if err := s.ImportSortedSlices(sds); err != nil {
+ panic(err)
+ }
+}
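
addr_set.go above instantiates the generic segment-set B-tree with uintptr keys, addrRange ranges, and reflect.Value payloads; the addrSetFunctions type it calls into (MinKey, MaxKey, ClearValue, Merge, Split) is defined elsewhere in pkg/state and does not appear in this diff. The sketch below shows the method set those call sites imply, plus a tiny Add/Find flow; the method bodies and the exampleAddrSet helper are illustrative assumptions, not the package's actual implementation.

package state

import "reflect"

// addrSetFunctions (sketch): the method set is inferred from call sites in
// addr_set.go such as addrSetFunctions{}.MinKey(), .Merge(...), .Split(...),
// and .ClearValue(...). Bodies are illustrative only.
type addrSetFunctions struct{}

// MinKey and MaxKey bound the key space so that FirstGap/LastGap can report
// the gaps before the first segment and after the last one.
func (addrSetFunctions) MinKey() uintptr { return 0 }
func (addrSetFunctions) MaxKey() uintptr { return ^uintptr(0) }

// ClearValue releases a stored value when its segment slot is vacated.
func (addrSetFunctions) ClearValue(v *reflect.Value) { *v = reflect.Value{} }

// Merge reports whether two adjacent segments may be coalesced; returning
// ok == false always keeps them separate, the conservative choice for
// reflect.Value payloads.
func (addrSetFunctions) Merge(_ addrRange, _ reflect.Value, _ addrRange, _ reflect.Value) (reflect.Value, bool) {
	return reflect.Value{}, false
}

// Split produces the values for the two halves when a segment is split at a key.
func (addrSetFunctions) Split(_ addrRange, v reflect.Value, _ uintptr) (reflect.Value, reflect.Value) {
	return v, v
}

// exampleAddrSet shows the basic Add/Find flow on the generated set.
func exampleAddrSet() {
	var s addrSet
	s.Add(addrRange{Start: 0x1000, End: 0x2000}, reflect.ValueOf("a"))
	s.Add(addrRange{Start: 0x3000, End: 0x4000}, reflect.ValueOf("b"))
	if seg := s.FindSegment(0x1800); seg.Ok() {
		_ = seg.Value() // the reflect.Value stored for [0x1000, 0x2000)
	}
}
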
diff --git a/pkg/state/object.proto b/pkg/state/object.proto
deleted file mode 100644
index 952289069..000000000
--- a/pkg/state/object.proto
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package gvisor.state.statefile;
-
-// Slice is a slice value.
-message Slice {
- uint32 length = 1;
- uint32 capacity = 2;
- uint64 ref_value = 3;
-}
-
-// Array is an array value.
-message Array {
- repeated Object contents = 1;
-}
-
-// Map is a map value.
-message Map {
- repeated Object keys = 1;
- repeated Object values = 2;
-}
-
-// Interface is an interface value.
-message Interface {
- string type = 1;
- Object value = 2;
-}
-
-// Struct is a basic composite value.
-message Struct {
- repeated Field fields = 1;
-}
-
-// Field encodes a single field.
-message Field {
- string name = 1;
- Object value = 2;
-}
-
-// Uint16s encodes an uint16 array. To be used inside oneof structure.
-message Uint16s {
- // There is no 16-bit type in protobuf so we use variable length 32-bit here.
- repeated uint32 values = 1;
-}
-
-// Uint32s encodes an uint32 array. To be used inside oneof structure.
-message Uint32s {
- repeated fixed32 values = 1;
-}
-
-// Uint64s encodes an uint64 array. To be used inside oneof structure.
-message Uint64s {
- repeated fixed64 values = 1;
-}
-
-// Uintptrs encodes an uintptr array. To be used inside oneof structure.
-message Uintptrs {
- repeated fixed64 values = 1;
-}
-
-// Int8s encodes an int8 array. To be used inside oneof structure.
-message Int8s {
- bytes values = 1;
-}
-
-// Int16s encodes an int16 array. To be used inside oneof structure.
-message Int16s {
- // There is no 16-bit type in protobuf so we use variable length 32-bit here.
- repeated int32 values = 1;
-}
-
-// Int32s encodes an int32 array. To be used inside oneof structure.
-message Int32s {
- repeated sfixed32 values = 1;
-}
-
-// Int64s encodes an int64 array. To be used inside oneof structure.
-message Int64s {
- repeated sfixed64 values = 1;
-}
-
-// Bools encodes a boolean array. To be used inside oneof structure.
-message Bools {
- repeated bool values = 1;
-}
-
-// Float64s encodes a float64 array. To be used inside oneof structure.
-message Float64s {
- repeated double values = 1;
-}
-
-// Float32s encodes a float32 array. To be used inside oneof structure.
-message Float32s {
- repeated float values = 1;
-}
-
-// Object are primitive encodings.
-//
-// Note that ref_value references an Object.id, below.
-message Object {
- oneof value {
- bool bool_value = 1;
- bytes string_value = 2;
- int64 int64_value = 3;
- uint64 uint64_value = 4;
- double double_value = 5;
- uint64 ref_value = 6;
- Slice slice_value = 7;
- Array array_value = 8;
- Interface interface_value = 9;
- Struct struct_value = 10;
- Map map_value = 11;
- bytes byte_array_value = 12;
- Uint16s uint16_array_value = 13;
- Uint32s uint32_array_value = 14;
- Uint64s uint64_array_value = 15;
- Uintptrs uintptr_array_value = 16;
- Int8s int8_array_value = 17;
- Int16s int16_array_value = 18;
- Int32s int32_array_value = 19;
- Int64s int64_array_value = 20;
- Bools bool_array_value = 21;
- Float64s float64_array_value = 22;
- Float32s float32_array_value = 23;
- }
-}
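
The (deleted) schema above maps one-to-one onto the generated Go types in pkg/state/object_go_proto shown next. Below is a minimal, self-contained sketch of round-tripping one of those messages with github.com/golang/protobuf/proto; the import path comes from the BUILD rule earlier in this diff, and the example program itself is an illustration rather than code from the repository.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "gvisor.dev/gvisor/pkg/state/object_go_proto"
)

func main() {
	// Slice mirrors the Slice message in object.proto: a length, a capacity,
	// and a reference (by object ID) to the backing array.
	in := &pb.Slice{Length: 3, Capacity: 8, RefValue: 42}

	buf, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}

	var out pb.Slice
	if err := proto.Unmarshal(buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetLength(), out.GetCapacity(), out.GetRefValue()) // 3 8 42
}
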
diff --git a/pkg/state/object_go_proto/object.pb.go b/pkg/state/object_go_proto/object.pb.go
new file mode 100755
index 000000000..dc5127149
--- /dev/null
+++ b/pkg/state/object_go_proto/object.pb.go
@@ -0,0 +1,1195 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: pkg/state/object.proto
+
+package gvisor_state_statefile
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Slice struct {
+ Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
+ Capacity uint32 `protobuf:"varint,2,opt,name=capacity,proto3" json:"capacity,omitempty"`
+ RefValue uint64 `protobuf:"varint,3,opt,name=ref_value,json=refValue,proto3" json:"ref_value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Slice) Reset() { *m = Slice{} }
+func (m *Slice) String() string { return proto.CompactTextString(m) }
+func (*Slice) ProtoMessage() {}
+func (*Slice) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{0}
+}
+
+func (m *Slice) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Slice.Unmarshal(m, b)
+}
+func (m *Slice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Slice.Marshal(b, m, deterministic)
+}
+func (m *Slice) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Slice.Merge(m, src)
+}
+func (m *Slice) XXX_Size() int {
+ return xxx_messageInfo_Slice.Size(m)
+}
+func (m *Slice) XXX_DiscardUnknown() {
+ xxx_messageInfo_Slice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Slice proto.InternalMessageInfo
+
+func (m *Slice) GetLength() uint32 {
+ if m != nil {
+ return m.Length
+ }
+ return 0
+}
+
+func (m *Slice) GetCapacity() uint32 {
+ if m != nil {
+ return m.Capacity
+ }
+ return 0
+}
+
+func (m *Slice) GetRefValue() uint64 {
+ if m != nil {
+ return m.RefValue
+ }
+ return 0
+}
+
+type Array struct {
+ Contents []*Object `protobuf:"bytes,1,rep,name=contents,proto3" json:"contents,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Array) Reset() { *m = Array{} }
+func (m *Array) String() string { return proto.CompactTextString(m) }
+func (*Array) ProtoMessage() {}
+func (*Array) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{1}
+}
+
+func (m *Array) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Array.Unmarshal(m, b)
+}
+func (m *Array) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Array.Marshal(b, m, deterministic)
+}
+func (m *Array) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Array.Merge(m, src)
+}
+func (m *Array) XXX_Size() int {
+ return xxx_messageInfo_Array.Size(m)
+}
+func (m *Array) XXX_DiscardUnknown() {
+ xxx_messageInfo_Array.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Array proto.InternalMessageInfo
+
+func (m *Array) GetContents() []*Object {
+ if m != nil {
+ return m.Contents
+ }
+ return nil
+}
+
+type Map struct {
+ Keys []*Object `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
+ Values []*Object `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Map) Reset() { *m = Map{} }
+func (m *Map) String() string { return proto.CompactTextString(m) }
+func (*Map) ProtoMessage() {}
+func (*Map) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{2}
+}
+
+func (m *Map) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Map.Unmarshal(m, b)
+}
+func (m *Map) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Map.Marshal(b, m, deterministic)
+}
+func (m *Map) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Map.Merge(m, src)
+}
+func (m *Map) XXX_Size() int {
+ return xxx_messageInfo_Map.Size(m)
+}
+func (m *Map) XXX_DiscardUnknown() {
+ xxx_messageInfo_Map.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Map proto.InternalMessageInfo
+
+func (m *Map) GetKeys() []*Object {
+ if m != nil {
+ return m.Keys
+ }
+ return nil
+}
+
+func (m *Map) GetValues() []*Object {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Interface struct {
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Object `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Interface) Reset() { *m = Interface{} }
+func (m *Interface) String() string { return proto.CompactTextString(m) }
+func (*Interface) ProtoMessage() {}
+func (*Interface) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{3}
+}
+
+func (m *Interface) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Interface.Unmarshal(m, b)
+}
+func (m *Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Interface.Marshal(b, m, deterministic)
+}
+func (m *Interface) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Interface.Merge(m, src)
+}
+func (m *Interface) XXX_Size() int {
+ return xxx_messageInfo_Interface.Size(m)
+}
+func (m *Interface) XXX_DiscardUnknown() {
+ xxx_messageInfo_Interface.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Interface proto.InternalMessageInfo
+
+func (m *Interface) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *Interface) GetValue() *Object {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Struct struct {
+ Fields []*Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{4}
+}
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() []*Field {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+type Field struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value *Object `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+func (*Field) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{5}
+}
+
+func (m *Field) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Field.Unmarshal(m, b)
+}
+func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Field.Marshal(b, m, deterministic)
+}
+func (m *Field) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Field.Merge(m, src)
+}
+func (m *Field) XXX_Size() int {
+ return xxx_messageInfo_Field.Size(m)
+}
+func (m *Field) XXX_DiscardUnknown() {
+ xxx_messageInfo_Field.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Field proto.InternalMessageInfo
+
+func (m *Field) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *Object {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Uint16S struct {
+ Values []uint32 `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Uint16S) Reset() { *m = Uint16S{} }
+func (m *Uint16S) String() string { return proto.CompactTextString(m) }
+func (*Uint16S) ProtoMessage() {}
+func (*Uint16S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{6}
+}
+
+func (m *Uint16S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Uint16S.Unmarshal(m, b)
+}
+func (m *Uint16S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Uint16S.Marshal(b, m, deterministic)
+}
+func (m *Uint16S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Uint16S.Merge(m, src)
+}
+func (m *Uint16S) XXX_Size() int {
+ return xxx_messageInfo_Uint16S.Size(m)
+}
+func (m *Uint16S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Uint16S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Uint16S proto.InternalMessageInfo
+
+func (m *Uint16S) GetValues() []uint32 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Uint32S struct {
+ Values []uint32 `protobuf:"fixed32,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Uint32S) Reset() { *m = Uint32S{} }
+func (m *Uint32S) String() string { return proto.CompactTextString(m) }
+func (*Uint32S) ProtoMessage() {}
+func (*Uint32S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{7}
+}
+
+func (m *Uint32S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Uint32S.Unmarshal(m, b)
+}
+func (m *Uint32S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Uint32S.Marshal(b, m, deterministic)
+}
+func (m *Uint32S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Uint32S.Merge(m, src)
+}
+func (m *Uint32S) XXX_Size() int {
+ return xxx_messageInfo_Uint32S.Size(m)
+}
+func (m *Uint32S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Uint32S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Uint32S proto.InternalMessageInfo
+
+func (m *Uint32S) GetValues() []uint32 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Uint64S struct {
+ Values []uint64 `protobuf:"fixed64,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Uint64S) Reset() { *m = Uint64S{} }
+func (m *Uint64S) String() string { return proto.CompactTextString(m) }
+func (*Uint64S) ProtoMessage() {}
+func (*Uint64S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{8}
+}
+
+func (m *Uint64S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Uint64S.Unmarshal(m, b)
+}
+func (m *Uint64S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Uint64S.Marshal(b, m, deterministic)
+}
+func (m *Uint64S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Uint64S.Merge(m, src)
+}
+func (m *Uint64S) XXX_Size() int {
+ return xxx_messageInfo_Uint64S.Size(m)
+}
+func (m *Uint64S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Uint64S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Uint64S proto.InternalMessageInfo
+
+func (m *Uint64S) GetValues() []uint64 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Uintptrs struct {
+ Values []uint64 `protobuf:"fixed64,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Uintptrs) Reset() { *m = Uintptrs{} }
+func (m *Uintptrs) String() string { return proto.CompactTextString(m) }
+func (*Uintptrs) ProtoMessage() {}
+func (*Uintptrs) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{9}
+}
+
+func (m *Uintptrs) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Uintptrs.Unmarshal(m, b)
+}
+func (m *Uintptrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Uintptrs.Marshal(b, m, deterministic)
+}
+func (m *Uintptrs) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Uintptrs.Merge(m, src)
+}
+func (m *Uintptrs) XXX_Size() int {
+ return xxx_messageInfo_Uintptrs.Size(m)
+}
+func (m *Uintptrs) XXX_DiscardUnknown() {
+ xxx_messageInfo_Uintptrs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Uintptrs proto.InternalMessageInfo
+
+func (m *Uintptrs) GetValues() []uint64 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Int8S struct {
+ Values []byte `protobuf:"bytes,1,opt,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int8S) Reset() { *m = Int8S{} }
+func (m *Int8S) String() string { return proto.CompactTextString(m) }
+func (*Int8S) ProtoMessage() {}
+func (*Int8S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{10}
+}
+
+func (m *Int8S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int8S.Unmarshal(m, b)
+}
+func (m *Int8S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int8S.Marshal(b, m, deterministic)
+}
+func (m *Int8S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int8S.Merge(m, src)
+}
+func (m *Int8S) XXX_Size() int {
+ return xxx_messageInfo_Int8S.Size(m)
+}
+func (m *Int8S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int8S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int8S proto.InternalMessageInfo
+
+func (m *Int8S) GetValues() []byte {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Int16S struct {
+ Values []int32 `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int16S) Reset() { *m = Int16S{} }
+func (m *Int16S) String() string { return proto.CompactTextString(m) }
+func (*Int16S) ProtoMessage() {}
+func (*Int16S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{11}
+}
+
+func (m *Int16S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int16S.Unmarshal(m, b)
+}
+func (m *Int16S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int16S.Marshal(b, m, deterministic)
+}
+func (m *Int16S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int16S.Merge(m, src)
+}
+func (m *Int16S) XXX_Size() int {
+ return xxx_messageInfo_Int16S.Size(m)
+}
+func (m *Int16S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int16S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int16S proto.InternalMessageInfo
+
+func (m *Int16S) GetValues() []int32 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Int32S struct {
+ Values []int32 `protobuf:"fixed32,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int32S) Reset() { *m = Int32S{} }
+func (m *Int32S) String() string { return proto.CompactTextString(m) }
+func (*Int32S) ProtoMessage() {}
+func (*Int32S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{12}
+}
+
+func (m *Int32S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int32S.Unmarshal(m, b)
+}
+func (m *Int32S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int32S.Marshal(b, m, deterministic)
+}
+func (m *Int32S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int32S.Merge(m, src)
+}
+func (m *Int32S) XXX_Size() int {
+ return xxx_messageInfo_Int32S.Size(m)
+}
+func (m *Int32S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int32S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32S proto.InternalMessageInfo
+
+func (m *Int32S) GetValues() []int32 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Int64S struct {
+ Values []int64 `protobuf:"fixed64,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int64S) Reset() { *m = Int64S{} }
+func (m *Int64S) String() string { return proto.CompactTextString(m) }
+func (*Int64S) ProtoMessage() {}
+func (*Int64S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{13}
+}
+
+func (m *Int64S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int64S.Unmarshal(m, b)
+}
+func (m *Int64S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int64S.Marshal(b, m, deterministic)
+}
+func (m *Int64S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int64S.Merge(m, src)
+}
+func (m *Int64S) XXX_Size() int {
+ return xxx_messageInfo_Int64S.Size(m)
+}
+func (m *Int64S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int64S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64S proto.InternalMessageInfo
+
+func (m *Int64S) GetValues() []int64 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Bools struct {
+ Values []bool `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Bools) Reset() { *m = Bools{} }
+func (m *Bools) String() string { return proto.CompactTextString(m) }
+func (*Bools) ProtoMessage() {}
+func (*Bools) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{14}
+}
+
+func (m *Bools) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bools.Unmarshal(m, b)
+}
+func (m *Bools) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bools.Marshal(b, m, deterministic)
+}
+func (m *Bools) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bools.Merge(m, src)
+}
+func (m *Bools) XXX_Size() int {
+ return xxx_messageInfo_Bools.Size(m)
+}
+func (m *Bools) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bools.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bools proto.InternalMessageInfo
+
+func (m *Bools) GetValues() []bool {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Float64S struct {
+ Values []float64 `protobuf:"fixed64,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Float64S) Reset() { *m = Float64S{} }
+func (m *Float64S) String() string { return proto.CompactTextString(m) }
+func (*Float64S) ProtoMessage() {}
+func (*Float64S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{15}
+}
+
+func (m *Float64S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Float64S.Unmarshal(m, b)
+}
+func (m *Float64S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Float64S.Marshal(b, m, deterministic)
+}
+func (m *Float64S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Float64S.Merge(m, src)
+}
+func (m *Float64S) XXX_Size() int {
+ return xxx_messageInfo_Float64S.Size(m)
+}
+func (m *Float64S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Float64S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Float64S proto.InternalMessageInfo
+
+func (m *Float64S) GetValues() []float64 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Float32S struct {
+ Values []float32 `protobuf:"fixed32,1,rep,packed,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Float32S) Reset() { *m = Float32S{} }
+func (m *Float32S) String() string { return proto.CompactTextString(m) }
+func (*Float32S) ProtoMessage() {}
+func (*Float32S) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{16}
+}
+
+func (m *Float32S) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Float32S.Unmarshal(m, b)
+}
+func (m *Float32S) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Float32S.Marshal(b, m, deterministic)
+}
+func (m *Float32S) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Float32S.Merge(m, src)
+}
+func (m *Float32S) XXX_Size() int {
+ return xxx_messageInfo_Float32S.Size(m)
+}
+func (m *Float32S) XXX_DiscardUnknown() {
+ xxx_messageInfo_Float32S.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Float32S proto.InternalMessageInfo
+
+func (m *Float32S) GetValues() []float32 {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+type Object struct {
+ // Types that are valid to be assigned to Value:
+ // *Object_BoolValue
+ // *Object_StringValue
+ // *Object_Int64Value
+ // *Object_Uint64Value
+ // *Object_DoubleValue
+ // *Object_RefValue
+ // *Object_SliceValue
+ // *Object_ArrayValue
+ // *Object_InterfaceValue
+ // *Object_StructValue
+ // *Object_MapValue
+ // *Object_ByteArrayValue
+ // *Object_Uint16ArrayValue
+ // *Object_Uint32ArrayValue
+ // *Object_Uint64ArrayValue
+ // *Object_UintptrArrayValue
+ // *Object_Int8ArrayValue
+ // *Object_Int16ArrayValue
+ // *Object_Int32ArrayValue
+ // *Object_Int64ArrayValue
+ // *Object_BoolArrayValue
+ // *Object_Float64ArrayValue
+ // *Object_Float32ArrayValue
+ Value isObject_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Object) Reset() { *m = Object{} }
+func (m *Object) String() string { return proto.CompactTextString(m) }
+func (*Object) ProtoMessage() {}
+func (*Object) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dee2c1912d4d62d, []int{17}
+}
+
+func (m *Object) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Object.Unmarshal(m, b)
+}
+func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Object.Marshal(b, m, deterministic)
+}
+func (m *Object) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Object.Merge(m, src)
+}
+func (m *Object) XXX_Size() int {
+ return xxx_messageInfo_Object.Size(m)
+}
+func (m *Object) XXX_DiscardUnknown() {
+ xxx_messageInfo_Object.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Object proto.InternalMessageInfo
+
+type isObject_Value interface {
+ isObject_Value()
+}
+
+type Object_BoolValue struct {
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Object_StringValue struct {
+ StringValue []byte `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Object_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Object_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Object_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Object_RefValue struct {
+ RefValue uint64 `protobuf:"varint,6,opt,name=ref_value,json=refValue,proto3,oneof"`
+}
+
+type Object_SliceValue struct {
+ SliceValue *Slice `protobuf:"bytes,7,opt,name=slice_value,json=sliceValue,proto3,oneof"`
+}
+
+type Object_ArrayValue struct {
+ ArrayValue *Array `protobuf:"bytes,8,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type Object_InterfaceValue struct {
+ InterfaceValue *Interface `protobuf:"bytes,9,opt,name=interface_value,json=interfaceValue,proto3,oneof"`
+}
+
+type Object_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,10,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Object_MapValue struct {
+ MapValue *Map `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Object_ByteArrayValue struct {
+ ByteArrayValue []byte `protobuf:"bytes,12,opt,name=byte_array_value,json=byteArrayValue,proto3,oneof"`
+}
+
+type Object_Uint16ArrayValue struct {
+ Uint16ArrayValue *Uint16S `protobuf:"bytes,13,opt,name=uint16_array_value,json=uint16ArrayValue,proto3,oneof"`
+}
+
+type Object_Uint32ArrayValue struct {
+ Uint32ArrayValue *Uint32S `protobuf:"bytes,14,opt,name=uint32_array_value,json=uint32ArrayValue,proto3,oneof"`
+}
+
+type Object_Uint64ArrayValue struct {
+ Uint64ArrayValue *Uint64S `protobuf:"bytes,15,opt,name=uint64_array_value,json=uint64ArrayValue,proto3,oneof"`
+}
+
+type Object_UintptrArrayValue struct {
+ UintptrArrayValue *Uintptrs `protobuf:"bytes,16,opt,name=uintptr_array_value,json=uintptrArrayValue,proto3,oneof"`
+}
+
+type Object_Int8ArrayValue struct {
+ Int8ArrayValue *Int8S `protobuf:"bytes,17,opt,name=int8_array_value,json=int8ArrayValue,proto3,oneof"`
+}
+
+type Object_Int16ArrayValue struct {
+ Int16ArrayValue *Int16S `protobuf:"bytes,18,opt,name=int16_array_value,json=int16ArrayValue,proto3,oneof"`
+}
+
+type Object_Int32ArrayValue struct {
+ Int32ArrayValue *Int32S `protobuf:"bytes,19,opt,name=int32_array_value,json=int32ArrayValue,proto3,oneof"`
+}
+
+type Object_Int64ArrayValue struct {
+ Int64ArrayValue *Int64S `protobuf:"bytes,20,opt,name=int64_array_value,json=int64ArrayValue,proto3,oneof"`
+}
+
+type Object_BoolArrayValue struct {
+ BoolArrayValue *Bools `protobuf:"bytes,21,opt,name=bool_array_value,json=boolArrayValue,proto3,oneof"`
+}
+
+type Object_Float64ArrayValue struct {
+ Float64ArrayValue *Float64S `protobuf:"bytes,22,opt,name=float64_array_value,json=float64ArrayValue,proto3,oneof"`
+}
+
+type Object_Float32ArrayValue struct {
+ Float32ArrayValue *Float32S `protobuf:"bytes,23,opt,name=float32_array_value,json=float32ArrayValue,proto3,oneof"`
+}
+
+func (*Object_BoolValue) isObject_Value() {}
+
+func (*Object_StringValue) isObject_Value() {}
+
+func (*Object_Int64Value) isObject_Value() {}
+
+func (*Object_Uint64Value) isObject_Value() {}
+
+func (*Object_DoubleValue) isObject_Value() {}
+
+func (*Object_RefValue) isObject_Value() {}
+
+func (*Object_SliceValue) isObject_Value() {}
+
+func (*Object_ArrayValue) isObject_Value() {}
+
+func (*Object_InterfaceValue) isObject_Value() {}
+
+func (*Object_StructValue) isObject_Value() {}
+
+func (*Object_MapValue) isObject_Value() {}
+
+func (*Object_ByteArrayValue) isObject_Value() {}
+
+func (*Object_Uint16ArrayValue) isObject_Value() {}
+
+func (*Object_Uint32ArrayValue) isObject_Value() {}
+
+func (*Object_Uint64ArrayValue) isObject_Value() {}
+
+func (*Object_UintptrArrayValue) isObject_Value() {}
+
+func (*Object_Int8ArrayValue) isObject_Value() {}
+
+func (*Object_Int16ArrayValue) isObject_Value() {}
+
+func (*Object_Int32ArrayValue) isObject_Value() {}
+
+func (*Object_Int64ArrayValue) isObject_Value() {}
+
+func (*Object_BoolArrayValue) isObject_Value() {}
+
+func (*Object_Float64ArrayValue) isObject_Value() {}
+
+func (*Object_Float32ArrayValue) isObject_Value() {}
+
+func (m *Object) GetValue() isObject_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Object) GetBoolValue() bool {
+ if x, ok := m.GetValue().(*Object_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Object) GetStringValue() []byte {
+ if x, ok := m.GetValue().(*Object_StringValue); ok {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (m *Object) GetInt64Value() int64 {
+ if x, ok := m.GetValue().(*Object_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *Object) GetUint64Value() uint64 {
+ if x, ok := m.GetValue().(*Object_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (m *Object) GetDoubleValue() float64 {
+ if x, ok := m.GetValue().(*Object_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *Object) GetRefValue() uint64 {
+ if x, ok := m.GetValue().(*Object_RefValue); ok {
+ return x.RefValue
+ }
+ return 0
+}
+
+func (m *Object) GetSliceValue() *Slice {
+ if x, ok := m.GetValue().(*Object_SliceValue); ok {
+ return x.SliceValue
+ }
+ return nil
+}
+
+func (m *Object) GetArrayValue() *Array {
+ if x, ok := m.GetValue().(*Object_ArrayValue); ok {
+ return x.ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetInterfaceValue() *Interface {
+ if x, ok := m.GetValue().(*Object_InterfaceValue); ok {
+ return x.InterfaceValue
+ }
+ return nil
+}
+
+func (m *Object) GetStructValue() *Struct {
+ if x, ok := m.GetValue().(*Object_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Object) GetMapValue() *Map {
+ if x, ok := m.GetValue().(*Object_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (m *Object) GetByteArrayValue() []byte {
+ if x, ok := m.GetValue().(*Object_ByteArrayValue); ok {
+ return x.ByteArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetUint16ArrayValue() *Uint16S {
+ if x, ok := m.GetValue().(*Object_Uint16ArrayValue); ok {
+ return x.Uint16ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetUint32ArrayValue() *Uint32S {
+ if x, ok := m.GetValue().(*Object_Uint32ArrayValue); ok {
+ return x.Uint32ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetUint64ArrayValue() *Uint64S {
+ if x, ok := m.GetValue().(*Object_Uint64ArrayValue); ok {
+ return x.Uint64ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetUintptrArrayValue() *Uintptrs {
+ if x, ok := m.GetValue().(*Object_UintptrArrayValue); ok {
+ return x.UintptrArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetInt8ArrayValue() *Int8S {
+ if x, ok := m.GetValue().(*Object_Int8ArrayValue); ok {
+ return x.Int8ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetInt16ArrayValue() *Int16S {
+ if x, ok := m.GetValue().(*Object_Int16ArrayValue); ok {
+ return x.Int16ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetInt32ArrayValue() *Int32S {
+ if x, ok := m.GetValue().(*Object_Int32ArrayValue); ok {
+ return x.Int32ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetInt64ArrayValue() *Int64S {
+ if x, ok := m.GetValue().(*Object_Int64ArrayValue); ok {
+ return x.Int64ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetBoolArrayValue() *Bools {
+ if x, ok := m.GetValue().(*Object_BoolArrayValue); ok {
+ return x.BoolArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetFloat64ArrayValue() *Float64S {
+ if x, ok := m.GetValue().(*Object_Float64ArrayValue); ok {
+ return x.Float64ArrayValue
+ }
+ return nil
+}
+
+func (m *Object) GetFloat32ArrayValue() *Float32S {
+ if x, ok := m.GetValue().(*Object_Float32ArrayValue); ok {
+ return x.Float32ArrayValue
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Object) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Object_BoolValue)(nil),
+ (*Object_StringValue)(nil),
+ (*Object_Int64Value)(nil),
+ (*Object_Uint64Value)(nil),
+ (*Object_DoubleValue)(nil),
+ (*Object_RefValue)(nil),
+ (*Object_SliceValue)(nil),
+ (*Object_ArrayValue)(nil),
+ (*Object_InterfaceValue)(nil),
+ (*Object_StructValue)(nil),
+ (*Object_MapValue)(nil),
+ (*Object_ByteArrayValue)(nil),
+ (*Object_Uint16ArrayValue)(nil),
+ (*Object_Uint32ArrayValue)(nil),
+ (*Object_Uint64ArrayValue)(nil),
+ (*Object_UintptrArrayValue)(nil),
+ (*Object_Int8ArrayValue)(nil),
+ (*Object_Int16ArrayValue)(nil),
+ (*Object_Int32ArrayValue)(nil),
+ (*Object_Int64ArrayValue)(nil),
+ (*Object_BoolArrayValue)(nil),
+ (*Object_Float64ArrayValue)(nil),
+ (*Object_Float32ArrayValue)(nil),
+ }
+}
+
+func init() {
+ proto.RegisterType((*Slice)(nil), "gvisor.state.statefile.Slice")
+ proto.RegisterType((*Array)(nil), "gvisor.state.statefile.Array")
+ proto.RegisterType((*Map)(nil), "gvisor.state.statefile.Map")
+ proto.RegisterType((*Interface)(nil), "gvisor.state.statefile.Interface")
+ proto.RegisterType((*Struct)(nil), "gvisor.state.statefile.Struct")
+ proto.RegisterType((*Field)(nil), "gvisor.state.statefile.Field")
+ proto.RegisterType((*Uint16S)(nil), "gvisor.state.statefile.Uint16s")
+ proto.RegisterType((*Uint32S)(nil), "gvisor.state.statefile.Uint32s")
+ proto.RegisterType((*Uint64S)(nil), "gvisor.state.statefile.Uint64s")
+ proto.RegisterType((*Uintptrs)(nil), "gvisor.state.statefile.Uintptrs")
+ proto.RegisterType((*Int8S)(nil), "gvisor.state.statefile.Int8s")
+ proto.RegisterType((*Int16S)(nil), "gvisor.state.statefile.Int16s")
+ proto.RegisterType((*Int32S)(nil), "gvisor.state.statefile.Int32s")
+ proto.RegisterType((*Int64S)(nil), "gvisor.state.statefile.Int64s")
+ proto.RegisterType((*Bools)(nil), "gvisor.state.statefile.Bools")
+ proto.RegisterType((*Float64S)(nil), "gvisor.state.statefile.Float64s")
+ proto.RegisterType((*Float32S)(nil), "gvisor.state.statefile.Float32s")
+ proto.RegisterType((*Object)(nil), "gvisor.state.statefile.Object")
+}
+
+func init() { proto.RegisterFile("pkg/state/object.proto", fileDescriptor_3dee2c1912d4d62d) }
+
+var fileDescriptor_3dee2c1912d4d62d = []byte{
+ // 781 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x96, 0x6f, 0x4f, 0xda, 0x5e,
+ 0x14, 0xc7, 0xa9, 0x40, 0x29, 0x07, 0x14, 0xb8, 0xfe, 0x7e, 0x8c, 0xcc, 0x38, 0xb1, 0x7b, 0x42,
+ 0xf6, 0x00, 0x33, 0x60, 0xc4, 0xf8, 0x64, 0x53, 0x13, 0x03, 0xc9, 0x8c, 0x59, 0x8d, 0xcb, 0x9e,
+ 0x99, 0x52, 0x2f, 0xac, 0xb3, 0xb6, 0x5d, 0x7b, 0x6b, 0xc2, 0xcb, 0xdc, 0x3b, 0x5a, 0xee, 0x1f,
+ 0xae, 0xfd, 0x03, 0xc5, 0xec, 0x89, 0xa1, 0xb7, 0xdf, 0xf3, 0xe1, 0xdc, 0xf3, 0x3d, 0xe7, 0x08,
+ 0xb4, 0xfd, 0xc7, 0xc5, 0x49, 0x48, 0x4c, 0x82, 0x4f, 0xbc, 0xd9, 0x2f, 0x6c, 0x91, 0xbe, 0x1f,
+ 0x78, 0xc4, 0x43, 0xed, 0xc5, 0xb3, 0x1d, 0x7a, 0x41, 0x9f, 0xbd, 0xe2, 0x7f, 0xe7, 0xb6, 0x83,
+ 0xf5, 0x1f, 0x50, 0xbe, 0x75, 0x6c, 0x0b, 0xa3, 0x36, 0xa8, 0x0e, 0x76, 0x17, 0xe4, 0x67, 0x47,
+ 0xe9, 0x2a, 0xbd, 0x5d, 0x43, 0x3c, 0xa1, 0xb7, 0xa0, 0x59, 0xa6, 0x6f, 0x5a, 0x36, 0x59, 0x76,
+ 0x76, 0xd8, 0x1b, 0xf9, 0x8c, 0x0e, 0xa0, 0x1a, 0xe0, 0xf9, 0xfd, 0xb3, 0xe9, 0x44, 0xb8, 0x53,
+ 0xec, 0x2a, 0xbd, 0x92, 0xa1, 0x05, 0x78, 0xfe, 0x9d, 0x3e, 0xeb, 0x97, 0x50, 0x3e, 0x0f, 0x02,
+ 0x73, 0x89, 0xce, 0x40, 0xb3, 0x3c, 0x97, 0x60, 0x97, 0x84, 0x1d, 0xa5, 0x5b, 0xec, 0xd5, 0x06,
+ 0xef, 0xfa, 0xeb, 0xb3, 0xe9, 0xdf, 0xb0, 0x94, 0x0d, 0xa9, 0xd7, 0x7f, 0x43, 0xf1, 0xda, 0xf4,
+ 0xd1, 0x00, 0x4a, 0x8f, 0x78, 0xf9, 0xda, 0x70, 0xa6, 0x45, 0x63, 0x50, 0x59, 0x62, 0x61, 0x67,
+ 0xe7, 0x55, 0x51, 0x42, 0xad, 0xdf, 0x41, 0x75, 0xea, 0x12, 0x1c, 0xcc, 0x4d, 0x0b, 0x23, 0x04,
+ 0x25, 0xb2, 0xf4, 0x31, 0xab, 0x49, 0xd5, 0x60, 0x9f, 0xd1, 0x08, 0xca, 0xfc, 0xc6, 0xb4, 0x1c,
+ 0xdb, 0xb9, 0x5c, 0xac, 0x7f, 0x06, 0xf5, 0x96, 0x04, 0x91, 0x45, 0xd0, 0x27, 0x50, 0xe7, 0x36,
+ 0x76, 0x1e, 0x56, 0xd7, 0x39, 0xdc, 0x04, 0xb8, 0xa2, 0x2a, 0x43, 0x88, 0xf5, 0x6f, 0x50, 0x66,
+ 0x07, 0x34, 0x27, 0xd7, 0x7c, 0x92, 0x39, 0xd1, 0xcf, 0xff, 0x98, 0xd3, 0x31, 0x54, 0xee, 0x6c,
+ 0x97, 0x7c, 0x1c, 0x87, 0xd4, 0x7e, 0x51, 0x2d, 0x9a, 0xd4, 0xae, 0xac, 0x86, 0x90, 0x0c, 0x07,
+ 0x69, 0x49, 0x25, 0x2d, 0x19, 0x8f, 0xd2, 0x12, 0x55, 0x4a, 0x74, 0xd0, 0xa8, 0xc4, 0x27, 0xc1,
+ 0x66, 0xcd, 0x11, 0x94, 0xa7, 0x2e, 0x39, 0x4d, 0x0a, 0x94, 0x5e, 0x5d, 0x0a, 0xba, 0xa0, 0x4e,
+ 0xd7, 0x25, 0x5b, 0x4e, 0x29, 0xb2, 0xb9, 0x36, 0x52, 0x8a, 0x6c, 0xaa, 0xcd, 0x78, 0x1a, 0x17,
+ 0x9e, 0xe7, 0xa4, 0x05, 0x5a, 0xfc, 0x2e, 0x57, 0x8e, 0x67, 0xae, 0x81, 0x28, 0x19, 0x4d, 0x36,
+ 0x95, 0x1d, 0xa9, 0xf9, 0x53, 0x03, 0x95, 0xdb, 0x81, 0x8e, 0x00, 0x66, 0x9e, 0xe7, 0x88, 0x41,
+ 0xa2, 0xb7, 0xd6, 0x26, 0x05, 0xa3, 0x4a, 0xcf, 0xd8, 0x2c, 0xa1, 0xf7, 0x50, 0x0f, 0x49, 0x60,
+ 0xbb, 0x8b, 0xfb, 0x17, 0x97, 0xeb, 0x93, 0x82, 0x51, 0xe3, 0xa7, 0x5c, 0x74, 0x0c, 0x35, 0x66,
+ 0x43, 0x6c, 0x1e, 0x8b, 0x93, 0x82, 0x01, 0xec, 0x50, 0x72, 0xa2, 0xb8, 0xa6, 0x44, 0x67, 0x96,
+ 0x72, 0xa2, 0xa4, 0xe8, 0xc1, 0x8b, 0x66, 0x0e, 0x16, 0xa2, 0x72, 0x57, 0xe9, 0x29, 0x54, 0xc4,
+ 0x4f, 0xb9, 0xe8, 0x30, 0x3e, 0xfa, 0xaa, 0xc0, 0xc8, 0xe1, 0x47, 0x5f, 0xa0, 0x16, 0xd2, 0xb5,
+ 0x22, 0x04, 0x15, 0xd6, 0x95, 0x1b, 0x1b, 0x9d, 0x6d, 0x20, 0x9a, 0x2a, 0x8b, 0x91, 0x04, 0x93,
+ 0xae, 0x0f, 0x41, 0xd0, 0xf2, 0x09, 0x6c, 0xd3, 0x50, 0x02, 0x8b, 0xe1, 0x84, 0xaf, 0xd0, 0xb0,
+ 0x57, 0x83, 0x2c, 0x28, 0x55, 0x46, 0x39, 0xde, 0x44, 0x91, 0x73, 0x3f, 0x29, 0x18, 0x7b, 0x32,
+ 0x96, 0xd3, 0x2e, 0x99, 0x05, 0x91, 0x45, 0x04, 0x0a, 0xf2, 0x07, 0x8d, 0xcf, 0xba, 0xb0, 0x28,
+ 0xb2, 0x08, 0x87, 0x9c, 0x41, 0xf5, 0xc9, 0xf4, 0x05, 0xa1, 0xc6, 0x08, 0x07, 0x9b, 0x08, 0xd7,
+ 0xa6, 0x4f, 0x4b, 0xfa, 0x64, 0xfa, 0x3c, 0xf6, 0x03, 0x34, 0x67, 0x4b, 0x82, 0xef, 0xe3, 0x55,
+ 0xa9, 0x8b, 0x3e, 0xd8, 0xa3, 0x6f, 0xce, 0x5f, 0xae, 0x7e, 0x03, 0x28, 0x62, 0x83, 0x9d, 0x50,
+ 0xef, 0xb2, 0x2f, 0x3c, 0xda, 0xf4, 0x85, 0x62, 0x15, 0x4c, 0x0a, 0x46, 0x93, 0x07, 0x67, 0x81,
+ 0xc3, 0x41, 0x02, 0xb8, 0xb7, 0x1d, 0x38, 0x1c, 0x48, 0xe0, 0x70, 0x90, 0x05, 0x8e, 0x47, 0x09,
+ 0x60, 0x63, 0x3b, 0x70, 0x3c, 0x92, 0xc0, 0xf1, 0x28, 0x06, 0x34, 0x60, 0x3f, 0xe2, 0x2b, 0x26,
+ 0x41, 0x6c, 0x32, 0x62, 0x37, 0x8f, 0x48, 0xb7, 0xd2, 0xa4, 0x60, 0xb4, 0x44, 0x78, 0x8c, 0x39,
+ 0x85, 0xa6, 0xed, 0x92, 0xd3, 0x04, 0xb0, 0x95, 0xdf, 0x88, 0x6c, 0x85, 0x89, 0xf6, 0x39, 0x3d,
+ 0x8f, 0x37, 0x63, 0x2b, 0x6b, 0x08, 0xca, 0xef, 0xa1, 0xe9, 0xca, 0x8f, 0x46, 0xda, 0x0e, 0x4e,
+ 0x4b, 0xb9, 0xb1, 0xbf, 0x95, 0xc6, 0xcd, 0x68, 0xa4, 0xbd, 0xe0, 0xb4, 0x94, 0x15, 0xff, 0x6d,
+ 0xa5, 0x71, 0x27, 0x1a, 0x69, 0x23, 0xa6, 0xd0, 0x64, 0xcb, 0x2c, 0x0e, 0xfb, 0x3f, 0xbf, 0x68,
+ 0x6c, 0xe1, 0xb2, 0x36, 0xf6, 0x3c, 0x27, 0xe9, 0xe9, 0x9c, 0xaf, 0xda, 0x04, 0xad, 0x9d, 0xef,
+ 0xe9, 0x6a, 0x3b, 0x53, 0x4f, 0x45, 0xf8, 0x1a, 0x66, 0xaa, 0x78, 0x6f, 0x5e, 0xc1, 0xe4, 0xe5,
+ 0x6b, 0x89, 0xf0, 0x17, 0xe6, 0x45, 0x45, 0xfc, 0xf7, 0x9d, 0xa9, 0xec, 0xc7, 0xd6, 0xf0, 0x6f,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x69, 0xc9, 0x45, 0x86, 0x09, 0x00, 0x00,
+}
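The Object message generated above is a protobuf oneof: exactly one of the wrapper types (Object_BoolValue, Object_RefValue, Object_StructValue, and so on) can be assigned to its Value field at a time, and each Get accessor returns the zero value whenever a different member is active. Below is a minimal usage sketch; the import path and alias are assumed from the object_go_proto target rather than taken from this diff.

package main

import (
	"fmt"

	pb "gvisor.dev/gvisor/pkg/state/object_go_proto"
)

func main() {
	// A reference to another encoded object, carried as the oneof's ref_value member.
	ref := &pb.Object{Value: &pb.Object_RefValue{RefValue: 42}}

	// A struct value with a single field that points at the reference above.
	obj := &pb.Object{Value: &pb.Object_StructValue{StructValue: &pb.Struct{
		Fields: []*pb.Field{{Name: "count", Value: ref}},
	}}}

	fmt.Println(ref.GetRefValue())  // 42: ref_value is the active member.
	fmt.Println(ref.GetBoolValue()) // false: bool_value is not set, so the zero value is returned.
	fmt.Println(obj.GetStructValue().GetFields()[0].GetName()) // "count"
}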
diff --git a/pkg/state/state_test.go b/pkg/state/state_test.go
deleted file mode 100644
index 7c24bbcda..000000000
--- a/pkg/state/state_test.go
+++ /dev/null
@@ -1,720 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package state
-
-import (
- "bytes"
- "io/ioutil"
- "math"
- "reflect"
- "testing"
-)
-
-// TestCase defines a single success/failure test case for serializing a
-// set of objects.
-type TestCase struct {
- // Name is the name of the test case.
- Name string
-
- // Objects is the list of values to serialize.
- Objects []interface{}
-
- // Fail is whether the test case is supposed to fail or not.
- Fail bool
-}
-
-// runTest runs all testcases.
-func runTest(t *testing.T, tests []TestCase) {
- for _, test := range tests {
- t.Logf("TEST %s:", test.Name)
- for i, root := range test.Objects {
- t.Logf(" case#%d: %#v", i, root)
-
- // Save the passed object.
- saveBuffer := &bytes.Buffer{}
- saveObjectPtr := reflect.New(reflect.TypeOf(root))
- saveObjectPtr.Elem().Set(reflect.ValueOf(root))
- if err := Save(saveBuffer, saveObjectPtr.Interface(), nil); err != nil && !test.Fail {
- t.Errorf(" FAIL: Save failed unexpectedly: %v", err)
- continue
- } else if err != nil {
- t.Logf(" PASS: Save failed as expected: %v", err)
- continue
- }
-
- // Load a new copy of the object.
- loadObjectPtr := reflect.New(reflect.TypeOf(root))
- if err := Load(bytes.NewReader(saveBuffer.Bytes()), loadObjectPtr.Interface(), nil); err != nil && !test.Fail {
- t.Errorf(" FAIL: Load failed unexpectedly: %v", err)
- continue
- } else if err != nil {
- t.Logf(" PASS: Load failed as expected: %v", err)
- continue
- }
-
- // Compare the values.
- loadedValue := loadObjectPtr.Elem().Interface()
- if eq := reflect.DeepEqual(root, loadedValue); !eq && !test.Fail {
-				t.Errorf(" FAIL: Objects differ; got %#v", loadedValue)
- continue
- } else if !eq {
-				t.Logf(" PASS: Objects differ as expected.")
- continue
- }
-
- // Everything went okay. Is that good?
- if test.Fail {
- t.Errorf(" FAIL: Unexpected success.")
- } else {
- t.Logf(" PASS: Success.")
- }
- }
- }
-}
-
-// dumbStruct is a struct which does not implement the loader/saver interface.
-// We expect that serialization of this struct will fail.
-type dumbStruct struct {
- A int
- B int
-}
-
-// smartStruct is a struct which does implement the loader/saver interface.
-// We expect that serialization of this struct will succeed.
-type smartStruct struct {
- A int
- B int
-}
-
-func (s *smartStruct) save(m Map) {
- m.Save("A", &s.A)
- m.Save("B", &s.B)
-}
-
-func (s *smartStruct) load(m Map) {
- m.Load("A", &s.A)
- m.Load("B", &s.B)
-}
-
-// valueLoadStruct uses a value load.
-type valueLoadStruct struct {
- v int
-}
-
-func (v *valueLoadStruct) save(m Map) {
- m.SaveValue("v", v.v)
-}
-
-func (v *valueLoadStruct) load(m Map) {
- m.LoadValue("v", new(int), func(value interface{}) {
- v.v = value.(int)
- })
-}
-
-// afterLoadStruct has an AfterLoad function.
-type afterLoadStruct struct {
- v int
-}
-
-func (a *afterLoadStruct) save(m Map) {
-}
-
-func (a *afterLoadStruct) load(m Map) {
- m.AfterLoad(func() {
- a.v++
- })
-}
-
-// genericContainer is a generic dispatcher.
-type genericContainer struct {
- v interface{}
-}
-
-func (g *genericContainer) save(m Map) {
- m.Save("v", &g.v)
-}
-
-func (g *genericContainer) load(m Map) {
- m.Load("v", &g.v)
-}
-
-// sliceContainer is a generic slice.
-type sliceContainer struct {
- v []interface{}
-}
-
-func (s *sliceContainer) save(m Map) {
- m.Save("v", &s.v)
-}
-
-func (s *sliceContainer) load(m Map) {
- m.Load("v", &s.v)
-}
-
-// mapContainer is a generic map.
-type mapContainer struct {
- v map[int]interface{}
-}
-
-func (mc *mapContainer) save(m Map) {
- m.Save("v", &mc.v)
-}
-
-func (mc *mapContainer) load(m Map) {
- // Some of the test cases below assume legacy behavior wherein maps
- // will automatically inherit dependencies.
- m.LoadWait("v", &mc.v)
-}
-
-// dumbMap is a map which does not implement the loader/saver interface.
-// Serialization of this map will default to the standard encode/decode logic.
-type dumbMap map[string]int
-
-// pointerStruct contains various pointers, shared and non-shared, and pointers
-// to pointers. We expect that serialization will respect the structure.
-type pointerStruct struct {
- A *int
- B *int
- C *int
- D *int
-
- AA **int
- BB **int
-}
-
-func (p *pointerStruct) save(m Map) {
- m.Save("A", &p.A)
- m.Save("B", &p.B)
- m.Save("C", &p.C)
- m.Save("D", &p.D)
- m.Save("AA", &p.AA)
- m.Save("BB", &p.BB)
-}
-
-func (p *pointerStruct) load(m Map) {
- m.Load("A", &p.A)
- m.Load("B", &p.B)
- m.Load("C", &p.C)
- m.Load("D", &p.D)
- m.Load("AA", &p.AA)
- m.Load("BB", &p.BB)
-}
-
-// testInterface is a trivial interface example.
-type testInterface interface {
- Foo()
-}
-
-// testImpl is a trivial implementation of testInterface.
-type testImpl struct {
-}
-
-// Foo satisfies testInterface.
-func (t *testImpl) Foo() {
-}
-
-// testImpl is trivially serializable.
-func (t *testImpl) save(m Map) {
-}
-
-// testImpl is trivially serializable.
-func (t *testImpl) load(m Map) {
-}
-
-// testI demonstrates interface dispatching.
-type testI struct {
- I testInterface
-}
-
-func (t *testI) save(m Map) {
- m.Save("I", &t.I)
-}
-
-func (t *testI) load(m Map) {
- m.Load("I", &t.I)
-}
-
-// cycleStruct is used to implement basic cycles.
-type cycleStruct struct {
- c *cycleStruct
-}
-
-func (c *cycleStruct) save(m Map) {
- m.Save("c", &c.c)
-}
-
-func (c *cycleStruct) load(m Map) {
- m.Load("c", &c.c)
-}
-
-// badCycleStruct actually has deadlocking dependencies.
-//
-// This should pass if b.b = {nil|b} and fail otherwise.
-type badCycleStruct struct {
- b *badCycleStruct
-}
-
-func (b *badCycleStruct) save(m Map) {
- m.Save("b", &b.b)
-}
-
-func (b *badCycleStruct) load(m Map) {
- m.LoadWait("b", &b.b)
- m.AfterLoad(func() {
- // This is not executable, since AfterLoad requires that the
- // object and all dependencies are complete. This should cause
- // a deadlock error during load.
- })
-}
-
-// emptyStructPointer points to an empty struct.
-type emptyStructPointer struct {
- nothing *struct{}
-}
-
-func (e *emptyStructPointer) save(m Map) {
- m.Save("nothing", &e.nothing)
-}
-
-func (e *emptyStructPointer) load(m Map) {
- m.Load("nothing", &e.nothing)
-}
-
-// truncateInteger truncates an integer.
-type truncateInteger struct {
- v int64
- v2 int32
-}
-
-func (t *truncateInteger) save(m Map) {
- t.v2 = int32(t.v)
- m.Save("v", &t.v)
-}
-
-func (t *truncateInteger) load(m Map) {
- m.Load("v", &t.v2)
- t.v = int64(t.v2)
-}
-
-// truncateUnsignedInteger truncates an unsigned integer.
-type truncateUnsignedInteger struct {
- v uint64
- v2 uint32
-}
-
-func (t *truncateUnsignedInteger) save(m Map) {
- t.v2 = uint32(t.v)
- m.Save("v", &t.v)
-}
-
-func (t *truncateUnsignedInteger) load(m Map) {
- m.Load("v", &t.v2)
- t.v = uint64(t.v2)
-}
-
-// truncateFloat truncates a floating point number.
-type truncateFloat struct {
- v float64
- v2 float32
-}
-
-func (t *truncateFloat) save(m Map) {
- t.v2 = float32(t.v)
- m.Save("v", &t.v)
-}
-
-func (t *truncateFloat) load(m Map) {
- m.Load("v", &t.v2)
- t.v = float64(t.v2)
-}
-
-func TestTypes(t *testing.T) {
- // x and y are basic integers, while xp points to x.
- x := 1
- y := 2
- xp := &x
-
- // cs is a single object cycle.
- cs := cycleStruct{nil}
- cs.c = &cs
-
- // cs1 and cs2 are in a two object cycle.
- cs1 := cycleStruct{nil}
- cs2 := cycleStruct{nil}
- cs1.c = &cs2
- cs2.c = &cs1
-
- // bs is a single object cycle.
- bs := badCycleStruct{nil}
- bs.b = &bs
-
-	// bs1 and bs2 are in a deadlocking cycle.
- bs1 := badCycleStruct{nil}
- bs2 := badCycleStruct{nil}
- bs1.b = &bs2
- bs2.b = &bs1
-
- // regular nils.
- var (
- nilmap dumbMap
- nilslice []byte
- )
-
- // embed points to embedded fields.
- embed1 := pointerStruct{}
- embed1.AA = &embed1.A
- embed2 := pointerStruct{}
- embed2.BB = &embed2.B
-
- // es1 contains two structs pointing to the same empty struct.
- es := emptyStructPointer{new(struct{})}
- es1 := []emptyStructPointer{es, es}
-
- tests := []TestCase{
- {
- Name: "bool",
- Objects: []interface{}{
- true,
- false,
- },
- },
- {
- Name: "integers",
- Objects: []interface{}{
- int(0),
- int(1),
- int(-1),
- int8(0),
- int8(1),
- int8(-1),
- int16(0),
- int16(1),
- int16(-1),
- int32(0),
- int32(1),
- int32(-1),
- int64(0),
- int64(1),
- int64(-1),
- },
- },
- {
- Name: "unsigned integers",
- Objects: []interface{}{
- uint(0),
- uint(1),
- uint8(0),
- uint8(1),
- uint16(0),
- uint16(1),
- uint32(1),
- uint64(0),
- uint64(1),
- },
- },
- {
- Name: "strings",
- Objects: []interface{}{
- "",
- "foo",
- "bar",
- "\xa0",
- },
- },
- {
- Name: "slices",
- Objects: []interface{}{
- []int{-1, 0, 1},
- []*int{&x, &x, &x},
- []int{1, 2, 3}[0:1],
- []int{1, 2, 3}[1:2],
- make([]byte, 32),
- make([]byte, 32)[:16],
- make([]byte, 32)[:16:20],
- nilslice,
- },
- },
- {
- Name: "arrays",
- Objects: []interface{}{
- &[1048576]bool{false, true, false, true},
- &[1048576]uint8{0, 1, 2, 3},
- &[1048576]byte{0, 1, 2, 3},
- &[1048576]uint16{0, 1, 2, 3},
- &[1048576]uint{0, 1, 2, 3},
- &[1048576]uint32{0, 1, 2, 3},
- &[1048576]uint64{0, 1, 2, 3},
- &[1048576]uintptr{0, 1, 2, 3},
- &[1048576]int8{0, -1, -2, -3},
- &[1048576]int16{0, -1, -2, -3},
- &[1048576]int32{0, -1, -2, -3},
- &[1048576]int64{0, -1, -2, -3},
- &[1048576]float32{0, 1.1, 2.2, 3.3},
- &[1048576]float64{0, 1.1, 2.2, 3.3},
- },
- },
- {
- Name: "pointers",
- Objects: []interface{}{
- &pointerStruct{A: &x, B: &x, C: &y, D: &y, AA: &xp, BB: &xp},
- &pointerStruct{},
- },
- },
- {
- Name: "empty struct",
- Objects: []interface{}{
- struct{}{},
- },
- },
- {
- Name: "unenlightened structs",
- Objects: []interface{}{
- &dumbStruct{A: 1, B: 2},
- },
- Fail: true,
- },
- {
- Name: "enlightened structs",
- Objects: []interface{}{
- &smartStruct{A: 1, B: 2},
- },
- },
- {
- Name: "load-hooks",
- Objects: []interface{}{
- &afterLoadStruct{v: 1},
- &valueLoadStruct{v: 1},
- &genericContainer{v: &afterLoadStruct{v: 1}},
- &genericContainer{v: &valueLoadStruct{v: 1}},
- &sliceContainer{v: []interface{}{&afterLoadStruct{v: 1}}},
- &sliceContainer{v: []interface{}{&valueLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: &afterLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: &valueLoadStruct{v: 1}}},
- },
- },
- {
- Name: "maps",
- Objects: []interface{}{
- dumbMap{"a": -1, "b": 0, "c": 1},
- map[smartStruct]int{{}: 0, {A: 1}: 1},
- nilmap,
- &mapContainer{v: map[int]interface{}{0: &smartStruct{A: 1}}},
- },
- },
- {
- Name: "interfaces",
- Objects: []interface{}{
- &testI{&testImpl{}},
- &testI{nil},
- &testI{(*testImpl)(nil)},
- },
- },
- {
- Name: "unregistered-interfaces",
- Objects: []interface{}{
- &genericContainer{v: afterLoadStruct{v: 1}},
- &genericContainer{v: valueLoadStruct{v: 1}},
- &sliceContainer{v: []interface{}{afterLoadStruct{v: 1}}},
- &sliceContainer{v: []interface{}{valueLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: afterLoadStruct{v: 1}}},
- &mapContainer{v: map[int]interface{}{0: valueLoadStruct{v: 1}}},
- },
- Fail: true,
- },
- {
- Name: "cycles",
- Objects: []interface{}{
- &cs,
- &cs1,
- &cycleStruct{&cs1},
- &cycleStruct{&cs},
- &badCycleStruct{nil},
- &bs,
- },
- },
- {
- Name: "deadlock",
- Objects: []interface{}{
- &bs1,
- },
- Fail: true,
- },
- {
- Name: "embed",
- Objects: []interface{}{
- &embed1,
- &embed2,
- },
- Fail: true,
- },
- {
- Name: "empty structs",
- Objects: []interface{}{
- new(struct{}),
- es,
- es1,
- },
- },
- {
- Name: "truncated okay",
- Objects: []interface{}{
- &truncateInteger{v: 1},
- &truncateUnsignedInteger{v: 1},
- &truncateFloat{v: 1.0},
- },
- },
- {
- Name: "truncated bad",
- Objects: []interface{}{
- &truncateInteger{v: math.MaxInt32 + 1},
- &truncateUnsignedInteger{v: math.MaxUint32 + 1},
- &truncateFloat{v: math.MaxFloat32 * 2},
- },
- Fail: true,
- },
- }
-
- runTest(t, tests)
-}
-
-// benchStruct is used for benchmarking.
-type benchStruct struct {
- b *benchStruct
-
- // Dummy data is included to ensure that these objects are large.
- // This is to detect possible regression when registering objects.
- _ [4096]byte
-}
-
-func (b *benchStruct) save(m Map) {
- m.Save("b", &b.b)
-}
-
-func (b *benchStruct) load(m Map) {
- m.LoadWait("b", &b.b)
- m.AfterLoad(b.afterLoad)
-}
-
-func (b *benchStruct) afterLoad() {
- // Do nothing, just force scheduling.
-}
-
-// buildObject builds a benchmark object.
-func buildObject(n int) (b *benchStruct) {
- for i := 0; i < n; i++ {
- b = &benchStruct{b: b}
- }
- return
-}
-
-func BenchmarkEncoding(b *testing.B) {
- b.StopTimer()
- bs := buildObject(b.N)
- var stats Stats
- b.StartTimer()
- if err := Save(ioutil.Discard, bs, &stats); err != nil {
- b.Errorf("save failed: %v", err)
- }
- b.StopTimer()
- if b.N > 1000 {
- b.Logf("breakdown (n=%d): %s", b.N, &stats)
- }
-}
-
-func BenchmarkDecoding(b *testing.B) {
- b.StopTimer()
- bs := buildObject(b.N)
- var newBS benchStruct
- buf := &bytes.Buffer{}
- if err := Save(buf, bs, nil); err != nil {
- b.Errorf("save failed: %v", err)
- }
- var stats Stats
- b.StartTimer()
- if err := Load(buf, &newBS, &stats); err != nil {
- b.Errorf("load failed: %v", err)
- }
- b.StopTimer()
- if b.N > 1000 {
- b.Logf("breakdown (n=%d): %s", b.N, &stats)
- }
-}
-
-func init() {
- Register("stateTest.smartStruct", (*smartStruct)(nil), Fns{
- Save: (*smartStruct).save,
- Load: (*smartStruct).load,
- })
- Register("stateTest.afterLoadStruct", (*afterLoadStruct)(nil), Fns{
- Save: (*afterLoadStruct).save,
- Load: (*afterLoadStruct).load,
- })
- Register("stateTest.valueLoadStruct", (*valueLoadStruct)(nil), Fns{
- Save: (*valueLoadStruct).save,
- Load: (*valueLoadStruct).load,
- })
- Register("stateTest.genericContainer", (*genericContainer)(nil), Fns{
- Save: (*genericContainer).save,
- Load: (*genericContainer).load,
- })
- Register("stateTest.sliceContainer", (*sliceContainer)(nil), Fns{
- Save: (*sliceContainer).save,
- Load: (*sliceContainer).load,
- })
- Register("stateTest.mapContainer", (*mapContainer)(nil), Fns{
- Save: (*mapContainer).save,
- Load: (*mapContainer).load,
- })
- Register("stateTest.pointerStruct", (*pointerStruct)(nil), Fns{
- Save: (*pointerStruct).save,
- Load: (*pointerStruct).load,
- })
- Register("stateTest.testImpl", (*testImpl)(nil), Fns{
- Save: (*testImpl).save,
- Load: (*testImpl).load,
- })
- Register("stateTest.testI", (*testI)(nil), Fns{
- Save: (*testI).save,
- Load: (*testI).load,
- })
- Register("stateTest.cycleStruct", (*cycleStruct)(nil), Fns{
- Save: (*cycleStruct).save,
- Load: (*cycleStruct).load,
- })
- Register("stateTest.badCycleStruct", (*badCycleStruct)(nil), Fns{
- Save: (*badCycleStruct).save,
- Load: (*badCycleStruct).load,
- })
- Register("stateTest.emptyStructPointer", (*emptyStructPointer)(nil), Fns{
- Save: (*emptyStructPointer).save,
- Load: (*emptyStructPointer).load,
- })
- Register("stateTest.truncateInteger", (*truncateInteger)(nil), Fns{
- Save: (*truncateInteger).save,
- Load: (*truncateInteger).load,
- })
- Register("stateTest.truncateUnsignedInteger", (*truncateUnsignedInteger)(nil), Fns{
- Save: (*truncateUnsignedInteger).save,
- Load: (*truncateUnsignedInteger).load,
- })
- Register("stateTest.truncateFloat", (*truncateFloat)(nil), Fns{
- Save: (*truncateFloat).save,
- Load: (*truncateFloat).load,
- })
- Register("stateTest.benchStruct", (*benchStruct)(nil), Fns{
- Save: (*benchStruct).save,
- Load: (*benchStruct).load,
- })
-}
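The test deleted above exercises the whole pkg/state API, but the core pattern it relies on, a type whose save/load methods drive a Map and which is made known to the package via Register and Fns, reduces to roughly the sketch below. Like the test, it assumes the code is compiled into the state package itself (plus a bytes import); the counter type and the "example.counter" registration name are illustrative only.

// counter mirrors smartStruct from the deleted test: a type made savable by
// pairing unexported save/load methods with a Register call.
type counter struct {
	N int
}

func (c *counter) save(m Map) { m.Save("N", &c.N) }
func (c *counter) load(m Map) { m.Load("N", &c.N) }

func init() {
	Register("example.counter", (*counter)(nil), Fns{
		Save: (*counter).save,
		Load: (*counter).load,
	})
}

// roundTrip serializes src and decodes the result into dst, the same
// Save-then-Load sequence the deleted runTest helper performs.
func roundTrip(src, dst *counter) error {
	var buf bytes.Buffer
	if err := Save(&buf, src, nil); err != nil {
		return err
	}
	return Load(bytes.NewReader(buf.Bytes()), dst, nil)
}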
diff --git a/pkg/state/statefile/BUILD b/pkg/state/statefile/BUILD
deleted file mode 100644
index 8a865d229..000000000
--- a/pkg/state/statefile/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools/go_stateify:defs.bzl", "go_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "statefile",
- srcs = ["statefile.go"],
- importpath = "gvisor.dev/gvisor/pkg/state/statefile",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/binary",
- "//pkg/compressio",
- ],
-)
-
-go_test(
- name = "statefile_test",
- size = "small",
- srcs = ["statefile_test.go"],
- embed = [":statefile"],
- deps = ["//pkg/compressio"],
-)
diff --git a/pkg/state/statefile/statefile_state_autogen.go b/pkg/state/statefile/statefile_state_autogen.go
new file mode 100755
index 000000000..438c485ca
--- /dev/null
+++ b/pkg/state/statefile/statefile_state_autogen.go
@@ -0,0 +1,4 @@
+// automatically generated by stateify.
+
+package statefile
+
diff --git a/pkg/state/statefile/statefile_test.go b/pkg/state/statefile/statefile_test.go
deleted file mode 100644
index 0b470fdec..000000000
--- a/pkg/state/statefile/statefile_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package statefile
-
-import (
- "bytes"
- crand "crypto/rand"
- "encoding/base64"
- "io"
- "math/rand"
- "runtime"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/compressio"
-)
-
-func randomKey() ([]byte, error) {
- r := make([]byte, base64.RawStdEncoding.DecodedLen(keySize))
- if _, err := io.ReadFull(crand.Reader, r); err != nil {
- return nil, err
- }
- key := make([]byte, keySize)
- base64.RawStdEncoding.Encode(key, r)
- return key, nil
-}
-
-type testCase struct {
- name string
- data []byte
- metadata map[string]string
-}
-
-func TestStatefile(t *testing.T) {
- rand.Seed(time.Now().Unix())
-
- cases := []testCase{
- // Various data sizes.
- {"nil", nil, nil},
- {"empty", []byte(""), nil},
- {"some", []byte("_"), nil},
- {"one", []byte("0"), nil},
- {"two", []byte("01"), nil},
- {"three", []byte("012"), nil},
- {"four", []byte("0123"), nil},
- {"five", []byte("01234"), nil},
- {"six", []byte("012356"), nil},
- {"seven", []byte("0123567"), nil},
- {"eight", []byte("01235678"), nil},
-
- // Make sure we have one longer than the hash length.
- {"longer than hash", []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), nil},
-
- // Make sure we have one longer than the chunk size.
- {"chunks", make([]byte, 3*compressionChunkSize), nil},
- {"large", make([]byte, 30*compressionChunkSize), nil},
-
- // Different metadata.
- {"one metadata", []byte("data"), map[string]string{"foo": "bar"}},
- {"two metadata", []byte("data"), map[string]string{"foo": "bar", "one": "two"}},
- }
-
- for _, c := range cases {
- // Generate a key.
- integrityKey, err := randomKey()
- if err != nil {
-			t.Errorf("can't generate key: got %v, expected nil", err)
- continue
- }
-
- t.Run(c.name, func(t *testing.T) {
- for _, key := range [][]byte{nil, integrityKey} {
- t.Run("key="+string(key), func(t *testing.T) {
- // Encoding happens via a buffer.
- var bufEncoded bytes.Buffer
- var bufDecoded bytes.Buffer
-
- // Do all the writing.
- w, err := NewWriter(&bufEncoded, key, c.metadata)
- if err != nil {
- t.Fatalf("error creating writer: got %v, expected nil", err)
- }
- if _, err := io.Copy(w, bytes.NewBuffer(c.data)); err != nil {
- t.Fatalf("error during write: got %v, expected nil", err)
- }
-
- // Finish the sum.
- if err := w.Close(); err != nil {
- t.Fatalf("error during close: got %v, expected nil", err)
- }
-
- t.Logf("original data: %d bytes, encoded: %d bytes.",
- len(c.data), len(bufEncoded.Bytes()))
-
- // Do all the reading.
- r, metadata, err := NewReader(bytes.NewReader(bufEncoded.Bytes()), key)
- if err != nil {
- t.Fatalf("error creating reader: got %v, expected nil", err)
- }
- if _, err := io.Copy(&bufDecoded, r); err != nil {
- t.Fatalf("error during read: got %v, expected nil", err)
- }
-
- // Check that the data matches.
- if !bytes.Equal(c.data, bufDecoded.Bytes()) {
- t.Fatalf("data didn't match (%d vs %d bytes)", len(bufDecoded.Bytes()), len(c.data))
- }
-
- // Check that the metadata matches.
- for k, v := range c.metadata {
- nv, ok := metadata[k]
- if !ok {
- t.Fatalf("missing metadata: %s", k)
- }
- if v != nv {
-							t.Fatalf("mismatched metadata for %s: got %s, expected %s", k, nv, v)
- }
- }
-
- // Change the data and verify that it fails.
- if key != nil {
- b := append([]byte(nil), bufEncoded.Bytes()...)
- b[rand.Intn(len(b))]++
- bufDecoded.Reset()
- r, _, err = NewReader(bytes.NewReader(b), key)
- if err == nil {
- _, err = io.Copy(&bufDecoded, r)
- }
- if err == nil {
- t.Error("got no error: expected error on data corruption")
- }
- }
-
- // Change the key and verify that it fails.
- newKey := integrityKey
- if len(key) > 0 {
- newKey = append([]byte{}, key...)
- newKey[rand.Intn(len(newKey))]++
- }
- bufDecoded.Reset()
- r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), newKey)
- if err == nil {
- _, err = io.Copy(&bufDecoded, r)
- }
- if err != compressio.ErrHashMismatch {
- t.Errorf("got error: %v, expected ErrHashMismatch on key mismatch", err)
- }
- })
- }
- })
- }
-}
-
-const benchmarkDataSize = 100 * 1024 * 1024
-
-func benchmark(b *testing.B, size int, write bool, compressible bool) {
- b.StopTimer()
- b.SetBytes(benchmarkDataSize)
-
- // Generate source data.
- var source []byte
- if compressible {
- // For compressible data, we use essentially all zeros.
- source = make([]byte, benchmarkDataSize)
- } else {
-		// For non-compressible data, we use random base64 data, which is
-		// only marginally compressible (a ratio of about 75%).
- var sourceBuf bytes.Buffer
- bufW := base64.NewEncoder(base64.RawStdEncoding, &sourceBuf)
- bufR := rand.New(rand.NewSource(0))
- if _, err := io.CopyN(bufW, bufR, benchmarkDataSize); err != nil {
- b.Fatalf("unable to seed random data: %v", err)
- }
- source = sourceBuf.Bytes()
- }
-
- // Generate a random key for integrity check.
- key, err := randomKey()
- if err != nil {
- b.Fatalf("error generating key: %v", err)
- }
-
- // Define our benchmark functions. Prior to running the readState
- // function here, you must execute the writeState function at least
- // once (done below).
- var stateBuf bytes.Buffer
- writeState := func() {
- stateBuf.Reset()
- w, err := NewWriter(&stateBuf, key, nil)
- if err != nil {
- b.Fatalf("error creating writer: %v", err)
- }
- for done := 0; done < len(source); {
- chunk := size // limit size.
- if done+chunk > len(source) {
- chunk = len(source) - done
- }
- n, err := w.Write(source[done : done+chunk])
- done += n
- if n == 0 && err != nil {
- b.Fatalf("error during write: %v", err)
- }
- }
- if err := w.Close(); err != nil {
- b.Fatalf("error closing writer: %v", err)
- }
- }
- readState := func() {
- tmpBuf := bytes.NewBuffer(stateBuf.Bytes())
- r, _, err := NewReader(tmpBuf, key)
- if err != nil {
- b.Fatalf("error creating reader: %v", err)
- }
- for done := 0; done < len(source); {
- chunk := size // limit size.
- if done+chunk > len(source) {
- chunk = len(source) - done
- }
- n, err := r.Read(source[done : done+chunk])
- done += n
- if n == 0 && err != nil {
- b.Fatalf("error during read: %v", err)
- }
- }
- }
- // Generate the state once without timing to ensure that buffers have
- // been appropriately allocated.
- writeState()
- if write {
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- writeState()
- }
- b.StopTimer()
- } else {
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- readState()
- }
- b.StopTimer()
- }
-}
-
-func BenchmarkWrite4KCompressible(b *testing.B) {
- benchmark(b, 4096, true, true)
-}
-
-func BenchmarkWrite4KNoncompressible(b *testing.B) {
- benchmark(b, 4096, true, false)
-}
-
-func BenchmarkWrite1MCompressible(b *testing.B) {
- benchmark(b, 1024*1024, true, true)
-}
-
-func BenchmarkWrite1MNoncompressible(b *testing.B) {
- benchmark(b, 1024*1024, true, false)
-}
-
-func BenchmarkRead4KCompressible(b *testing.B) {
- benchmark(b, 4096, false, true)
-}
-
-func BenchmarkRead4KNoncompressible(b *testing.B) {
- benchmark(b, 4096, false, false)
-}
-
-func BenchmarkRead1MCompressible(b *testing.B) {
- benchmark(b, 1024*1024, false, true)
-}
-
-func BenchmarkRead1MNoncompressible(b *testing.B) {
- benchmark(b, 1024*1024, false, false)
-}
-
-func init() {
- runtime.GOMAXPROCS(runtime.NumCPU())
-}
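For orientation, the write-then-read sequence that the deleted statefile test and benchmarks both follow, NewWriter with an optional integrity key and metadata, Write, Close, then NewReader returning the payload stream and the stored metadata, condenses to roughly the sketch below. It is written as if it lived inside the statefile package, as the test did; a nil key skips the integrity check, and supplying a different key on read is expected to fail with a hash mismatch.

// roundTrip is an illustrative helper, not part of the package: it encodes
// payload under key (which may be nil) and decodes it back.
func roundTrip(payload, key []byte, metadata map[string]string) ([]byte, map[string]string, error) {
	var encoded bytes.Buffer

	// Write the payload under the optional integrity key.
	w, err := NewWriter(&encoded, key, metadata)
	if err != nil {
		return nil, nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, nil, err
	}
	if err := w.Close(); err != nil {
		return nil, nil, err
	}

	// Read it back; the same key must be supplied for the hash check to pass.
	r, md, err := NewReader(bytes.NewReader(encoded.Bytes()), key)
	if err != nil {
		return nil, nil, err
	}
	var decoded bytes.Buffer
	if _, err := io.Copy(&decoded, r); err != nil {
		return nil, nil, err
	}
	return decoded.Bytes(), md, nil
}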