summaryrefslogtreecommitdiffhomepage
path: root/pkg/sentry/fs/proc/seqfile
diff options
context:
space:
mode:
authorGoogler <noreply@google.com>2018-04-27 10:37:02 -0700
committerAdin Scannell <ascannell@google.com>2018-04-28 01:44:26 -0400
commitd02b74a5dcfed4bfc8f2f8e545bca4d2afabb296 (patch)
tree54f95eef73aee6bacbfc736fffc631be2605ed53 /pkg/sentry/fs/proc/seqfile
parentf70210e742919f40aa2f0934a22f1c9ba6dada62 (diff)
Check in gVisor.
PiperOrigin-RevId: 194583126 Change-Id: Ica1d8821a90f74e7e745962d71801c598c652463
Diffstat (limited to 'pkg/sentry/fs/proc/seqfile')
-rw-r--r--pkg/sentry/fs/proc/seqfile/BUILD55
-rw-r--r--pkg/sentry/fs/proc/seqfile/seqfile.go232
-rw-r--r--pkg/sentry/fs/proc/seqfile/seqfile_test.go272
3 files changed, 559 insertions, 0 deletions
diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD
new file mode 100644
index 000000000..48dd25e5b
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/BUILD
@@ -0,0 +1,55 @@
+# BUILD rules for the seqfile package. go_stateify generates the
+# state-save/restore code (seqfile_state.go) that go_library compiles in.
+package(licenses = ["notice"]) # Apache 2.0
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("//tools/go_stateify:defs.bzl", "go_stateify")
+
+# Generates seqfile_state.go from the stateful types declared in seqfile.go.
+go_stateify(
+ name = "seqfile_state",
+ srcs = [
+ "seqfile.go",
+ ],
+ out = "seqfile_state.go",
+ package = "seqfile",
+)
+
+go_library(
+ name = "seqfile",
+ srcs = [
+ "seqfile.go",
+ "seqfile_state.go",
+ ],
+ importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile",
+ visibility = ["//pkg/sentry:internal"],
+ deps = [
+ "//pkg/sentry/context",
+ "//pkg/sentry/fs",
+ "//pkg/sentry/fs/proc/device",
+ "//pkg/sentry/fs/ramfs",
+ "//pkg/sentry/kernel/time",
+ "//pkg/sentry/usermem",
+ "//pkg/state",
+ ],
+)
+
+# State generation for the test's SeqSource implementation (seqTest).
+go_stateify(
+ name = "seqfile_test_state",
+ srcs = ["seqfile_test.go"],
+ out = "seqfile_test_state.go",
+ package = "seqfile",
+)
+
+go_test(
+ name = "seqfile_test",
+ size = "small",
+ srcs = [
+ "seqfile_test.go",
+ "seqfile_test_state.go",
+ ],
+ embed = [":seqfile"],
+ deps = [
+ "//pkg/sentry/context/contexttest",
+ "//pkg/sentry/fs",
+ "//pkg/sentry/fs/ramfs/test",
+ "//pkg/sentry/usermem",
+ ],
+)
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
new file mode 100644
index 000000000..e37a85869
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -0,0 +1,232 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seqfile
+
+import (
+ "io"
+ "sync"
+
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs"
+ ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// SeqHandle is a helper handle to seek in the file. It is opaque to this
+// package; only the SeqSource that produced it interprets it.
+type SeqHandle interface{}
+
+// SeqData holds the data for one unit in the file.
+type SeqData struct {
+ // The data to be returned to the user.
+ Buf []byte
+
+ // A seek handle used to find the next valid unit in ReadSeqFileData.
+ Handle SeqHandle
+}
+
+// SeqSource is a data source for a SeqFile file.
+type SeqSource interface {
+ // NeedsUpdate returns true if the consumer of SeqData should call
+ // ReadSeqFileData again. Generation is the generation returned by
+ // ReadSeqFileData or 0.
+ NeedsUpdate(generation int64) bool
+
+ // ReadSeqFileData returns a slice of SeqData ordered by unit and the
+ // current generation. The first entry in the slice is greater than
+ // the handle. If handle is nil then all known records are returned.
+ // Generation must always be greater than 0.
+ ReadSeqFileData(handle SeqHandle) ([]SeqData, int64)
+}
+
+// SeqGenerationCounter is a counter to keep track if the SeqSource should be
+// updated. SeqGenerationCounter is not thread-safe and should be protected
+// with a mutex.
+//
+// The zero value (generation 0) means "never read": per the SeqSource
+// contract, generations returned by ReadSeqFileData are always > 0.
+type SeqGenerationCounter struct {
+ // The generation that the SeqData is at.
+ generation int64
+}
+
+// SetGeneration sets the generation to the new value, be careful to not set it
+// to a value less than current.
+func (s *SeqGenerationCounter) SetGeneration(generation int64) {
+ s.generation = generation
+}
+
+// Update increments the current generation.
+func (s *SeqGenerationCounter) Update() {
+ s.generation++
+}
+
+// Generation returns the current generation counter.
+func (s *SeqGenerationCounter) Generation() int64 {
+ return s.generation
+}
+
+// IsCurrent returns whether the given generation is current or not.
+func (s *SeqGenerationCounter) IsCurrent(generation int64) bool {
+ return s.Generation() == generation
+}
+
+// SeqFile is used to provide dynamic files that can be ordered by record.
+type SeqFile struct {
+ ramfs.Entry
+
+ // mu protects the fields below.
+ mu sync.Mutex `state:"nosave"`
+
+ SeqSource
+
+ // source caches the records read from the SeqSource so far, in order.
+ source []SeqData
+ // generation is the SeqSource generation at which source was read.
+ generation int64
+ // lastRead is the offset at which the most recent read started; a
+ // subsequent read at a smaller offset is treated as a backward seek
+ // and forces a refresh of source (see DeprecatedPreadv).
+ lastRead int64
+}
+
+// NewSeqFile returns a seqfile suitable for use by external consumers.
+// The file is read-only (mode 0444).
+func NewSeqFile(ctx context.Context, source SeqSource) *SeqFile {
+ s := &SeqFile{SeqSource: source}
+ s.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444))
+ return s
+}
+
+// NewSeqFileInode returns an Inode with SeqFile InodeOperations.
+func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource) *fs.Inode {
+ iops := NewSeqFile(ctx, source)
+ sattr := fs.StableAttr{
+ DeviceID: device.ProcDevice.DeviceID(),
+ InodeID: device.ProcDevice.NextIno(),
+ BlockSize: usermem.PageSize,
+ Type: fs.SpecialFile,
+ }
+ return fs.NewInode(iops, msrc, sattr)
+}
+
+// UnstableAttr returns unstable attributes of the SeqFile. The modification
+// time is reported as "now" because the contents are generated dynamically.
+func (s *SeqFile) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
+ // NOTE(review): the error from Entry.UnstableAttr is discarded;
+ // presumably it cannot fail for a ramfs Entry — confirm upstream.
+ uattr, _ := s.Entry.UnstableAttr(ctx, inode)
+ uattr.ModificationTime = ktime.NowFromContext(ctx)
+ return uattr, nil
+}
+
+// findIndexAndOffset finds the unit that corresponds to a certain offset.
+// Returns the unit and the offset within the unit. If there are not enough
+// units len(data) and leftover offset is returned.
+//
+// Example: with two units of 10 bytes each, offset 13 yields (1, 3) and
+// offset 25 yields (2, 5).
+func findIndexAndOffset(data []SeqData, offset int64) (int, int64) {
+ for i, buf := range data {
+ l := int64(len(buf.Buf))
+ if offset < l {
+ return i, offset
+ }
+ // Skip past this whole unit.
+ offset -= l
+ }
+ return len(data), offset
+}
+
+// DeprecatedPreadv reads from the file at the given offset.
+//
+// The read is served from the cached records in s.source, refreshing them
+// from the SeqSource when the offset is at/past the cached end, when the
+// source reports NeedsUpdate, or when the caller has seeked backwards.
+func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.Entry.NotifyAccess(ctx)
+ // Remember where this read started so the next read can detect a
+ // backward seek (lastRead > offset below).
+ defer func() { s.lastRead = offset }()
+
+ updated := false
+
+ // Try to find where we should start reading this file.
+ i, recordOffset := findIndexAndOffset(s.source, offset)
+ if i == len(s.source) {
+ // Ok, we're at EOF. Let's first check to see if there might be
+ // more data available to us. If there is more data, add it to
+ // the end and try reading again.
+ if !s.SeqSource.NeedsUpdate(s.generation) {
+ return 0, io.EOF
+ }
+ oldLen := len(s.source)
+ s.updateSourceLocked(len(s.source))
+ updated = true
+ // We know that we had consumed everything up until this point
+ // so we search in the new slice instead of starting over.
+ i, recordOffset = findIndexAndOffset(s.source[oldLen:], recordOffset)
+ i += oldLen
+ // i is at most the length of the slice which is
+ // len(s.source) - oldLen. So at most i will be equal to
+ // len(s.source).
+ if i == len(s.source) {
+ return 0, io.EOF
+ }
+ }
+
+ var done int64
+ // We're reading parts of a record, finish reading the current object
+ // before continuing on to the next. We don't refresh our data source
+ // before this record is completed.
+ if recordOffset != 0 {
+ n, err := dst.CopyOut(ctx, s.source[i].Buf[recordOffset:])
+ done += int64(n)
+ dst = dst.DropFirst(n)
+ if dst.NumBytes() == 0 || err != nil {
+ return done, err
+ }
+ i++
+ }
+
+ // Next/New unit, update the source file if necessary. Make an extra
+ // check to see if we've seeked backwards and if so always update our
+ // data source.
+ if !updated && (s.SeqSource.NeedsUpdate(s.generation) || s.lastRead > offset) {
+ s.updateSourceLocked(i)
+ // recordOffset is 0 here and we won't update records behind the
+ // current one so recordOffset is still 0 even though source
+ // just got updated. Just read the next record.
+ }
+
+ // Finish by reading all the available data.
+ for _, buf := range s.source[i:] {
+ n, err := dst.CopyOut(ctx, buf.Buf)
+ done += int64(n)
+ dst = dst.DropFirst(n)
+ if dst.NumBytes() == 0 || err != nil {
+ return done, err
+ }
+ }
+
+ // If the file shrank (entries not yet read were removed above)
+ // while we tried to read we can end up with nothing read.
+ if done == 0 && dst.NumBytes() != 0 {
+ return 0, io.EOF
+ }
+ return done, nil
+}
+
+// updateSourceLocked requires that s.mu is held.
+//
+// It keeps the first `record` cached entries and re-reads everything after
+// them from the SeqSource, using the handle of entry record-1 (or nil to
+// re-read from the beginning when record is 0). s.generation is set to the
+// generation reported by the source.
+func (s *SeqFile) updateSourceLocked(record int) {
+ var h SeqHandle
+ if record == 0 {
+ h = nil
+ } else {
+ h = s.source[record-1].Handle
+ }
+ // Save what we have previously read.
+ s.source = s.source[:record]
+ var newSource []SeqData
+ newSource, s.generation = s.SeqSource.ReadSeqFileData(h)
+ s.source = append(s.source, newSource...)
+}
+
+// DeprecatedPwritev is always denied: SeqFiles are read-only.
+func (*SeqFile) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) {
+ return 0, ramfs.ErrDenied
+}
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_test.go b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
new file mode 100644
index 000000000..0bf39ad82
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
@@ -0,0 +1,272 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seqfile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "testing"
+
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+ ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// seqTest is a fake SeqSource used by the tests. Its records are held in
+// actual and can be mutated between reads to simulate a changing file.
+type seqTest struct {
+ actual []SeqData
+ // update is the value NeedsUpdate will report.
+ update bool
+}
+
+// Init populates the source with 10 records of 10 bytes each; record i is
+// filled with byte value i and carries a testHandle{i} for seeking.
+func (s *seqTest) Init() {
+ var sq []SeqData
+ // Create some SeqData.
+ for i := 0; i < 10; i++ {
+ var b []byte
+ for j := 0; j < 10; j++ {
+ b = append(b, byte(i))
+ }
+ sq = append(sq, SeqData{
+ Buf: b,
+ Handle: &testHandle{i: i},
+ })
+ }
+ s.actual = sq
+}
+
+// NeedsUpdate reports whether we need to update the data we've previously read.
+func (s *seqTest) NeedsUpdate(int64) bool {
+ return s.update
+}
+
+// ReadSeqFileData returns a slice of SeqData which contains elements
+// greater than the handle.
+//
+// NOTE(review): this returns generation 0, which violates the SeqSource
+// contract ("Generation must always be greater than 0"); the tests do not
+// depend on the generation value.
+func (s *seqTest) ReadSeqFileData(handle SeqHandle) ([]SeqData, int64) {
+ if handle == nil {
+ return s.actual, 0
+ }
+ h := *handle.(*testHandle)
+ var ret []SeqData
+ for _, b := range s.actual {
+ // We want the next one.
+ h2 := *b.Handle.(*testHandle)
+ if h2.i > h.i {
+ ret = append(ret, b)
+ }
+ }
+ return ret, 0
+}
+
+// Flatten a slice of slices into one slice.
+func flatten(buf ...[]byte) []byte {
+ var flat []byte
+ for _, b := range buf {
+ flat = append(flat, b...)
+ }
+ return flat
+}
+
+// testHandle is the SeqHandle used by seqTest: the index of a record.
+type testHandle struct {
+ i int
+}
+
+// testTable describes one read: where to read, how much, and what to expect.
+type testTable struct {
+ offset int64
+ readBufferSize int
+ expectedData []byte
+ expectedError error
+}
+
+// runTableTests performs each read described in table against n and returns
+// an error describing the first mismatch in error, length, or data.
+func runTableTests(ctx context.Context, table []testTable, n fs.InodeOperations) error {
+ for _, tt := range table {
+ data := make([]byte, tt.readBufferSize)
+ resultLen, err := n.DeprecatedPreadv(ctx, usermem.BytesIOSequence(data), tt.offset)
+ if err != tt.expectedError {
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
+ }
+ expectedLen := int64(len(tt.expectedData))
+ if resultLen != expectedLen {
+ // Length mismatch: report the sizes. Note that this
+ // returns immediately, so the data comparison below is
+ // not reached for this entry.
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (size) => %v expected %v", tt.readBufferSize, tt.offset, resultLen, expectedLen)
+ }
+ if !bytes.Equal(data[:expectedLen], tt.expectedData) {
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (data) => %v expected %v", tt.readBufferSize, tt.offset, data[:expectedLen], tt.expectedData)
+ }
+ }
+ return nil
+}
+
+// TestSeqFile exercises basic reads at various offsets and buffer sizes,
+// both with and without the source reporting that it needs updates.
+func TestSeqFile(t *testing.T) {
+ testSource := &seqTest{}
+ testSource.Init()
+
+ // Create a file that can be R/W.
+ m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+ ctx := contexttest.Context(t)
+ contents := map[string]*fs.Inode{
+ "foo": NewSeqFileInode(ctx, testSource, m),
+ }
+ root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+
+ // How about opening it?
+ inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
+ dirent2, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo for n2: %v", err)
+ }
+ n2 := dirent2.Inode.InodeOperations
+
+ // Writing? Must be denied.
+ // NOTE(review): err is nil on the Fatalf path here, so the %v always
+ // prints <nil>.
+ if _, err := n2.DeprecatedPwritev(nil, usermem.BytesIOSequence([]byte("test")), 0); err == nil {
+ t.Fatalf("managed to write to n2: %v", err)
+ }
+
+ // How about reading?
+ dirent3, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo: %v", err)
+ }
+ n3 := dirent3.Inode.InodeOperations
+
+ // Both lookups should resolve to the same InodeOperations.
+ if n2 != n3 {
+ t.Error("got n2 != n3, want same")
+ }
+
+ testSource.update = true
+
+ // Each record is 10 bytes, 10 records, so the file is 100 bytes long.
+ table := []testTable{
+ // Read past the end.
+ {100, 4, []byte{}, io.EOF},
+ {110, 4, []byte{}, io.EOF},
+ {200, 4, []byte{}, io.EOF},
+ // Read a truncated first line.
+ {0, 4, testSource.actual[0].Buf[:4], nil},
+ // Read the whole first line.
+ {0, 10, testSource.actual[0].Buf, nil},
+ // Read the whole first line + 5 bytes of second line.
+ {0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil},
+ // First 4 bytes of the second line.
+ {10, 4, testSource.actual[1].Buf[:4], nil},
+ // Read the two first lines.
+ {0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil},
+ // Read three lines.
+ {0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil},
+ // Read everything, but use a bigger buffer than necessary.
+ {0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil},
+ // Read the last 3 bytes.
+ {97, 10, testSource.actual[9].Buf[7:], nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err)
+ }
+
+ // Disable updates and do it again.
+ testSource.update = false
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
+ }
+}
+
+// Test that we behave correctly when the file is updated.
+//
+// The test mutates testSource.actual between reads — removing, inserting,
+// and restoring records — and checks that reads see the refreshed data
+// except when resuming a partially-read record.
+func TestSeqFileFileUpdated(t *testing.T) {
+ testSource := &seqTest{}
+ testSource.Init()
+ testSource.update = true
+
+ // Create a file that can be R/W.
+ m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+ ctx := contexttest.Context(t)
+ contents := map[string]*fs.Inode{
+ "foo": NewSeqFileInode(ctx, testSource, m),
+ }
+ root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+
+ // How about opening it?
+ inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
+ dirent2, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo for n2: %v", err)
+ }
+ n2 := dirent2.Inode.InodeOperations
+
+ // Read across records 0 and 1 so record 1 is partially consumed.
+ table := []testTable{
+ {0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed: %v", err)
+ }
+ // Delete the first entry.
+ cut := testSource.actual[0].Buf
+ testSource.actual = testSource.actual[1:]
+
+ table = []testTable{
+ // Try reading buffer 0 with an offset. This will not delete the old data.
+ {1, 5, cut[1:6], nil},
+ // Reset our file by reading at offset 0.
+ {0, 10, testSource.actual[0].Buf, nil},
+ {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
+ // Read the same data a second time.
+ {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
+ // Read the following two lines.
+ {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after removing first entry: %v", err)
+ }
+
+ // Add a new duplicate line in the middle (6666...)
+ after := testSource.actual[5:]
+ testSource.actual = testSource.actual[:4]
+ // Note the list must be sorted.
+ testSource.actual = append(testSource.actual, after[0])
+ testSource.actual = append(testSource.actual, after...)
+
+ table = []testTable{
+ {50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after adding middle entry: %v", err)
+ }
+ // This will be used in a later test.
+ oldTestData := testSource.actual
+
+ // Delete everything.
+ testSource.actual = testSource.actual[:0]
+ table = []testTable{
+ {20, 20, []byte{}, io.EOF},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after removing all entries: %v", err)
+ }
+ // Restore some of the data.
+ testSource.actual = oldTestData[:1]
+ table = []testTable{
+ {6, 20, testSource.actual[0].Buf[6:], nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after adding first entry back: %v", err)
+ }
+
+ // Re-extend the data
+ testSource.actual = oldTestData
+ table = []testTable{
+ {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after extending testSource: %v", err)
+ }
+}