path: root/pkg/sentry/fs/proc/seqfile/seqfile_test.go
author     Googler <noreply@google.com>           2018-04-27 10:37:02 -0700
committer  Adin Scannell <ascannell@google.com>   2018-04-28 01:44:26 -0400
commit     d02b74a5dcfed4bfc8f2f8e545bca4d2afabb296 (patch)
tree       54f95eef73aee6bacbfc736fffc631be2605ed53 /pkg/sentry/fs/proc/seqfile/seqfile_test.go
parent     f70210e742919f40aa2f0934a22f1c9ba6dada62 (diff)
Check in gVisor.
PiperOrigin-RevId: 194583126
Change-Id: Ica1d8821a90f74e7e745962d71801c598c652463
Diffstat (limited to 'pkg/sentry/fs/proc/seqfile/seqfile_test.go')
-rw-r--r--  pkg/sentry/fs/proc/seqfile/seqfile_test.go  272
1 file changed, 272 insertions, 0 deletions
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_test.go b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
new file mode 100644
index 000000000..0bf39ad82
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
@@ -0,0 +1,272 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seqfile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "testing"
+
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+ ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
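+// seqTest is a test data source for a seqfile: it serves ten SeqData entries,
+// each containing ten bytes equal to the entry's index.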
+type seqTest struct {
+ actual []SeqData
+ update bool
+}
+
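+// Init populates the source with ten SeqData entries of ten bytes each.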
+func (s *seqTest) Init() {
+ var sq []SeqData
+ // Create some SeqData.
+ for i := 0; i < 10; i++ {
+ var b []byte
+ for j := 0; j < 10; j++ {
+ b = append(b, byte(i))
+ }
+ sq = append(sq, SeqData{
+ Buf: b,
+ Handle: &testHandle{i: i},
+ })
+ }
+ s.actual = sq
+}
+
+// NeedsUpdate reports whether we need to update the data we've previously read.
+func (s *seqTest) NeedsUpdate(int64) bool {
+ return s.update
+}
+
+// ReadSeqFileData returns the SeqData entries whose handles come after the
+// given handle, or all entries when handle is nil.
+func (s *seqTest) ReadSeqFileData(handle SeqHandle) ([]SeqData, int64) {
+ if handle == nil {
+ return s.actual, 0
+ }
+ h := *handle.(*testHandle)
+ var ret []SeqData
+ for _, b := range s.actual {
+ // Include only the entries that come after the given handle.
+ h2 := *b.Handle.(*testHandle)
+ if h2.i > h.i {
+ ret = append(ret, b)
+ }
+ }
+ return ret, 0
+}
+
+// flatten concatenates the given byte slices into a single slice.
+func flatten(buf ...[]byte) []byte {
+ var flat []byte
+ for _, b := range buf {
+ flat = append(flat, b...)
+ }
+ return flat
+}
+
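+// testHandle is a SeqHandle that identifies a SeqData entry by its index.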
+type testHandle struct {
+ i int
+}
+
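+// testTable describes a single read: the offset and buffer size passed to
+// DeprecatedPreadv, and the data and error the read is expected to return.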
+type testTable struct {
+ offset int64
+ readBufferSize int
+ expectedData []byte
+ expectedError error
+}
+
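+// runTableTests performs each read described in table against n and returns
+// an error for the first mismatched error, length, or data.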
+func runTableTests(ctx context.Context, table []testTable, n fs.InodeOperations) error {
+ for _, tt := range table {
+ data := make([]byte, tt.readBufferSize)
+ resultLen, err := n.DeprecatedPreadv(ctx, usermem.BytesIOSequence(data), tt.offset)
+ if err != tt.expectedError {
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
+ }
+ expectedLen := int64(len(tt.expectedData))
+ if resultLen != expectedLen {
+ // A mismatched length is enough to fail; the data comparison below is skipped.
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (size) => %v expected %v", tt.readBufferSize, tt.offset, resultLen, expectedLen)
+ }
+ if !bytes.Equal(data[:expectedLen], tt.expectedData) {
+ return fmt.Errorf("t.Preadv(len: %v, offset: %v) (data) => %v expected %v", tt.readBufferSize, tt.offset, data[:expectedLen], tt.expectedData)
+ }
+ }
+ return nil
+}
+
+func TestSeqFile(t *testing.T) {
+ testSource := &seqTest{}
+ testSource.Init()
+
+ // Create a seqfile inode in a ramfs directory.
+ m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+ ctx := contexttest.Context(t)
+ contents := map[string]*fs.Inode{
+ "foo": NewSeqFileInode(ctx, testSource, m),
+ }
+ root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+
+ // Look the seqfile up in the root directory.
+ inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
+ dirent2, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo for n2: %v", err)
+ }
+ n2 := dirent2.Inode.InodeOperations
+
+ // Writes to the seqfile should fail.
+ if _, err := n2.DeprecatedPwritev(ctx, usermem.BytesIOSequence([]byte("test")), 0); err == nil {
+ t.Fatalf("write to seqfile n2 unexpectedly succeeded")
+ }
+
+ // A second lookup should yield the same InodeOperations.
+ dirent3, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo: %v", err)
+ }
+ n3 := dirent3.Inode.InodeOperations
+
+ if n2 != n3 {
+ t.Error("got n2 != n3, want same")
+ }
+
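+ // Enable updates so that every read refreshes the data from the source.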
+ testSource.update = true
+
+ table := []testTable{
+ // Read past the end.
+ {100, 4, []byte{}, io.EOF},
+ {110, 4, []byte{}, io.EOF},
+ {200, 4, []byte{}, io.EOF},
+ // Read a truncated first line.
+ {0, 4, testSource.actual[0].Buf[:4], nil},
+ // Read the whole first line.
+ {0, 10, testSource.actual[0].Buf, nil},
+ // Read the whole first line + 5 bytes of second line.
+ {0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil},
+ // First 4 bytes of the second line.
+ {10, 4, testSource.actual[1].Buf[:4], nil},
+ // Read the two first lines.
+ {0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil},
+ // Read three lines.
+ {0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil},
+ // Read everything, but use a bigger buffer than necessary.
+ {0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil},
+ // Read the last 3 bytes.
+ {97, 10, testSource.actual[9].Buf[7:], nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err)
+ }
+
+ // Disable updates and do it again.
+ testSource.update = false
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
+ }
+}
+
+// TestSeqFileFileUpdated tests that reads behave correctly when the seqfile's
+// underlying data is updated between reads.
+func TestSeqFileFileUpdated(t *testing.T) {
+ testSource := &seqTest{}
+ testSource.Init()
+ testSource.update = true
+
+ // Create a seqfile inode in a ramfs directory.
+ m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+ ctx := contexttest.Context(t)
+ contents := map[string]*fs.Inode{
+ "foo": NewSeqFileInode(ctx, testSource, m),
+ }
+ root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+
+ // Look the seqfile up in the root directory.
+ inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
+ dirent2, err := root.Lookup(ctx, inode, "foo")
+ if err != nil {
+ t.Fatalf("failed to walk to foo for n2: %v", err)
+ }
+ n2 := dirent2.Inode.InodeOperations
+
+ table := []testTable{
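+ // Read the whole first line plus 6 bytes of the second.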
+ {0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed: %v", err)
+ }
+ // Delete the first entry.
+ cut := testSource.actual[0].Buf
+ testSource.actual = testSource.actual[1:]
+
+ table = []testTable{
+ // Try reading buffer 0 with an offset. This will not delete the old data.
+ {1, 5, cut[1:6], nil},
+ // Reset our file by reading at offset 0.
+ {0, 10, testSource.actual[0].Buf, nil},
+ {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
+ // Read the same data a second time.
+ {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
+ // Read the following two lines.
+ {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after removing first entry: %v", err)
+ }
+
+ // Add a new duplicate line in the middle (6666...)
+ after := testSource.actual[5:]
+ testSource.actual = testSource.actual[:4]
+ // Note the list must be sorted.
+ testSource.actual = append(testSource.actual, after[0])
+ testSource.actual = append(testSource.actual, after...)
+
+ table = []testTable{
+ {50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after adding middle entry: %v", err)
+ }
+ // This will be used in a later test.
+ oldTestData := testSource.actual
+
+ // Delete everything.
+ testSource.actual = testSource.actual[:0]
+ table = []testTable{
+ {20, 20, []byte{}, io.EOF},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after removing all entries: %v", err)
+ }
+ // Restore some of the data.
+ testSource.actual = oldTestData[:1]
+ table = []testTable{
+ {6, 20, testSource.actual[0].Buf[6:], nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after adding first entry back: %v", err)
+ }
+
+ // Re-extend the data
+ testSource.actual = oldTestData
+ table = []testTable{
+ {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
+ }
+ if err := runTableTests(ctx, table, n2); err != nil {
+ t.Errorf("runTableTest failed after extending testSource: %v", err)
+ }
+}