author    | Nicolas Lacasse <nlacasse@google.com> | 2019-01-14 20:33:29 -0800
committer | Shentubot <shentubot@google.com>      | 2019-01-14 20:34:28 -0800
commit    | dc8450b5676d4c4ac9bcfa23cabd862e0060527d (patch)
tree      | a4ef1ad59764f46f674b7003221ba8ae399b9e65 /pkg/sentry/fs/proc/seqfile
parent    | 343ebe9789087b099ea7feae19879f5c24e59bf1 (diff)
Remove fs.Handle, ramfs.Entry, and all the DeprecatedFileOperations.
More helper structs have been added to the fsutil package to make it easier to
implement fs.InodeOperations and fs.FileOperations.
PiperOrigin-RevId: 229305982
Change-Id: Ib6f8d3862f4216745116857913dbfa351530223b
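The commit message above describes composing fsutil helper structs so that only the interesting methods of fs.InodeOperations and fs.FileOperations need hand-written code; the SeqFile changes in the diff below follow exactly that pattern. As a rough, hypothetical sketch of the idea (the example package, staticFile, staticFileOperations, and newStaticFile names are invented for illustration; the embedded helpers, fs calls, and method signatures are the ones appearing in this patch, so whether this exact composition compiles depends on the fsutil version in the tree):

```go
// Package example is a hypothetical illustration, not part of this change:
// it shows the fsutil composition pattern that SeqFile adopts below.
package example

import (
	"io"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

// staticFile is a read-only, virtual file with fixed contents. The embedded
// fsutil helpers supply default implementations for most of
// fs.InodeOperations; only GetFile is written by hand.
type staticFile struct {
	fsutil.InodeGenericChecker `state:"nosave"`
	fsutil.InodeNoopRelease    `state:"nosave"`
	fsutil.InodeNoopWriteOut   `state:"nosave"`
	fsutil.InodeNotDirectory   `state:"nosave"`
	fsutil.InodeNotMappable    `state:"nosave"`
	fsutil.InodeNotSocket      `state:"nosave"`
	fsutil.InodeNotSymlink     `state:"nosave"`
	fsutil.InodeNotTruncatable `state:"nosave"`
	fsutil.InodeVirtual        `state:"nosave"`

	fsutil.InodeSimpleExtendedAttributes
	fsutil.InodeSimpleAttributes

	data []byte
}

var _ fs.InodeOperations = (*staticFile)(nil)

func newStaticFile(ctx context.Context, data []byte) *staticFile {
	return &staticFile{
		InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		data:                  data,
	}
}

// GetFile hands out file operations that are themselves mostly composed of
// fsutil helpers.
func (f *staticFile) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &staticFileOperations{data: f.data}), nil
}

// staticFileOperations implements fs.FileOperations; only Read and Write
// are hand-written.
type staticFileOperations struct {
	waiter.AlwaysReady       `state:"nosave"`
	fsutil.FileGenericSeek   `state:"nosave"`
	fsutil.FileNoIoctl       `state:"nosave"`
	fsutil.FileNoMMap        `state:"nosave"`
	fsutil.FileNoopFlush     `state:"nosave"`
	fsutil.FileNoopFsync     `state:"nosave"`
	fsutil.FileNoopRelease   `state:"nosave"`
	fsutil.FileNotDirReaddir `state:"nosave"`

	data []byte
}

var _ fs.FileOperations = (*staticFileOperations)(nil)

// Write rejects all writes, mirroring the read-only SeqFile behavior.
func (*staticFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
	return 0, syserror.EACCES
}

// Read copies out the fixed contents starting at offset.
func (f *staticFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if offset >= int64(len(f.data)) {
		return 0, io.EOF
	}
	n, err := dst.CopyOut(ctx, f.data[offset:])
	return int64(n), err
}
```

Each embedded helper contributes a default method set, so a new pseudo-file only spells out the behavior that actually differs, here GetFile, Read, and Write.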
Diffstat (limited to 'pkg/sentry/fs/proc/seqfile')
-rw-r--r-- | pkg/sentry/fs/proc/seqfile/BUILD           |   7
-rw-r--r-- | pkg/sentry/fs/proc/seqfile/seqfile.go      | 133
-rw-r--r-- | pkg/sentry/fs/proc/seqfile/seqfile_test.go |  45

3 files changed, 119 insertions, 66 deletions
```diff
diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD
index 53c475652..b4ba64e10 100644
--- a/pkg/sentry/fs/proc/seqfile/BUILD
+++ b/pkg/sentry/fs/proc/seqfile/BUILD
@@ -8,12 +8,15 @@ go_library(
     importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile",
     visibility = ["//pkg/sentry:internal"],
     deps = [
+        "//pkg/abi/linux",
         "//pkg/sentry/context",
         "//pkg/sentry/fs",
+        "//pkg/sentry/fs/fsutil",
         "//pkg/sentry/fs/proc/device",
-        "//pkg/sentry/fs/ramfs",
         "//pkg/sentry/kernel/time",
         "//pkg/sentry/usermem",
+        "//pkg/syserror",
+        "//pkg/waiter",
     ],
 )
 
@@ -26,7 +29,7 @@ go_test(
         "//pkg/sentry/context",
         "//pkg/sentry/context/contexttest",
         "//pkg/sentry/fs",
-        "//pkg/sentry/fs/ramfs/test",
+        "//pkg/sentry/fs/ramfs",
         "//pkg/sentry/usermem",
     ],
 )
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
index 0499ba65b..16fc6789e 100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -18,12 +18,15 @@ import (
     "io"
     "sync"
 
+    "gvisor.googlesource.com/gvisor/pkg/abi/linux"
     "gvisor.googlesource.com/gvisor/pkg/sentry/context"
     "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
     "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device"
-    "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs"
     ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+    "gvisor.googlesource.com/gvisor/pkg/syserror"
+    "gvisor.googlesource.com/gvisor/pkg/waiter"
 )
 
 // SeqHandle is a helper handle to seek in the file.
@@ -87,7 +90,18 @@ func (s *SeqGenerationCounter) IsCurrent(generation int64) bool {
 //
 // +stateify savable
 type SeqFile struct {
-    ramfs.Entry
+    fsutil.InodeGenericChecker `state:"nosave"`
+    fsutil.InodeNoopRelease    `state:"nosave"`
+    fsutil.InodeNoopWriteOut   `state:"nosave"`
+    fsutil.InodeNotDirectory   `state:"nosave"`
+    fsutil.InodeNotMappable    `state:"nosave"`
+    fsutil.InodeNotSocket      `state:"nosave"`
+    fsutil.InodeNotSymlink     `state:"nosave"`
+    fsutil.InodeNotTruncatable `state:"nosave"`
+    fsutil.InodeVirtual        `state:"nosave"`
+
+    fsutil.InodeSimpleExtendedAttributes
+    fsutil.InodeSimpleAttributes
 
     // mu protects the fields below.
     mu sync.Mutex `state:"nosave"`
@@ -99,11 +113,14 @@ type SeqFile struct {
     lastRead int64
 }
 
+var _ fs.InodeOperations = (*SeqFile)(nil)
+
 // NewSeqFile returns a seqfile suitable for use by external consumers.
 func NewSeqFile(ctx context.Context, source SeqSource) *SeqFile {
-    s := &SeqFile{SeqSource: source}
-    s.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444))
-    return s
+    return &SeqFile{
+        InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
+        SeqSource:             source,
+    }
 }
 
 // NewSeqFileInode returns an Inode with SeqFile InodeOperations.
@@ -120,11 +137,19 @@ func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource
 
 // UnstableAttr returns unstable attributes of the SeqFile.
 func (s *SeqFile) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
-    uattr, _ := s.Entry.UnstableAttr(ctx, inode)
+    uattr, err := s.InodeSimpleAttributes.UnstableAttr(ctx, inode)
+    if err != nil {
+        return fs.UnstableAttr{}, err
+    }
     uattr.ModificationTime = ktime.NowFromContext(ctx)
     return uattr, nil
 }
 
+// GetFile implements fs.InodeOperations.GetFile.
+func (s *SeqFile) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
+    return fs.NewFile(ctx, dirent, flags, &seqFileOperations{seqFile: s}), nil
+}
+
 // findIndexAndOffset finds the unit that corresponds to a certain offset.
 // Returns the unit and the offset within the unit. If there are not enough
 // units len(data) and leftover offset is returned.
@@ -139,36 +164,74 @@ func findIndexAndOffset(data []SeqData, offset int64) (int, int64) {
     return len(data), offset
 }
 
-// DeprecatedPreadv reads from the file at the given offset.
-func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
+// updateSourceLocked requires that s.mu is held.
+func (s *SeqFile) updateSourceLocked(ctx context.Context, record int) {
+    var h SeqHandle
+    if record == 0 {
+        h = nil
+    } else {
+        h = s.source[record-1].Handle
+    }
+    // Save what we have previously read.
+    s.source = s.source[:record]
+    var newSource []SeqData
+    newSource, s.generation = s.SeqSource.ReadSeqFileData(ctx, h)
+    s.source = append(s.source, newSource...)
+}
+
+// seqFileOperations implements fs.FileOperations.
+//
+// +stateify savable
+type seqFileOperations struct {
+    waiter.AlwaysReady       `state:"nosave"`
+    fsutil.FileGenericSeek   `state:"nosave"`
+    fsutil.FileNoIoctl       `state:"nosave"`
+    fsutil.FileNoMMap        `state:"nosave"`
+    fsutil.FileNoopFlush     `state:"nosave"`
+    fsutil.FileNoopFsync     `state:"nosave"`
+    fsutil.FileNoopRelease   `state:"nosave"`
+    fsutil.FileNotDirReaddir `state:"nosave"`
+
+    seqFile *SeqFile
+}
+
+var _ fs.FileOperations = (*seqFileOperations)(nil)
+
+// Write implements fs.FileOperations.Write.
+func (*seqFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
+    return 0, syserror.EACCES
+}
+
+// Read implements fs.FileOperations.Read.
+func (sfo *seqFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
+    sfo.seqFile.mu.Lock()
+    defer sfo.seqFile.mu.Unlock()
 
-    s.Entry.NotifyAccess(ctx)
-    defer func() { s.lastRead = offset }()
+    sfo.seqFile.NotifyAccess(ctx)
+    defer func() { sfo.seqFile.lastRead = offset }()
 
     updated := false
 
     // Try to find where we should start reading this file.
-    i, recordOffset := findIndexAndOffset(s.source, offset)
-    if i == len(s.source) {
+    i, recordOffset := findIndexAndOffset(sfo.seqFile.source, offset)
+    if i == len(sfo.seqFile.source) {
         // Ok, we're at EOF. Let's first check to see if there might be
         // more data available to us. If there is more data, add it to
         // the end and try reading again.
-        if !s.SeqSource.NeedsUpdate(s.generation) {
+        if !sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) {
             return 0, io.EOF
         }
 
-        oldLen := len(s.source)
-        s.updateSourceLocked(ctx, len(s.source))
+        oldLen := len(sfo.seqFile.source)
+        sfo.seqFile.updateSourceLocked(ctx, len(sfo.seqFile.source))
         updated = true
 
         // We know that we had consumed everything up until this point
         // so we search in the new slice instead of starting over.
-        i, recordOffset = findIndexAndOffset(s.source[oldLen:], recordOffset)
+        i, recordOffset = findIndexAndOffset(sfo.seqFile.source[oldLen:], recordOffset)
         i += oldLen
 
         // i is at most the length of the slice which is
-        // len(s.source) - oldLen. So at most i will be equal to
-        // len(s.source).
-        if i == len(s.source) {
+        // len(sfo.seqFile.source) - oldLen. So at most i will be equal to
+        // len(sfo.seqFile.source).
+        if i == len(sfo.seqFile.source) {
             return 0, io.EOF
         }
     }
@@ -178,7 +241,7 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,
     // before continuing on to the next. We don't refresh our data source
     // before this record is completed.
     if recordOffset != 0 {
-        n, err := dst.CopyOut(ctx, s.source[i].Buf[recordOffset:])
+        n, err := dst.CopyOut(ctx, sfo.seqFile.source[i].Buf[recordOffset:])
         done += int64(n)
         dst = dst.DropFirst(n)
         if dst.NumBytes() == 0 || err != nil {
@@ -190,15 +253,15 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,
     // Next/New unit, update the source file if necessary. Make an extra
     // check to see if we've seeked backwards and if so always update our
     // data source.
-    if !updated && (s.SeqSource.NeedsUpdate(s.generation) || s.lastRead > offset) {
-        s.updateSourceLocked(ctx, i)
+    if !updated && (sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) || sfo.seqFile.lastRead > offset) {
+        sfo.seqFile.updateSourceLocked(ctx, i)
         // recordOffset is 0 here and we won't update records behind the
         // current one so recordOffset is still 0 even though source
         // just got updated. Just read the next record.
     }
 
     // Finish by reading all the available data.
-    for _, buf := range s.source[i:] {
+    for _, buf := range sfo.seqFile.source[i:] {
         n, err := dst.CopyOut(ctx, buf.Buf)
         done += int64(n)
         dst = dst.DropFirst(n)
@@ -214,23 +277,3 @@ func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence,
     }
     return done, nil
 }
-
-// updateSourceLocked requires that s.mu is held.
-func (s *SeqFile) updateSourceLocked(ctx context.Context, record int) {
-    var h SeqHandle
-    if record == 0 {
-        h = nil
-    } else {
-        h = s.source[record-1].Handle
-    }
-    // Save what we have previously read.
-    s.source = s.source[:record]
-    var newSource []SeqData
-    newSource, s.generation = s.SeqSource.ReadSeqFileData(ctx, h)
-    s.source = append(s.source, newSource...)
-}
-
-// DeprecatedPwritev is always denied.
-func (*SeqFile) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) {
-    return 0, ramfs.ErrDenied
-}
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_test.go b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
index f9a2ca38e..35403ab7f 100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile_test.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
@@ -23,7 +23,7 @@ import (
     "gvisor.googlesource.com/gvisor/pkg/sentry/context"
     "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
     "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
-    ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test"
+    "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs"
     "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
 )
 
@@ -91,10 +91,15 @@ type testTable struct {
     expectedError error
 }
 
-func runTableTests(ctx context.Context, table []testTable, n fs.InodeOperations) error {
+func runTableTests(ctx context.Context, table []testTable, dirent *fs.Dirent) error {
     for _, tt := range table {
+        file, err := dirent.Inode.InodeOperations.GetFile(ctx, dirent, fs.FileFlags{Read: true})
+        if err != nil {
+            return fmt.Errorf("GetFile returned error: %v", err)
+        }
+
         data := make([]byte, tt.readBufferSize)
-        resultLen, err := n.DeprecatedPreadv(ctx, usermem.BytesIOSequence(data), tt.offset)
+        resultLen, err := file.Preadv(ctx, usermem.BytesIOSequence(data), tt.offset)
         if err != tt.expectedError {
             return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
         }
@@ -115,12 +120,12 @@ func TestSeqFile(t *testing.T) {
     testSource.Init()
 
     // Create a file that can be R/W.
-    m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+    m := fs.NewPseudoMountSource()
     ctx := contexttest.Context(t)
     contents := map[string]*fs.Inode{
         "foo": NewSeqFileInode(ctx, testSource, m),
     }
-    root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+    root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))
 
     // How about opening it?
     inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
@@ -129,9 +134,13 @@ func TestSeqFile(t *testing.T) {
         t.Fatalf("failed to walk to foo for n2: %v", err)
     }
     n2 := dirent2.Inode.InodeOperations
+    file2, err := n2.GetFile(ctx, dirent2, fs.FileFlags{Read: true, Write: true})
+    if err != nil {
+        t.Fatalf("GetFile returned error: %v", err)
+    }
 
     // Writing?
-    if _, err := n2.DeprecatedPwritev(nil, usermem.BytesIOSequence([]byte("test")), 0); err == nil {
+    if _, err := file2.Writev(ctx, usermem.BytesIOSequence([]byte("test"))); err == nil {
         t.Fatalf("managed to write to n2: %v", err)
     }
 
@@ -141,7 +150,6 @@ func TestSeqFile(t *testing.T) {
         t.Fatalf("failed to walk to foo: %v", err)
     }
     n3 := dirent3.Inode.InodeOperations
-
     if n2 != n3 {
         t.Error("got n2 != n3, want same")
     }
@@ -170,13 +178,13 @@ func TestSeqFile(t *testing.T) {
         // Read the last 3 bytes.
         {97, 10, testSource.actual[9].Buf[7:], nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err)
     }
 
     // Disable updates and do it again.
     testSource.update = false
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
     }
 }
@@ -188,25 +196,24 @@ func TestSeqFileFileUpdated(t *testing.T) {
     testSource.update = true
 
     // Create a file that can be R/W.
-    m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{})
+    m := fs.NewPseudoMountSource()
     ctx := contexttest.Context(t)
     contents := map[string]*fs.Inode{
         "foo": NewSeqFileInode(ctx, testSource, m),
     }
-    root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777))
+    root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))
 
     // How about opening it?
     inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory})
     dirent2, err := root.Lookup(ctx, inode, "foo")
     if err != nil {
-        t.Fatalf("failed to walk to foo for n2: %v", err)
+        t.Fatalf("failed to walk to foo for dirent2: %v", err)
     }
-    n2 := dirent2.Inode.InodeOperations
 
     table := []testTable{
         {0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed: %v", err)
     }
     // Delete the first entry.
@@ -224,7 +231,7 @@ func TestSeqFileFileUpdated(t *testing.T) {
         // Read the following two lines.
         {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed after removing first entry: %v", err)
     }
 
@@ -238,7 +245,7 @@ func TestSeqFileFileUpdated(t *testing.T) {
     table = []testTable{
         {50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed after adding middle entry: %v", err)
     }
     // This will be used in a later test.
@@ -249,7 +256,7 @@ func TestSeqFileFileUpdated(t *testing.T) {
     table = []testTable{
         {20, 20, []byte{}, io.EOF},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed after removing all entries: %v", err)
     }
     // Restore some of the data.
@@ -257,7 +264,7 @@ func TestSeqFileFileUpdated(t *testing.T) {
     table = []testTable{
         {6, 20, testSource.actual[0].Buf[6:], nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed after adding first entry back: %v", err)
     }
 
@@ -266,7 +273,7 @@ func TestSeqFileFileUpdated(t *testing.T) {
     table = []testTable{
         {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
     }
-    if err := runTableTests(ctx, table, n2); err != nil {
+    if err := runTableTests(ctx, table, dirent2); err != nil {
         t.Errorf("runTableTest failed after extending testSource: %v", err)
     }
 }
```
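For context, below is a hedged sketch of what an external SeqSource implementation might look like against the interface shape exercised by the Read path above, namely NeedsUpdate and ReadSeqFileData returning SeqData values that carry Buf and Handle. The staticSource name and the integer-handle scheme are invented here, and the sketch assumes SeqHandle is an opaque interface value, as the nil assignments in updateSourceLocked suggest:

```go
package example

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile"
)

// staticSource is a hypothetical SeqSource that serves a fixed set of
// records.
type staticSource struct {
	records [][]byte
}

// NeedsUpdate reports whether data generated at the given generation is
// stale; fixed records never are.
func (*staticSource) NeedsUpdate(generation int64) bool {
	return false
}

// ReadSeqFileData returns the records that follow handle h. A nil handle
// means "start from the beginning"; otherwise h is the index of the last
// record already handed out (stored in SeqData.Handle below).
func (s *staticSource) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	start := 0
	if h != nil {
		start = h.(int) + 1
	}
	var data []seqfile.SeqData
	for i := start; i < len(s.records); i++ {
		data = append(data, seqfile.SeqData{Buf: s.records[i], Handle: i})
	}
	return data, 0
}
```

An inode for such a source would then be built with seqfile.NewSeqFileInode(ctx, &staticSource{...}, msrc), which is how the tests in this patch wire up their testSource.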