author     gVisor bot <gvisor-bot@google.com>  2020-10-24 14:53:42 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-10-24 14:53:42 +0000
commit     98e3799f1ea42b4121c8903fa610d76764227726 (patch)
tree       73921fabe8a944d1e50ca07c96a7193a5f48b206 /pkg/sentry/fsimpl/gofer
parent     2d3d2cdb9006ad504d8f4882855ac3981783dc5f (diff)
parent     4feb5c7c263de2310608d1a0e608d4ffd5e2990f (diff)
Merge release-20201019.0-58-g4feb5c7c2 (automated)
Diffstat (limited to 'pkg/sentry/fsimpl/gofer')
-rw-r--r--  pkg/sentry/fsimpl/gofer/directory.go              4
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go                 90
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer_state_autogen.go    3
-rw-r--r--  pkg/sentry/fsimpl/gofer/save_restore.go           8
4 files changed, 95 insertions(+), 10 deletions(-)
diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go
index c3af30f49..e993c8e36 100644
--- a/pkg/sentry/fsimpl/gofer/directory.go
+++ b/pkg/sentry/fsimpl/gofer/directory.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/p9"
+ "gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
@@ -100,6 +101,9 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {
hostFD: -1,
nlink: uint32(2),
}
+ if refsvfs2.LeakCheckEnabled() {
+ refsvfs2.Register(child, "gofer.dentry")
+ }
switch opts.mode.FileType() {
case linux.S_IFDIR:
// Nothing else needs to be done.
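The Register call above is the construction-side half of the leak-checking contract: a dentry is recorded when it is created (newDentry and afterLoad in the hunks below cover the other construction paths) and forgotten again in destroyLocked. As a rough sketch of the shape of such a registry — illustrative only, not the actual refsvfs2 implementation; the package and variable names here are invented:

package leakcheck

import (
	"fmt"
	"sync"
)

// CheckedObject is the contract a registered object must satisfy: it can
// describe itself if it is still live when the final leak check runs.
type CheckedObject interface {
	LeakMessage() string
}

var (
	mu   sync.Mutex
	live = make(map[CheckedObject]string)
)

// Register records obj as live; called from every construction path.
func Register(obj CheckedObject, typ string) {
	mu.Lock()
	defer mu.Unlock()
	live[obj] = typ
}

// Unregister forgets obj; called from the object's destructor.
func Unregister(obj CheckedObject, typ string) {
	mu.Lock()
	defer mu.Unlock()
	delete(live, obj)
}

// DoLeakCheck prints a message for every object that was registered but
// never destroyed.
func DoLeakCheck() {
	mu.Lock()
	defer mu.Unlock()
	for obj := range live {
		fmt.Println(obj.LeakMessage())
	}
}

A construction path that skips Register hides real leaks, and a destruction path that skips Unregister reports spurious ones, which is why this commit touches every place a gofer dentry is created (createSyntheticChildLocked here, newDentry and afterLoad below) and the one place it is destroyed (destroyLocked).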
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index f7c94cce1..604c6d7cd 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -46,6 +46,8 @@ import (
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
+ refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+ "gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -109,8 +111,8 @@ type filesystem struct {
// cachedDentries contains all dentries with 0 references. (Due to race
// conditions, it may also contain dentries with non-zero references.)
- // cachedDentriesLen is the number of dentries in cachedDentries. These
- // fields are protected by renameMu.
+ // cachedDentriesLen is the number of dentries in cachedDentries. These fields
+ // are protected by renameMu.
cachedDentries dentryList
cachedDentriesLen uint64
@@ -134,6 +136,10 @@ type filesystem struct {
// savedDentryRW records open read/write handles during save/restore.
savedDentryRW map[*dentry]savedDentryRW
+
+ // released is nonzero once filesystem.Release has been called. It is accessed
+ // with atomic memory operations.
+ released int32
}
// +stateify savable
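A note on the new released field: it is an int32 rather than a bool so that it can be read and written through sync/atomic, which is what lets checkCachingLocked (further down) observe the flag without taking renameMu or any other filesystem-wide lock. The idiom in isolation, with a hypothetical type standing in for the real filesystem:

package fsexample

import "sync/atomic"

type fsLike struct {
	// released is nonzero once release has been called. It is accessed
	// with atomic memory operations, so readers need no lock.
	released int32
}

func (fs *fsLike) release() {
	// Publish the flag before tearing anything down, so a concurrent
	// checkCaching-style path that observes it destroys rather than
	// caches the object.
	atomic.StoreInt32(&fs.released, 1)
}

func (fs *fsLike) isReleased() bool {
	return atomic.LoadInt32(&fs.released) != 0
}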
@@ -454,9 +460,8 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
return nil, nil, err
}
// Set the root's reference count to 2. One reference is returned to the
- // caller, and the other is deliberately leaked to prevent the root from
- // being "cached" and subsequently evicted. Its resources will still be
- // cleaned up by fs.Release().
+ // caller, and the other is held by fs to prevent the root from being "cached"
+ // and subsequently evicted.
root.refs = 2
fs.root = root
@@ -526,15 +531,16 @@ func (fs *filesystem) dial(ctx context.Context) error {
// Release implements vfs.FilesystemImpl.Release.
func (fs *filesystem) Release(ctx context.Context) {
- mf := fs.mfp.MemoryFile()
+ atomic.StoreInt32(&fs.released, 1)
+ mf := fs.mfp.MemoryFile()
fs.syncMu.Lock()
for d := range fs.syncableDentries {
d.handleMu.Lock()
d.dataMu.Lock()
if h := d.writeHandleLocked(); h.isOpen() {
// Write dirty cached data to the remote file.
- if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size, fs.mfp.MemoryFile(), h.writeFromBlocksAt); err != nil {
+ if err := fsutil.SyncDirtyAll(ctx, &d.cache, &d.dirty, d.size, mf, h.writeFromBlocksAt); err != nil {
log.Warningf("gofer.filesystem.Release: failed to flush dentry: %v", err)
}
// TODO(jamieliu): Do we need to flushf/fsync d?
@@ -555,6 +561,21 @@ func (fs *filesystem) Release(ctx context.Context) {
// fs.
fs.syncMu.Unlock()
+ // If leak checking is enabled, release all outstanding references in the
+ // filesystem. We deliberately avoid doing this outside of leak checking; we
+ // have released all external resources above rather than relying on dentry
+ // destructors.
+ if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {
+ fs.renameMu.Lock()
+ fs.root.releaseSyntheticRecursiveLocked(ctx)
+ fs.evictAllCachedDentriesLocked(ctx)
+ fs.renameMu.Unlock()
+
+ // An extra reference was held by the filesystem on the root to prevent it from
+ // being cached/evicted.
+ fs.root.DecRef(ctx)
+ }
+
if !fs.iopts.LeakConnection {
// Close the connection to the server. This implicitly clunks all fids.
fs.client.Close()
@@ -563,6 +584,31 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
}
+// releaseSyntheticRecursiveLocked traverses the tree with root d and decrements
+// the reference count on every synthetic dentry. Synthetic dentries have one
+// reference for existence that should be dropped during filesystem.Release.
+//
+// Precondition: d.fs.renameMu is locked.
+func (d *dentry) releaseSyntheticRecursiveLocked(ctx context.Context) {
+ if d.isSynthetic() {
+ d.decRefLocked()
+ d.checkCachingLocked(ctx)
+ }
+ if d.isDir() {
+ var children []*dentry
+ d.dirMu.Lock()
+ for _, child := range d.children {
+ children = append(children, child)
+ }
+ d.dirMu.Unlock()
+ for _, child := range children {
+ if child != nil {
+ child.releaseSyntheticRecursiveLocked(ctx)
+ }
+ }
+ }
+}
+
// dentry implements vfs.DentryImpl.
//
// +stateify savable
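One detail worth calling out in releaseSyntheticRecursiveLocked above: the children map is snapshotted into a slice under dirMu and the lock is dropped before recursing, because the recursive call can re-enter the parent's dirMu (the released path in checkCachingLocked below deletes a child from parent.children under that lock). The idiom in isolation, over a hypothetical node type:

package treewalk

import "sync"

type node struct {
	mu       sync.Mutex
	children map[string]*node
}

// walk visits n and then every node below it. The children map is copied
// under mu and the lock is released before descending, since visiting a
// child may itself need to re-acquire this node's mu.
func (n *node) walk(visit func(*node)) {
	visit(n)
	n.mu.Lock()
	snapshot := make([]*node, 0, len(n.children))
	for _, c := range n.children {
		snapshot = append(snapshot, c)
	}
	n.mu.Unlock()
	for _, c := range snapshot {
		if c != nil {
			c.walk(visit)
		}
	}
}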
@@ -815,6 +861,9 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma
d.nlink = uint32(attr.NLink)
}
d.vfsd.Init(d)
+ if refsvfs2.LeakCheckEnabled() {
+ refsvfs2.Register(d, "gofer.dentry")
+ }
fs.syncMu.Lock()
fs.syncableDentries[d] = struct{}{}
@@ -1210,6 +1259,11 @@ func (d *dentry) decRefLocked() {
}
}
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (d *dentry) LeakMessage() string {
+ return fmt.Sprintf("[gofer.dentry %p] reference count of %d instead of -1", d, atomic.LoadInt64(&d.refs))
+}
+
// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {
if d.isDir() {
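LeakMessage is the line the checker emits for each dentry still registered at shutdown. The -1 it mentions serves as the destroyed-sentinel for d.refs; the afterLoad hunk in save_restore.go below relies on the same convention when it re-registers only dentries whose count is not -1. A leaked dentry would therefore surface roughly as follows (pointer and count values invented for illustration):

[gofer.dentry 0xc000123400] reference count of 1 instead of -1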
@@ -1292,6 +1346,17 @@ func (d *dentry) checkCachingLocked(ctx context.Context) {
if d.watches.Size() > 0 {
return
}
+
+ if atomic.LoadInt32(&d.fs.released) != 0 {
+ if d.parent != nil {
+ d.parent.dirMu.Lock()
+ delete(d.parent.children, d.name)
+ d.parent.dirMu.Unlock()
+ }
+ d.destroyLocked(ctx)
+ return
+ }
+
// If d is already cached, just move it to the front of the LRU.
if d.cached {
d.fs.cachedDentries.Remove(d)
@@ -1310,6 +1374,14 @@ func (d *dentry) checkCachingLocked(ctx context.Context) {
}
}
+// Precondition: fs.renameMu must be locked for writing; it may be temporarily
+// unlocked.
+func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {
+ for fs.cachedDentriesLen != 0 {
+ fs.evictCachedDentryLocked(ctx)
+ }
+}
+
// Preconditions:
// * fs.renameMu must be locked for writing; it may be temporarily unlocked.
// * fs.cachedDentriesLen != 0.
@@ -1422,6 +1494,10 @@ func (d *dentry) destroyLocked(ctx context.Context) {
panic("gofer.dentry.DecRef() called without holding a reference")
}
}
+
+ if refsvfs2.LeakCheckEnabled() {
+ refsvfs2.Unregister(d, "gofer.dentry")
+ }
}
func (d *dentry) isDeleted() bool {
diff --git a/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
index 289c3d6d4..26d0ab4db 100644
--- a/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
+++ b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
@@ -128,6 +128,7 @@ func (fs *filesystem) StateFields() []string {
"specialFileFDs",
"lastIno",
"savedDentryRW",
+ "released",
}
}
@@ -148,6 +149,7 @@ func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(10, &fs.specialFileFDs)
stateSinkObject.Save(11, &fs.lastIno)
stateSinkObject.Save(12, &fs.savedDentryRW)
+ stateSinkObject.Save(13, &fs.released)
}
func (fs *filesystem) afterLoad() {}
@@ -166,6 +168,7 @@ func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
stateSourceObject.Load(10, &fs.specialFileFDs)
stateSourceObject.Load(11, &fs.lastIno)
stateSourceObject.Load(12, &fs.savedDentryRW)
+ stateSourceObject.Load(13, &fs.released)
}
func (f *filesystemOptions) StateTypeName() string {
diff --git a/pkg/sentry/fsimpl/gofer/save_restore.go b/pkg/sentry/fsimpl/gofer/save_restore.go
index e995619a6..2ea224c43 100644
--- a/pkg/sentry/fsimpl/gofer/save_restore.go
+++ b/pkg/sentry/fsimpl/gofer/save_restore.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/p9"
+ "gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
@@ -53,9 +54,7 @@ func (fs *filesystem) PrepareSave(ctx context.Context) error {
// Purge cached dentries, which may not be reopenable after restore due to
// permission changes.
fs.renameMu.Lock()
- for fs.cachedDentriesLen != 0 {
- fs.evictCachedDentryLocked(ctx)
- }
+ fs.evictAllCachedDentriesLocked(ctx)
fs.renameMu.Unlock()
// Buffer pipe data so that it's available for reading after restore. (This
@@ -141,6 +140,9 @@ func (d *dentry) beforeSave() {
// afterLoad is invoked by stateify.
func (d *dentry) afterLoad() {
d.hostFD = -1
+ if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {
+ refsvfs2.Register(d, "gofer.dentry")
+ }
}
// afterLoad is invoked by stateify.
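Registration state lives outside the saved object graph, so it has to be rebuilt on restore: afterLoad above re-registers exactly those dentries that were still live (refs != -1) at save time, keeping the leak checker's live set consistent across save/restore. A compact sketch of that hook, with a hypothetical object type and an injected register callback standing in for refsvfs2.Register:

package restoreexample

import "sync/atomic"

// obj stands in for a saved/restored reference-counted object that uses
// -1 in refs as its "already destroyed" sentinel, as gofer.dentry does.
type obj struct {
	refs int64
}

// afterLoad rebuilds leak-checker state that was deliberately not saved:
// only objects still live at save time belong in the live set.
func (o *obj) afterLoad(register func(*obj)) {
	if atomic.LoadInt64(&o.refs) != -1 {
		register(o)
	}
}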