path: root/pkg/sentry
author    gVisor bot <gvisor-bot@google.com>  2020-11-13 00:59:23 +0000
committer gVisor bot <gvisor-bot@google.com>  2020-11-13 00:59:23 +0000
commit    8c35d50e11bb782d325a2e796ac8253df5cd0db6 (patch)
tree      3bb390821b51d96057a19ecfe096b11f6b557221 /pkg/sentry
parent    57a0a3653509689d4fd1959a4212b966617de96b (diff)
parent    ae7ab0a330aaa1676d1fe066e3f5ac5fe805ec1c (diff)
Merge release-20201030.0-73-gae7ab0a33 (automated)
Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/fsimpl/gofer/filesystem.go    | 80
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go         |  5
-rw-r--r--  pkg/sentry/fsimpl/overlay/filesystem.go  | 24
3 files changed, 67 insertions, 42 deletions
diff --git a/pkg/sentry/fsimpl/gofer/filesystem.go b/pkg/sentry/fsimpl/gofer/filesystem.go
index 7ab298019..2294c490e 100644
--- a/pkg/sentry/fsimpl/gofer/filesystem.go
+++ b/pkg/sentry/fsimpl/gofer/filesystem.go
@@ -114,6 +114,51 @@ func putDentrySlice(ds *[]*dentry) {
dentrySlicePool.Put(ds)
}
+// renameMuRUnlockAndCheckCaching calls fs.renameMu.RUnlock(), then calls
+// dentry.checkCachingLocked on all dentries in *dsp with fs.renameMu locked
+// for writing.
+//
+// dsp is a pointer-to-pointer since defer evaluates its arguments immediately,
+// but dentry slices are allocated lazily, and it's much easier to say "defer
+// fs.renameMuRUnlockAndCheckCaching(&ds)" than "defer func() {
+// fs.renameMuRUnlockAndCheckCaching(ds) }()" to work around this.
+func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, dsp **[]*dentry) {
+ fs.renameMu.RUnlock()
+ if *dsp == nil {
+ return
+ }
+ ds := **dsp
+ // Only go through calling dentry.checkCachingLocked() (which requires
+ // re-locking renameMu) if we actually have any dentries with zero refs.
+ checkAny := false
+ for i := range ds {
+ if atomic.LoadInt64(&ds[i].refs) == 0 {
+ checkAny = true
+ break
+ }
+ }
+ if checkAny {
+ fs.renameMu.Lock()
+ for _, d := range ds {
+ d.checkCachingLocked(ctx)
+ }
+ fs.renameMu.Unlock()
+ }
+ putDentrySlice(*dsp)
+}
+
+func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) {
+ if *ds == nil {
+ fs.renameMu.Unlock()
+ return
+ }
+ for _, d := range **ds {
+ d.checkCachingLocked(ctx)
+ }
+ fs.renameMu.Unlock()
+ putDentrySlice(*ds)
+}
+
// stepLocked resolves rp.Component() to an existing file, starting from the
// given directory.
//
@@ -651,41 +696,6 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
return nil
}
-// renameMuRUnlockAndCheckCaching calls fs.renameMu.RUnlock(), then calls
-// dentry.checkCachingLocked on all dentries in *ds with fs.renameMu locked for
-// writing.
-//
-// ds is a pointer-to-pointer since defer evaluates its arguments immediately,
-// but dentry slices are allocated lazily, and it's much easier to say "defer
-// fs.renameMuRUnlockAndCheckCaching(&ds)" than "defer func() {
-// fs.renameMuRUnlockAndCheckCaching(ds) }()" to work around this.
-func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) {
- fs.renameMu.RUnlock()
- if *ds == nil {
- return
- }
- if len(**ds) != 0 {
- fs.renameMu.Lock()
- for _, d := range **ds {
- d.checkCachingLocked(ctx)
- }
- fs.renameMu.Unlock()
- }
- putDentrySlice(*ds)
-}
-
-func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) {
- if *ds == nil {
- fs.renameMu.Unlock()
- return
- }
- for _, d := range **ds {
- d.checkCachingLocked(ctx)
- }
- fs.renameMu.Unlock()
- putDentrySlice(*ds)
-}
-
// AccessAt implements vfs.Filesystem.Impl.AccessAt.
func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
var ds *[]*dentry
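The comment on renameMuRUnlockAndCheckCaching above explains that it takes a pointer-to-pointer because defer evaluates its arguments immediately while the dentry slice is only allocated lazily. The following standalone sketch illustrates that Go behavior; it is not gVisor code, and the dentry stand-in and cleanup functions are invented for illustration.

package main

import "fmt"

// Stand-in type for illustration only; not the gofer dentry.
type dentry struct{ refs int64 }

func cleanupByValue(ds *[]*dentry) {
	// ds was captured when the defer statement evaluated its arguments: still nil.
	fmt.Println("by value, slice seen as nil:", ds == nil)
}

func cleanupByPointer(dsp **[]*dentry) {
	// *dsp is read at cleanup time, so the lazily allocated slice is visible.
	fmt.Println("by pointer, slice seen as nil:", *dsp == nil)
}

func main() {
	var ds *[]*dentry
	defer cleanupByValue(ds)    // evaluates ds (nil) immediately
	defer cleanupByPointer(&ds) // evaluates &ds immediately, which stays valid
	ds = &[]*dentry{{refs: 0}}  // allocated "lazily", after both defers
}

Deferred calls run last-in-first-out, so the pointer-based cleanup observes the slice allocated after the defer statements while the by-value cleanup still sees nil, which is exactly the situation the "defer fs.renameMuRUnlockAndCheckCaching(&ds)" idiom described in the comment works around.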
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index 53bcc9986..75a836899 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -1352,6 +1352,11 @@ func (d *dentry) checkCachingLocked(ctx context.Context) {
}
if refs > 0 {
if d.cached {
+ // This isn't strictly necessary (fs.cachedDentries is permitted to
+ // contain dentries with non-zero refs, which are skipped by
+ // fs.evictCachedDentryLocked() upon reaching the end of the LRU),
+ // but since we are already holding fs.renameMu for writing we may
+ // as well.
d.fs.cachedDentries.Remove(d)
d.fs.cachedDentriesLen--
d.cached = false
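The comment added to checkCachingLocked relies on fs.evictCachedDentryLocked() skipping dentries whose refs became non-zero while they sat on the LRU. Below is a rough, hypothetical sketch of that skip behavior; the types and the evictOldest helper are invented for illustration and do not mirror the real gofer eviction code.

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative stand-in; the real gofer code uses an intrusive list and does
// additional bookkeeping when destroying a dentry.
type cachedDentry struct {
	refs   int64
	cached bool
}

// evictOldest pops entries from the eviction end of the LRU until it finds one
// that is still unreferenced; re-referenced entries are simply skipped, which
// is why eagerly removing them in checkCachingLocked is optional.
func evictOldest(lru []*cachedDentry) []*cachedDentry {
	for len(lru) > 0 {
		d := lru[0]
		lru = lru[1:]
		d.cached = false
		if atomic.LoadInt64(&d.refs) != 0 {
			continue // picked up new references while cached; skip
		}
		fmt.Println("destroying unreferenced dentry")
		break
	}
	return lru
}

func main() {
	lru := []*cachedDentry{{refs: 2, cached: true}, {refs: 0, cached: true}}
	lru = evictOldest(lru)
	fmt.Println("remaining cached entries:", len(lru))
}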
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index bc07d72c0..d55bdc97f 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -78,26 +78,36 @@ func putDentrySlice(ds *[]*dentry) {
}
// renameMuRUnlockAndCheckDrop calls fs.renameMu.RUnlock(), then calls
-// dentry.checkDropLocked on all dentries in *ds with fs.renameMu locked for
+// dentry.checkDropLocked on all dentries in *dsp with fs.renameMu locked for
// writing.
//
-// ds is a pointer-to-pointer since defer evaluates its arguments immediately,
+// dsp is a pointer-to-pointer since defer evaluates its arguments immediately,
// but dentry slices are allocated lazily, and it's much easier to say "defer
// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
-func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
+func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
- if *ds == nil {
+ if *dsp == nil {
return
}
- if len(**ds) != 0 {
+ ds := **dsp
+ // Only go through calling dentry.checkDropLocked() (which requires
+ // re-locking renameMu) if we actually have any dentries with zero refs.
+ checkAny := false
+ for i := range ds {
+ if atomic.LoadInt64(&ds[i].refs) == 0 {
+ checkAny = true
+ break
+ }
+ }
+ if checkAny {
fs.renameMu.Lock()
- for _, d := range **ds {
+ for _, d := range ds {
d.checkDropLocked(ctx)
}
fs.renameMu.Unlock()
}
- putDentrySlice(*ds)
+ putDentrySlice(*dsp)
}
func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
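Both the gofer and overlay versions of this change replace the old len(**ds) != 0 test with an atomic scan of reference counts, so renameMu is only retaken for writing when at least one dentry actually has zero refs. The sketch below shows that lock-upgrade pattern in isolation, with generic names that are not part of gVisor.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Generic stand-ins for illustration; the real code operates on dentries and
// calls checkCachingLocked/checkDropLocked with renameMu held for writing.
type entry struct{ refs int64 }

type table struct {
	mu sync.RWMutex
}

func (t *table) rUnlockAndCheck(es []*entry) {
	t.mu.RUnlock()
	// Cheap atomic scan first: only upgrade to the write lock if some entry
	// may actually need processing (refs == 0).
	needWork := false
	for i := range es {
		if atomic.LoadInt64(&es[i].refs) == 0 {
			needWork = true
			break
		}
	}
	if !needWork {
		return // common case: the write lock is never taken
	}
	t.mu.Lock()
	for _, e := range es {
		fmt.Println("processing entry with refs", atomic.LoadInt64(&e.refs))
	}
	t.mu.Unlock()
}

func main() {
	t := &table{}
	es := []*entry{{refs: 2}, {refs: 0}}
	t.mu.RLock()
	t.rUnlockAndCheck(es)
}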