| author | Dean Deng <deandeng@google.com> | 2020-10-28 18:16:30 -0700 |
|---|---|---|
| committer | gVisor bot <gvisor-bot@google.com> | 2020-10-28 18:23:29 -0700 |
| commit | 3b4674ffe0e6ef1b016333ee726293ecf70c4e4e (patch) | |
| tree | 5a0147bdaa1b75cf933fc39ff2118fd553d878cb /pkg | |
| parent | 906f912b7c9484fd0028224a24055b887d4f84d2 (diff) | |
Add logging option to leak checker.
Also refactor the template and CheckedObject interface to make this cleaner.
Updates #1486.
PiperOrigin-RevId: 339577120
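
For readers of this change: the refactored `refsvfs2.CheckedObject` interface now carries three methods (`RefType`, `LeakMessage`, `LogRefs`), and reference events are reported through `refsvfs2.Register`, `Unregister`, and the new `LogIncRef`/`LogTryIncRef`/`LogDecRef` hooks. The sketch below is illustrative only, not code from this commit; the `counted` type and `newCounted` helper are hypothetical stand-ins for the generated code in `refs_template.go`:

```go
package example

import (
	"fmt"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/refsvfs2"
)

// counted is a hypothetical reference-counted object.
type counted struct {
	refs int64
}

// RefType identifies the object type in leak and trace output.
func (c *counted) RefType() string { return "example.counted" }

// LeakMessage is what refsvfs2.DoLeakCheck prints if c is still registered.
func (c *counted) LeakMessage() string {
	return fmt.Sprintf("[example.counted %p] reference count of %d instead of 0", c, atomic.LoadInt64(&c.refs))
}

// LogRefs gates per-object reference tracing; leave false except when debugging.
func (c *counted) LogRefs() bool { return false }

// newCounted registers the object with the leak checker on creation.
func newCounted() *counted {
	c := &counted{refs: 1}
	refsvfs2.Register(c)
	return c
}

// IncRef reports the new count to the leak checker's logging hook.
func (c *counted) IncRef() {
	r := atomic.AddInt64(&c.refs, 1)
	refsvfs2.LogIncRef(c, r) // no-op unless c.LogRefs() returns true
}
```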
Diffstat (limited to 'pkg')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | pkg/refs/refcounter.go | 10 |
| -rw-r--r-- | pkg/refsvfs2/BUILD | 3 |
| -rw-r--r-- | pkg/refsvfs2/refs_map.go | 93 |
| -rw-r--r-- | pkg/refsvfs2/refs_template.go | 56 |
| -rw-r--r-- | pkg/sentry/fsimpl/gofer/directory.go | 4 |
| -rw-r--r-- | pkg/sentry/fsimpl/gofer/filesystem.go | 8 |
| -rw-r--r-- | pkg/sentry/fsimpl/gofer/gofer.go | 58 |
| -rw-r--r-- | pkg/sentry/fsimpl/gofer/save_restore.go | 4 |
| -rw-r--r-- | pkg/sentry/fsimpl/overlay/BUILD | 1 |
| -rw-r--r-- | pkg/sentry/fsimpl/overlay/overlay.go | 53 |
| -rw-r--r-- | pkg/sentry/fsimpl/overlay/save_restore.go | 4 |
| -rw-r--r-- | pkg/sentry/fsimpl/verity/save_restore.go | 4 |
| -rw-r--r-- | pkg/sentry/fsimpl/verity/verity.go | 112 |
| -rw-r--r-- | pkg/sentry/vfs/mount.go | 31 |
| -rw-r--r-- | pkg/sentry/vfs/save_restore.go | 4 |
15 files changed, 282 insertions, 163 deletions
```diff
diff --git a/pkg/refs/refcounter.go b/pkg/refs/refcounter.go
index 699ea8ac3..6992e1de8 100644
--- a/pkg/refs/refcounter.go
+++ b/pkg/refs/refcounter.go
@@ -319,7 +319,8 @@ func makeStackKey(pcs []uintptr) stackKey {
 	return key
 }
 
-func recordStack() []uintptr {
+// RecordStack constructs and returns the PCs on the current stack.
+func RecordStack() []uintptr {
 	pcs := make([]uintptr, maxStackFrames)
 	n := runtime.Callers(1, pcs)
 	if n == 0 {
@@ -342,7 +343,8 @@ func recordStack() []uintptr {
 	return v
 }
 
-func formatStack(pcs []uintptr) string {
+// FormatStack converts the given stack into a readable format.
+func FormatStack(pcs []uintptr) string {
 	frames := runtime.CallersFrames(pcs)
 	var trace bytes.Buffer
 	for {
@@ -367,7 +369,7 @@ func (r *AtomicRefCount) finalize() {
 	if n := r.ReadRefs(); n != 0 {
 		msg := fmt.Sprintf("%sAtomicRefCount %p owned by %q garbage collected with ref count of %d (want 0)", note, r, r.name, n)
 		if len(r.stack) != 0 {
-			msg += ":\nCaller:\n" + formatStack(r.stack)
+			msg += ":\nCaller:\n" + FormatStack(r.stack)
 		} else {
 			msg += " (enable trace logging to debug)"
 		}
@@ -392,7 +394,7 @@ func (r *AtomicRefCount) EnableLeakCheck(name string) {
 	case NoLeakChecking:
 		return
 	case LeaksLogTraces:
-		r.stack = recordStack()
+		r.stack = RecordStack()
 	}
 	r.name = name
 	runtime.SetFinalizer(r, (*AtomicRefCount).finalize)
diff --git a/pkg/refsvfs2/BUILD b/pkg/refsvfs2/BUILD
index 245e33d2d..bfa1daa10 100644
--- a/pkg/refsvfs2/BUILD
+++ b/pkg/refsvfs2/BUILD
@@ -8,6 +8,9 @@ go_template(
     srcs = [
         "refs_template.go",
     ],
+    opt_consts = [
+        "logTrace",
+    ],
     types = [
         "T",
     ],
diff --git a/pkg/refsvfs2/refs_map.go b/pkg/refsvfs2/refs_map.go
index be75b0cc2..57938d2f0 100644
--- a/pkg/refsvfs2/refs_map.go
+++ b/pkg/refsvfs2/refs_map.go
@@ -37,61 +37,98 @@ var (
 // CheckedObject represents a reference-counted object with an informative
 // leak detection message.
 type CheckedObject interface {
+	// RefType is the type of the reference-counted object.
+	RefType() string
+
 	// LeakMessage supplies a warning to be printed upon leak detection.
 	LeakMessage() string
+
+	// LogRefs indicates whether reference-related events should be logged.
+	LogRefs() bool
 }
 
 func init() {
 	liveObjects = make(map[CheckedObject]struct{})
 }
 
-// LeakCheckEnabled returns whether leak checking is enabled. The following
+// leakCheckEnabled returns whether leak checking is enabled. The following
 // functions should only be called if it returns true.
-func LeakCheckEnabled() bool {
+func leakCheckEnabled() bool {
 	return refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking
 }
 
 // Register adds obj to the live object map.
-func Register(obj CheckedObject, typ string) {
-	for _, str := range ignored {
-		if strings.Contains(typ, str) {
-			return
+func Register(obj CheckedObject) {
+	if leakCheckEnabled() {
+		for _, str := range ignored {
+			if strings.Contains(obj.RefType(), str) {
+				return
+			}
 		}
+		liveObjectsMu.Lock()
+		if _, ok := liveObjects[obj]; ok {
+			panic(fmt.Sprintf("Unexpected entry in leak checking map: reference %p already added", obj))
+		}
+		liveObjects[obj] = struct{}{}
+		liveObjectsMu.Unlock()
+		logEvent(obj, "registered")
 	}
-	liveObjectsMu.Lock()
-	if _, ok := liveObjects[obj]; ok {
-		panic(fmt.Sprintf("Unexpected entry in leak checking map: reference %p already added", obj))
-	}
-	liveObjects[obj] = struct{}{}
-	liveObjectsMu.Unlock()
 }
 
 // Unregister removes obj from the live object map.
-func Unregister(obj CheckedObject, typ string) {
-	liveObjectsMu.Lock()
-	defer liveObjectsMu.Unlock()
-	if _, ok := liveObjects[obj]; !ok {
-		for _, str := range ignored {
-			if strings.Contains(typ, str) {
-				return
+func Unregister(obj CheckedObject) {
+	if leakCheckEnabled() {
+		liveObjectsMu.Lock()
+		defer liveObjectsMu.Unlock()
+		if _, ok := liveObjects[obj]; !ok {
+			for _, str := range ignored {
+				if strings.Contains(obj.RefType(), str) {
+					return
+				}
 			}
+			panic(fmt.Sprintf("Expected to find entry in leak checking map for reference %p", obj))
 		}
-		panic(fmt.Sprintf("Expected to find entry in leak checking map for reference %p", obj))
+		delete(liveObjects, obj)
+		logEvent(obj, "unregistered")
+	}
+}
+
+// LogIncRef logs a reference increment.
+func LogIncRef(obj CheckedObject, refs int64) {
+	logEvent(obj, fmt.Sprintf("IncRef to %d", refs))
+}
+
+// LogTryIncRef logs a successful TryIncRef call.
+func LogTryIncRef(obj CheckedObject, refs int64) {
+	logEvent(obj, fmt.Sprintf("TryIncRef to %d", refs))
+}
+
+// LogDecRef logs a reference decrement.
+func LogDecRef(obj CheckedObject, refs int64) {
+	logEvent(obj, fmt.Sprintf("DecRef to %d", refs))
+}
+
+// logEvent logs a message for the given reference-counted object.
+func logEvent(obj CheckedObject, msg string) {
+	if obj.LogRefs() {
+		log.Infof("[%s %p] %s:", obj.RefType(), obj, msg)
+		log.Infof(refs_vfs1.FormatStack(refs_vfs1.RecordStack()))
 	}
-	delete(liveObjects, obj)
 }
 
 // DoLeakCheck iterates through the live object map and logs a message for each
 // object. It is called once no reference-counted objects should be reachable
 // anymore, at which point anything left in the map is considered a leak.
 func DoLeakCheck() {
-	liveObjectsMu.Lock()
-	defer liveObjectsMu.Unlock()
-	leaked := len(liveObjects)
-	if leaked > 0 {
-		log.Warningf("Leak checking detected %d leaked objects:", leaked)
-		for obj := range liveObjects {
-			log.Warningf(obj.LeakMessage())
+	if leakCheckEnabled() {
+		liveObjectsMu.Lock()
+		defer liveObjectsMu.Unlock()
+		leaked := len(liveObjects)
+		if leaked > 0 {
+			log.Warningf("Leak checking detected %d leaked objects:", leaked)
+			for obj := range liveObjects {
+				log.Warningf(obj.LeakMessage())
+			}
 		}
 	}
 }
diff --git a/pkg/refsvfs2/refs_template.go b/pkg/refsvfs2/refs_template.go
index ec295ef5b..8f50b4ee6 100644
--- a/pkg/refsvfs2/refs_template.go
+++ b/pkg/refsvfs2/refs_template.go
@@ -26,13 +26,19 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const enableLogging = false
+
 // T is the type of the reference counted object. It is only used to customize
 // debug output when leak checking.
 type T interface{}
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var ownerType *T
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var obj *T
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -52,16 +58,24 @@ type Refs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *Refs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", ownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *Refs) RefType() string {
+	return fmt.Sprintf("%T", obj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *Refs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", ownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *Refs) LogRefs() bool {
+	return enableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *Refs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -75,8 +89,10 @@ func (r *Refs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *Refs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, ownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -89,15 +105,15 @@ func (r *Refs) IncRef() {
 //go:nosplit
 func (r *Refs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 		// This object has already been freed.
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	// Turn into a real reference.
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -114,14 +130,14 @@ func (r *Refs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *Refs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, ownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", ownerType))
-		}
+		refsvfs2.Unregister(r)
 		// Call the destructor.
 		if destroy != nil {
 			destroy()
@@ -130,7 +146,7 @@ func (r *Refs) DecRef(destroy func()) {
 }
 
 func (r *Refs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go
index e993c8e36..ce1b2a390 100644
--- a/pkg/sentry/fsimpl/gofer/directory.go
+++ b/pkg/sentry/fsimpl/gofer/directory.go
@@ -101,9 +101,7 @@ func (d *dentry) createSyntheticChildLocked(opts *createSyntheticOpts) {
 		hostFD: -1,
 		nlink:  uint32(2),
 	}
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(child, "gofer.dentry")
-	}
+	refsvfs2.Register(child)
 	switch opts.mode.FileType() {
 	case linux.S_IFDIR:
 		// Nothing else needs to be done.
diff --git a/pkg/sentry/fsimpl/gofer/filesystem.go b/pkg/sentry/fsimpl/gofer/filesystem.go
index baecb88c4..57a2ca43c 100644
--- a/pkg/sentry/fsimpl/gofer/filesystem.go
+++ b/pkg/sentry/fsimpl/gofer/filesystem.go
@@ -262,7 +262,7 @@ func (fs *filesystem) revalidateChildLocked(ctx context.Context, vfsObj *vfs.Vir
 			// treat their invalidation as deletion.
 			child.setDeleted()
 			parent.syntheticChildren--
-			child.decRefLocked()
+			child.decRefNoCaching()
 			parent.dirents = nil
 		}
 		*ds = appendDentry(*ds, child)
@@ -631,7 +631,7 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
 		child.setDeleted()
 		if child.isSynthetic() {
 			parent.syntheticChildren--
-			child.decRefLocked()
+			child.decRefNoCaching()
 		}
 		ds = appendDentry(ds, child)
 	}
@@ -1361,7 +1361,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
 		replaced.setDeleted()
 		if replaced.isSynthetic() {
 			newParent.syntheticChildren--
-			replaced.decRefLocked()
+			replaced.decRefNoCaching()
 		}
 		ds = appendDentry(ds, replaced)
 	}
@@ -1370,7 +1370,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
 	// with reference counts and queue oldParent for checkCachingLocked if the
 	// parent isn't actually changing.
 	if oldParent != newParent {
-		oldParent.decRefLocked()
+		oldParent.decRefNoCaching()
 		ds = appendDentry(ds, oldParent)
 		newParent.IncRef()
 		if renamed.isSynthetic() {
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index 80668ebc1..6f82ce61b 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -590,7 +590,7 @@ func (fs *filesystem) Release(ctx context.Context) {
 // Precondition: d.fs.renameMu is locked.
 func (d *dentry) releaseSyntheticRecursiveLocked(ctx context.Context) {
 	if d.isSynthetic() {
-		d.decRefLocked()
+		d.decRefNoCaching()
 		d.checkCachingLocked(ctx)
 	}
 	if d.isDir() {
@@ -860,10 +860,7 @@ func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, ma
 		d.nlink = uint32(attr.NLink)
 	}
 	d.vfsd.Init(d)
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(d, "gofer.dentry")
-	}
-
+	refsvfs2.Register(d)
 	fs.syncMu.Lock()
 	fs.syncableDentries[d] = struct{}{}
 	fs.syncMu.Unlock()
@@ -1222,17 +1219,19 @@ func dentryGIDFromP9GID(gid p9.GID) uint32 {
 func (d *dentry) IncRef() {
 	// d.refs may be 0 if d.fs.renameMu is locked, which serializes against
 	// d.checkCachingLocked().
-	atomic.AddInt64(&d.refs, 1)
+	r := atomic.AddInt64(&d.refs, 1)
+	refsvfs2.LogIncRef(d, r)
 }
 
 // TryIncRef implements vfs.DentryImpl.TryIncRef.
 func (d *dentry) TryIncRef() bool {
 	for {
-		refs := atomic.LoadInt64(&d.refs)
-		if refs <= 0 {
+		r := atomic.LoadInt64(&d.refs)
+		if r <= 0 {
 			return false
 		}
-		if atomic.CompareAndSwapInt64(&d.refs, refs, refs+1) {
+		if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
+			refsvfs2.LogTryIncRef(d, r+1)
 			return true
 		}
 	}
@@ -1240,22 +1239,28 @@ func (d *dentry) TryIncRef() bool {
 // DecRef implements vfs.DentryImpl.DecRef.
 func (d *dentry) DecRef(ctx context.Context) {
-	if refs := atomic.AddInt64(&d.refs, -1); refs == 0 {
+	if d.decRefNoCaching() == 0 {
 		d.fs.renameMu.Lock()
 		d.checkCachingLocked(ctx)
 		d.fs.renameMu.Unlock()
-	} else if refs < 0 {
-		panic("gofer.dentry.DecRef() called without holding a reference")
 	}
 }
 
-// decRefLocked decrements d's reference count without calling
+// decRefNoCaching decrements d's reference count without calling
 // d.checkCachingLocked, even if d's reference count reaches 0; callers are
 // responsible for ensuring that d.checkCachingLocked will be called later.
-func (d *dentry) decRefLocked() {
-	if refs := atomic.AddInt64(&d.refs, -1); refs < 0 {
-		panic("gofer.dentry.decRefLocked() called without holding a reference")
+func (d *dentry) decRefNoCaching() int64 {
+	r := atomic.AddInt64(&d.refs, -1)
+	refsvfs2.LogDecRef(d, r)
+	if r < 0 {
+		panic("gofer.dentry.decRefNoCaching() called without holding a reference")
 	}
+	return r
+}
+
+// RefType implements refsvfs2.CheckedObject.Type.
+func (d *dentry) RefType() string {
+	return "gofer.dentry"
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
@@ -1263,6 +1268,14 @@ func (d *dentry) LeakMessage() string {
 	return fmt.Sprintf("[gofer.dentry %p] reference count of %d instead of -1", d, atomic.LoadInt64(&d.refs))
 }
 
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+//
+// This should only be set to true for debugging purposes, as it can generate an
+// extremely large amount of output and drastically degrade performance.
+func (d *dentry) LogRefs() bool {
+	return false
+}
+
 // InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
 func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {
 	if d.isDir() {
@@ -1486,17 +1499,10 @@ func (d *dentry) destroyLocked(ctx context.Context) {
 	// Drop the reference held by d on its parent without recursively locking
 	// d.fs.renameMu.
-	if d.parent != nil {
-		if refs := atomic.AddInt64(&d.parent.refs, -1); refs == 0 {
-			d.parent.checkCachingLocked(ctx)
-		} else if refs < 0 {
-			panic("gofer.dentry.DecRef() called without holding a reference")
-		}
-	}
-
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Unregister(d, "gofer.dentry")
+	if d.parent != nil && d.parent.decRefNoCaching() == 0 {
+		d.parent.checkCachingLocked(ctx)
 	}
+	refsvfs2.Unregister(d)
 }
 
 func (d *dentry) isDeleted() bool {
diff --git a/pkg/sentry/fsimpl/gofer/save_restore.go b/pkg/sentry/fsimpl/gofer/save_restore.go
index 2ea224c43..17849dcc0 100644
--- a/pkg/sentry/fsimpl/gofer/save_restore.go
+++ b/pkg/sentry/fsimpl/gofer/save_restore.go
@@ -140,8 +140,8 @@ func (d *dentry) beforeSave() {
 // afterLoad is invoked by stateify.
 func (d *dentry) afterLoad() {
 	d.hostFD = -1
-	if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {
-		refsvfs2.Register(d, "gofer.dentry")
+	if atomic.LoadInt64(&d.refs) != -1 {
+		refsvfs2.Register(d)
 	}
 }
diff --git a/pkg/sentry/fsimpl/overlay/BUILD b/pkg/sentry/fsimpl/overlay/BUILD
index fd6c55921..bf13bbbf4 100644
--- a/pkg/sentry/fsimpl/overlay/BUILD
+++ b/pkg/sentry/fsimpl/overlay/BUILD
@@ -31,6 +31,7 @@ go_library(
         "//pkg/context",
         "//pkg/fspath",
         "//pkg/log",
+        "//pkg/refs",
         "//pkg/refsvfs2",
         "//pkg/sentry/arch",
         "//pkg/sentry/fs/lock",
diff --git a/pkg/sentry/fsimpl/overlay/overlay.go b/pkg/sentry/fsimpl/overlay/overlay.go
index c812f0a70..f6c58f2e7 100644
--- a/pkg/sentry/fsimpl/overlay/overlay.go
+++ b/pkg/sentry/fsimpl/overlay/overlay.go
@@ -505,9 +505,7 @@ func (fs *filesystem) newDentry() *dentry {
 	}
 	d.lowerVDs = d.inlineLowerVDs[:0]
 	d.vfsd.Init(d)
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(d, "overlay.dentry")
-	}
+	refsvfs2.Register(d)
 	return d
 }
 
@@ -515,17 +513,19 @@ func (fs *filesystem) newDentry() *dentry {
 func (d *dentry) IncRef() {
 	// d.refs may be 0 if d.fs.renameMu is locked, which serializes against
 	// d.checkDropLocked().
-	atomic.AddInt64(&d.refs, 1)
+	r := atomic.AddInt64(&d.refs, 1)
+	refsvfs2.LogIncRef(d, r)
 }
 
 // TryIncRef implements vfs.DentryImpl.TryIncRef.
 func (d *dentry) TryIncRef() bool {
 	for {
-		refs := atomic.LoadInt64(&d.refs)
-		if refs <= 0 {
+		r := atomic.LoadInt64(&d.refs)
+		if r <= 0 {
 			return false
 		}
-		if atomic.CompareAndSwapInt64(&d.refs, refs, refs+1) {
+		if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
+			refsvfs2.LogTryIncRef(d, r+1)
 			return true
 		}
 	}
@@ -533,15 +533,27 @@ func (d *dentry) TryIncRef() bool {
 // DecRef implements vfs.DentryImpl.DecRef.
 func (d *dentry) DecRef(ctx context.Context) {
-	if refs := atomic.AddInt64(&d.refs, -1); refs == 0 {
+	r := atomic.AddInt64(&d.refs, -1)
+	refsvfs2.LogDecRef(d, r)
+	if r == 0 {
 		d.fs.renameMu.Lock()
 		d.checkDropLocked(ctx)
 		d.fs.renameMu.Unlock()
-	} else if refs < 0 {
+	} else if r < 0 {
 		panic("overlay.dentry.DecRef() called without holding a reference")
 	}
 }
 
+func (d *dentry) decRefLocked(ctx context.Context) {
+	r := atomic.AddInt64(&d.refs, -1)
+	refsvfs2.LogDecRef(d, r)
+	if r == 0 {
+		d.checkDropLocked(ctx)
+	} else if r < 0 {
+		panic("overlay.dentry.decRefLocked() called without holding a reference")
+	}
+}
+
 // checkDropLocked should be called after d's reference count becomes 0 or it
 // becomes deleted.
 //
@@ -601,15 +613,14 @@ func (d *dentry) destroyLocked(ctx context.Context) {
 		d.parent.dirMu.Unlock()
 		// Drop the reference held by d on its parent without recursively
 		// locking d.fs.renameMu.
-		if refs := atomic.AddInt64(&d.parent.refs, -1); refs == 0 {
-			d.parent.checkDropLocked(ctx)
-		} else if refs < 0 {
-			panic("overlay.dentry.DecRef() called without holding a reference")
-		}
-	}
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Unregister(d, "overlay.dentry")
+		d.parent.decRefLocked(ctx)
 	}
+	refsvfs2.Unregister(d)
+}
+
+// RefType implements refsvfs2.CheckedObject.Type.
+func (d *dentry) RefType() string {
+	return "overlay.dentry"
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
@@ -617,6 +628,14 @@ func (d *dentry) LeakMessage() string {
 	return fmt.Sprintf("[overlay.dentry %p] reference count of %d instead of -1", d, atomic.LoadInt64(&d.refs))
 }
 
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+//
+// This should only be set to true for debugging purposes, as it can generate an
+// extremely large amount of output and drastically degrade performance.
+func (d *dentry) LogRefs() bool {
+	return false
+}
+
 // InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
 func (d *dentry) InotifyWithParent(ctx context.Context, events uint32, cookie uint32, et vfs.EventType) {
 	if d.isDir() {
diff --git a/pkg/sentry/fsimpl/overlay/save_restore.go b/pkg/sentry/fsimpl/overlay/save_restore.go
index 054e17b17..54809f16c 100644
--- a/pkg/sentry/fsimpl/overlay/save_restore.go
+++ b/pkg/sentry/fsimpl/overlay/save_restore.go
@@ -21,7 +21,7 @@ import (
 )
 
 func (d *dentry) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {
-		refsvfs2.Register(d, "overlay.dentry")
+	if atomic.LoadInt64(&d.refs) != -1 {
+		refsvfs2.Register(d)
 	}
 }
diff --git a/pkg/sentry/fsimpl/verity/save_restore.go b/pkg/sentry/fsimpl/verity/save_restore.go
index 4a161163c..46b064342 100644
--- a/pkg/sentry/fsimpl/verity/save_restore.go
+++ b/pkg/sentry/fsimpl/verity/save_restore.go
@@ -21,7 +21,7 @@ import (
 )
 
 func (d *dentry) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&d.refs) != -1 {
-		refsvfs2.Register(d, "verity.dentry")
+	if atomic.LoadInt64(&d.refs) != -1 {
+		refsvfs2.Register(d)
 	}
 }
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
index 92ca6ca6b..de92878fd 100644
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ b/pkg/sentry/fsimpl/verity/verity.go
@@ -43,32 +43,41 @@ import (
 	"gvisor.dev/gvisor/pkg/usermem"
 )
 
-// Name is the default filesystem name.
-const Name = "verity"
-
-// merklePrefix is the prefix of the Merkle tree files. For example, the Merkle
-// tree file for "/foo" is "/.merkle.verity.foo".
-const merklePrefix = ".merkle.verity."
-
-// merkleoffsetInParentXattr is the extended attribute name specifying the
-// offset of child hash in its parent's Merkle tree.
-const merkleOffsetInParentXattr = "user.merkle.offset"
-
-// merkleSizeXattr is the extended attribute name specifying the size of data
-// hashed by the corresponding Merkle tree. For a file, it's the size of the
-// whole file. For a directory, it's the size of all its children's hashes.
-const merkleSizeXattr = "user.merkle.size"
+const (
+	// Name is the default filesystem name.
+	Name = "verity"
+
+	// merklePrefix is the prefix of the Merkle tree files. For example, the Merkle
+	// tree file for "/foo" is "/.merkle.verity.foo".
+	merklePrefix = ".merkle.verity."
+
+	// merkleOffsetInParentXattr is the extended attribute name specifying the
+	// offset of the child hash in its parent's Merkle tree.
+	merkleOffsetInParentXattr = "user.merkle.offset"
+
+	// merkleSizeXattr is the extended attribute name specifying the size of data
+	// hashed by the corresponding Merkle tree. For a regular file, this is the
+	// file size. For a directory, this is the size of all its children's hashes.
+	merkleSizeXattr = "user.merkle.size"
+
+	// sizeOfStringInt32 is the size for a 32 bit integer stored as string in
+	// extended attributes. The maximum value of a 32 bit integer has 10 digits.
+	sizeOfStringInt32 = 10
+)
 
-// sizeOfStringInt32 is the size for a 32 bit integer stored as string in
-// extended attributes. The maximum value of a 32 bit integer is 10 digits.
-const sizeOfStringInt32 = 10
+var (
+	// noCrashOnVerificationFailure indicates whether the sandbox should panic
+	// whenever verification fails. If true, an error is returned instead of
+	// panicking. This should only be set for tests.
+	//
+	// TODO(b/165661693): Decide whether to panic or return error based on this
+	// flag.
+	noCrashOnVerificationFailure bool
 
-// noCrashOnVerificationFailure indicates whether the sandbox should panic
-// whenever verification fails. If true, an error is returned instead of
-// panicking. This should only be set for tests.
-// TOOD(b/165661693): Decide whether to panic or return error based on this
-// flag.
-var noCrashOnVerificationFailure bool
+	// verityMu synchronizes concurrent operations that enable verity and perform
+	// verification checks.
+	verityMu sync.RWMutex
+)
 
 // FilesystemType implements vfs.FilesystemType.
 //
@@ -334,25 +343,25 @@ func (fs *filesystem) newDentry() *dentry {
 		fs: fs,
 	}
 	d.vfsd.Init(d)
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(d, "verity.dentry")
-	}
+	refsvfs2.Register(d)
 	return d
 }
 
 // IncRef implements vfs.DentryImpl.IncRef.
 func (d *dentry) IncRef() {
-	atomic.AddInt64(&d.refs, 1)
+	r := atomic.AddInt64(&d.refs, 1)
+	refsvfs2.LogIncRef(d, r)
 }
 
 // TryIncRef implements vfs.DentryImpl.TryIncRef.
 func (d *dentry) TryIncRef() bool {
 	for {
-		refs := atomic.LoadInt64(&d.refs)
-		if refs <= 0 {
+		r := atomic.LoadInt64(&d.refs)
+		if r <= 0 {
 			return false
 		}
-		if atomic.CompareAndSwapInt64(&d.refs, refs, refs+1) {
+		if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
+			refsvfs2.LogTryIncRef(d, r+1)
 			return true
 		}
 	}
@@ -360,15 +369,27 @@ func (d *dentry) TryIncRef() bool {
 // DecRef implements vfs.DentryImpl.DecRef.
 func (d *dentry) DecRef(ctx context.Context) {
-	if refs := atomic.AddInt64(&d.refs, -1); refs == 0 {
+	r := atomic.AddInt64(&d.refs, -1)
+	refsvfs2.LogDecRef(d, r)
+	if r == 0 {
 		d.fs.renameMu.Lock()
 		d.checkDropLocked(ctx)
 		d.fs.renameMu.Unlock()
-	} else if refs < 0 {
+	} else if r < 0 {
 		panic("verity.dentry.DecRef() called without holding a reference")
 	}
 }
 
+func (d *dentry) decRefLocked(ctx context.Context) {
+	r := atomic.AddInt64(&d.refs, -1)
+	refsvfs2.LogDecRef(d, r)
+	if r == 0 {
+		d.checkDropLocked(ctx)
+	} else if r < 0 {
+		panic("verity.dentry.decRefLocked() called without holding a reference")
+	}
+}
+
 // checkDropLocked should be called after d's reference count becomes 0 or it
 // becomes deleted.
 func (d *dentry) checkDropLocked(ctx context.Context) {
@@ -399,26 +420,23 @@ func (d *dentry) destroyLocked(ctx context.Context) {
 	if d.lowerVD.Ok() {
 		d.lowerVD.DecRef(ctx)
 	}
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Unregister(d, "verity.dentry")
-	}
-
 	if d.lowerMerkleVD.Ok() {
 		d.lowerMerkleVD.DecRef(ctx)
 	}
-
 	if d.parent != nil {
 		d.parent.dirMu.Lock()
 		if !d.vfsd.IsDead() {
 			delete(d.parent.children, d.name)
 		}
 		d.parent.dirMu.Unlock()
-		if refs := atomic.AddInt64(&d.parent.refs, -1); refs == 0 {
-			d.parent.checkDropLocked(ctx)
-		} else if refs < 0 {
-			panic("verity.dentry.DecRef() called without holding a reference")
-		}
+		d.parent.decRefLocked(ctx)
 	}
+	refsvfs2.Unregister(d)
+}
+
+// RefType implements refsvfs2.CheckedObject.Type.
+func (d *dentry) RefType() string {
+	return "verity.dentry"
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
@@ -426,6 +444,14 @@ func (d *dentry) LeakMessage() string {
 	return fmt.Sprintf("[verity.dentry %p] reference count of %d instead of -1", d, atomic.LoadInt64(&d.refs))
 }
 
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+//
+// This should only be set to true for debugging purposes, as it can generate an
+// extremely large amount of output and drastically degrade performance.
+func (d *dentry) LogRefs() bool {
+	return false
+}
+
 // InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
 func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {
 	//TODO(b/159261227): Implement InotifyWithParent.
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index d452d2cda..3ea981ad4 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -107,9 +107,7 @@ func newMount(vfs *VirtualFilesystem, fs *Filesystem, root *Dentry, mntns *Mount
 	if opts.ReadOnly {
 		mnt.setReadOnlyLocked(true)
 	}
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(mnt, "vfs.Mount")
-	}
+	refsvfs2.Register(mnt)
 	return mnt
 }
 
@@ -474,11 +472,12 @@ func (vfs *VirtualFilesystem) disconnectLocked(mnt *Mount) VirtualDentry {
 // tryIncMountedRef does not require that a reference is held on mnt.
 func (mnt *Mount) tryIncMountedRef() bool {
 	for {
-		refs := atomic.LoadInt64(&mnt.refs)
-		if refs <= 0 { // refs < 0 => MSB set => eagerly unmounted
+		r := atomic.LoadInt64(&mnt.refs)
+		if r <= 0 { // r < 0 => MSB set => eagerly unmounted
 			return false
 		}
-		if atomic.CompareAndSwapInt64(&mnt.refs, refs, refs+1) {
+		if atomic.CompareAndSwapInt64(&mnt.refs, r, r+1) {
+			refsvfs2.LogTryIncRef(mnt, r+1)
 			return true
 		}
 	}
@@ -488,16 +487,15 @@ func (mnt *Mount) tryIncMountedRef() bool {
 func (mnt *Mount) IncRef() {
 	// In general, negative values for mnt.refs are valid because the MSB is
 	// the eager-unmount bit.
-	atomic.AddInt64(&mnt.refs, 1)
+	r := atomic.AddInt64(&mnt.refs, 1)
+	refsvfs2.LogIncRef(mnt, r)
 }
 
 // DecRef decrements mnt's reference count.
 func (mnt *Mount) DecRef(ctx context.Context) {
 	r := atomic.AddInt64(&mnt.refs, -1)
 	if r&^math.MinInt64 == 0 { // mask out MSB
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(mnt, "vfs.Mount")
-		}
+		refsvfs2.Unregister(mnt)
 		mnt.destroy(ctx)
 	}
 }
@@ -520,11 +518,24 @@ func (mnt *Mount) destroy(ctx context.Context) {
 	}
 }
 
+// RefType implements refsvfs2.CheckedObject.Type.
+func (mnt *Mount) RefType() string {
+	return "vfs.Mount"
+}
+
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (mnt *Mount) LeakMessage() string {
 	return fmt.Sprintf("[vfs.Mount %p] reference count of %d instead of 0", mnt, atomic.LoadInt64(&mnt.refs))
 }
 
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+//
+// This should only be set to true for debugging purposes, as it can generate an
+// extremely large amount of output and drastically degrade performance.
+func (mnt *Mount) LogRefs() bool {
+	return false
+}
+
 // DecRef decrements mntns' reference count.
 func (mntns *MountNamespace) DecRef(ctx context.Context) {
 	vfs := mntns.root.fs.VirtualFilesystem()
diff --git a/pkg/sentry/vfs/save_restore.go b/pkg/sentry/vfs/save_restore.go
index 46e50d55d..7723ed643 100644
--- a/pkg/sentry/vfs/save_restore.go
+++ b/pkg/sentry/vfs/save_restore.go
@@ -111,8 +111,8 @@ func (vfs *VirtualFilesystem) loadMounts(mounts []*Mount) {
 }
 
 func (mnt *Mount) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && atomic.LoadInt64(&mnt.refs) != 0 {
-		refsvfs2.Register(mnt, "vfs.Mount")
+	if atomic.LoadInt64(&mnt.refs) != 0 {
+		refsvfs2.Register(mnt)
 	}
 }
```
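
As a rough usage note (an illustrative sketch, not part of this change): with leak checking enabled and an object whose `LogRefs()` returns true, each register/unregister and each `LogIncRef`/`LogDecRef` call now emits a log line followed by a stack trace, and `DoLeakCheck` still warns about whatever remains registered at shutdown. The `demo` function below is hypothetical:

```go
package example

import "gvisor.dev/gvisor/pkg/refsvfs2"

// demo walks one object through the leak checker's logging surface.
func demo(obj refsvfs2.CheckedObject) {
	refsvfs2.Register(obj)     // logged as "registered" if obj.LogRefs() is true
	refsvfs2.LogIncRef(obj, 2) // logged as "IncRef to 2" with a stack trace
	refsvfs2.LogDecRef(obj, 1) // logged as "DecRef to 1"
	refsvfs2.Unregister(obj)   // logged as "unregistered"
	refsvfs2.DoLeakCheck()     // warns about any object still registered
}
```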