From 74bc6e56ccd999998805e142622314f86dce770c Mon Sep 17 00:00:00 2001
From: Ayush Ranjan
Date: Wed, 18 Nov 2020 23:08:04 -0800
Subject: [vfs] kernfs: Do not panic if destroyed dentry is cached.

If a kernfs user does not cache dentries, then cacheLocked will destroy the
dentry. The current DecRef implementation is racy in this case, as the
following can happen:
- Goroutine 1 calls DecRef and decreases the ref count from 1 to 0.
- Goroutine 2 acquires d.fs.mu for reading and calls IncRef, increasing the
  ref count from 0 to 1.
- Goroutine 2 releases d.fs.mu and calls DecRef again, decreasing the ref
  count from 1 to 0.
- Goroutine 1 now acquires d.fs.mu and calls cacheLocked, which destroys the
  dentry.
- Goroutine 2 now acquires d.fs.mu and calls cacheLocked, only to find that
  the dentry has already been destroyed!

Earlier we would panic in this case; we can instead just return rather than
adding complexity to handle this race. This is similar to what the gofer
client does. We do not want to lock d.fs.mu in the case that the filesystem
caches dentries (the common case, as procfs and sysfs do this), to prevent
congestion due to lock contention.

PiperOrigin-RevId: 343229496
---
 pkg/sentry/fsimpl/kernfs/kernfs.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/sentry/fsimpl/kernfs/kernfs.go b/pkg/sentry/fsimpl/kernfs/kernfs.go
index c14abcff4..565d723f0 100644
--- a/pkg/sentry/fsimpl/kernfs/kernfs.go
+++ b/pkg/sentry/fsimpl/kernfs/kernfs.go
@@ -286,7 +286,7 @@ func (d *Dentry) cacheLocked(ctx context.Context) {
 	refs := atomic.LoadInt64(&d.refs)
 	if refs == -1 {
 		// Dentry has already been destroyed.
-		panic(fmt.Sprintf("cacheLocked called on a dentry which has already been destroyed: %v", d))
+		return
 	}
 	if refs > 0 {
 		if d.cached {
-- 
cgit v1.2.3
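
The sketch below illustrates the refcount/caching interaction described in the
commit message. It is a simplified stand-in, not the actual kernfs code: the
dentry, filesystem, IncRef, DecRef, and cacheLocked shown here are assumed,
stripped-down types (no LRU cache, no revalidation path), kept only to show why
returning early on an already-destroyed dentry is safe.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type filesystem struct {
	mu sync.RWMutex
}

type dentry struct {
	fs   *filesystem
	refs int64 // -1 means the dentry has been destroyed.
}

// IncRef is assumed to be called with fs.mu held at least for reading, as in
// the path the commit message describes.
func (d *dentry) IncRef() {
	atomic.AddInt64(&d.refs, 1)
}

// DecRef drops a reference. Only a caller that drives the count to zero takes
// fs.mu and calls cacheLocked, so two racing callers can each observe a
// 1 -> 0 transition before either acquires the lock.
func (d *dentry) DecRef() {
	if atomic.AddInt64(&d.refs, -1) == 0 {
		d.fs.mu.Lock()
		d.cacheLocked()
		d.fs.mu.Unlock()
	}
}

// cacheLocked destroys the dentry when it is unreferenced and not cached. A
// racing caller may already have destroyed it; in that case we just return,
// which is the behavior the patch introduces in place of the panic.
func (d *dentry) cacheLocked() {
	refs := atomic.LoadInt64(&d.refs)
	if refs == -1 {
		// Already destroyed by a racing caller.
		return
	}
	if refs > 0 {
		// Re-referenced after the count hit zero; nothing to do.
		return
	}
	// This sketch never caches, mirroring a kernfs user that does not cache
	// dentries: destroy immediately.
	atomic.StoreInt64(&d.refs, -1)
	fmt.Println("dentry destroyed")
}

func main() {
	fs := &filesystem{}
	d := &dentry{fs: fs, refs: 1}
	d.DecRef() // drives refs to 0 and destroys the dentry

	// A racing caller that also saw refs hit zero would arrive here after the
	// destruction; it finds refs == -1 and returns without panicking.
	fs.mu.Lock()
	d.cacheLocked()
	fs.mu.Unlock()
}

The key point mirrors the patch: two goroutines can each drive the count from
1 to 0 (via an intervening IncRef) before either acquires fs.mu, so cacheLocked
can legitimately run on an already-destroyed dentry, and treating refs == -1 as
a benign early return is simpler than serializing those callers.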