author     gVisor bot <gvisor-bot@google.com>   2020-10-29 01:29:16 +0000
committer  gVisor bot <gvisor-bot@google.com>   2020-10-29 01:29:16 +0000
commit     a50cfffd384c07e11b5d3ad21f3a096bf1ff4521 (patch)
tree       9a0f389bd874a44d32d9729c118e4f338d92d198 /pkg/sentry/kernel
parent     0cc41c9357ae7c1db41ad74f2830253c42b007ce (diff)
parent     3b4674ffe0e6ef1b016333ee726293ecf70c4e4e (diff)
Merge release-20201019.0-95-g3b4674ffe (automated)
Diffstat (limited to 'pkg/sentry/kernel')
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go       56
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go     56
-rw-r--r--  pkg/sentry/kernel/ipc_namespace_refs.go  56
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go  56
-rw-r--r--  pkg/sentry/kernel/session_refs.go        56
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go        56
6 files changed, 216 insertions, 120 deletions
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index cbf2e85ed..992606f36 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var FDTableownerType *FDTable
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FDTableenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FDTableobj *FDTable
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type FDTableRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FDTableRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", FDTableownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FDTableRefs) RefType() string {
+	return fmt.Sprintf("%T", FDTableobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *FDTableRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", FDTableownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FDTableRefs) LogRefs() bool {
+	return FDTableenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *FDTableRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *FDTableRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *FDTableRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, FDTableownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *FDTableRefs) IncRef() {
 //go:nosplit
 func (r *FDTableRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *FDTableRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *FDTableRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, FDTableownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", FDTableownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *FDTableRefs) DecRef(destroy func()) {
 }
 
 func (r *FDTableRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index 025f11faa..ff812ab16 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var FSContextownerType *FSContext
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FSContextenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FSContextobj *FSContext
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type FSContextRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *FSContextRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", FSContextownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FSContextRefs) RefType() string {
+	return fmt.Sprintf("%T", FSContextobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *FSContextRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", FSContextownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FSContextRefs) LogRefs() bool {
+	return FSContextenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *FSContextRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *FSContextRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *FSContextRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, FSContextownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *FSContextRefs) IncRef() {
 //go:nosplit
 func (r *FSContextRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *FSContextRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *FSContextRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, FSContextownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", FSContextownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *FSContextRefs) DecRef(destroy func()) {
 }
 
 func (r *FSContextRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go
index aec0f7a41..5b37e617a 100644
--- a/pkg/sentry/kernel/ipc_namespace_refs.go
+++ b/pkg/sentry/kernel/ipc_namespace_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var IPCNamespaceownerType *IPCNamespace
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const IPCNamespaceenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var IPCNamespaceobj *IPCNamespace
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type IPCNamespaceRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *IPCNamespaceRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", IPCNamespaceownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *IPCNamespaceRefs) RefType() string {
+	return fmt.Sprintf("%T", IPCNamespaceobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *IPCNamespaceRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", IPCNamespaceownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *IPCNamespaceRefs) LogRefs() bool {
+	return IPCNamespaceenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *IPCNamespaceRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *IPCNamespaceRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *IPCNamespaceRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, IPCNamespaceownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *IPCNamespaceRefs) IncRef() {
 //go:nosplit
 func (r *IPCNamespaceRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *IPCNamespaceRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *IPCNamespaceRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, IPCNamespaceownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", IPCNamespaceownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *IPCNamespaceRefs) DecRef(destroy func()) {
 }
 
 func (r *IPCNamespaceRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index 1f4486817..29bd0b80f 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var ProcessGroupownerType *ProcessGroup
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const ProcessGroupenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var ProcessGroupobj *ProcessGroup
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type ProcessGroupRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *ProcessGroupRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", ProcessGroupownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ProcessGroupRefs) RefType() string {
+	return fmt.Sprintf("%T", ProcessGroupobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *ProcessGroupRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", ProcessGroupownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ProcessGroupRefs) LogRefs() bool {
+	return ProcessGroupenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *ProcessGroupRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *ProcessGroupRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *ProcessGroupRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, ProcessGroupownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *ProcessGroupRefs) IncRef() {
 //go:nosplit
 func (r *ProcessGroupRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *ProcessGroupRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *ProcessGroupRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, ProcessGroupownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", ProcessGroupownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *ProcessGroupRefs) DecRef(destroy func()) {
 }
 
 func (r *ProcessGroupRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index 86df93be1..430cb131c 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var SessionownerType *Session
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const SessionenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Sessionobj *Session
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type SessionRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *SessionRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", SessionownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *SessionRefs) RefType() string {
+	return fmt.Sprintf("%T", Sessionobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *SessionRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", SessionownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *SessionRefs) LogRefs() bool {
+	return SessionenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *SessionRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *SessionRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *SessionRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, SessionownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *SessionRefs) IncRef() {
 //go:nosplit
 func (r *SessionRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *SessionRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *SessionRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, SessionownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", SessionownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *SessionRefs) DecRef(destroy func()) {
 }
 
 func (r *SessionRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 58b0e80cf..5f148594c 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -7,9 +7,15 @@ import (
 	"gvisor.dev/gvisor/pkg/refsvfs2"
 )
 
-// ownerType is used to customize logging. Note that we use a pointer to T so
-// that we do not copy the entire object when passed as a format parameter.
-var ShmownerType *Shm
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const ShmenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Shmobj *Shm
 
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
@@ -29,16 +35,24 @@ type ShmRefs struct {
 	refCount int64
 }
 
-// EnableLeakCheck enables reference leak checking on r.
-func (r *ShmRefs) EnableLeakCheck() {
-	if refsvfs2.LeakCheckEnabled() {
-		refsvfs2.Register(r, fmt.Sprintf("%T", ShmownerType))
-	}
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ShmRefs) RefType() string {
+	return fmt.Sprintf("%T", Shmobj)[1:]
 }
 
 // LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
 func (r *ShmRefs) LeakMessage() string {
-	return fmt.Sprintf("%T %p: reference count of %d instead of 0", ShmownerType, r, r.ReadRefs())
+	return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ShmRefs) LogRefs() bool {
+	return ShmenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *ShmRefs) EnableLeakCheck() {
+	refsvfs2.Register(r)
 }
 
 // ReadRefs returns the current number of references. The returned count is
@@ -52,8 +66,10 @@ func (r *ShmRefs) ReadRefs() int64 {
 //
 //go:nosplit
 func (r *ShmRefs) IncRef() {
-	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
-		panic(fmt.Sprintf("Incrementing non-positive count %p on %T", r, ShmownerType))
+	v := atomic.AddInt64(&r.refCount, 1)
+	refsvfs2.LogIncRef(r, v+1)
+	if v <= 0 {
+		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
 
@@ -66,14 +82,14 @@ func (r *ShmRefs) IncRef() {
 //go:nosplit
 func (r *ShmRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	v := atomic.AddInt64(&r.refCount, speculativeRef)
-	if int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
-	atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+	refsvfs2.LogTryIncRef(r, v+1)
 	return true
 }
 
@@ -90,14 +106,14 @@ func (r *ShmRefs) TryIncRef() bool {
 //
 //go:nosplit
 func (r *ShmRefs) DecRef(destroy func()) {
-	switch v := atomic.AddInt64(&r.refCount, -1); {
+	v := atomic.AddInt64(&r.refCount, -1)
+	refsvfs2.LogDecRef(r, v+1)
+	switch {
 	case v < -1:
-		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, ShmownerType))
+		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
 	case v == -1:
-		if refsvfs2.LeakCheckEnabled() {
-			refsvfs2.Unregister(r, fmt.Sprintf("%T", ShmownerType))
-		}
+		refsvfs2.Unregister(r)
 
 		if destroy != nil {
 			destroy()
@@ -106,7 +122,7 @@ func (r *ShmRefs) DecRef(destroy func()) {
 }
 
 func (r *ShmRefs) afterLoad() {
-	if refsvfs2.LeakCheckEnabled() && r.ReadRefs() > 0 {
+	if r.ReadRefs() > 0 {
 		r.EnableLeakCheck()
 	}
 }
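Note on the new API: the change moves these generated types off the old Register/Unregister-with-name calls and onto methods of refsvfs2.CheckedObject (RefType, LeakMessage, LogRefs). The shape below is inferred only from the methods added in this diff; the real interface in pkg/refsvfs2 may carry additional requirements.

// Rough sketch inferred from this diff; see pkg/refsvfs2 for the real definition.
type CheckedObject interface {
	// RefType is the type name used for leak-check registration and log messages.
	RefType() string
	// LeakMessage is emitted if the object is still registered when leaks are checked.
	LeakMessage() string
	// LogRefs reports whether IncRef/DecRef events on this object should be logged.
	LogRefs() bool
}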
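The RefType implementations rely on a small fmt trick: formatting a nil typed pointer with %T yields the pointer type's name without copying any object, and slicing off the first byte drops the leading '*'. A standalone illustration (package and type names here are placeholders, not gVisor code):

package main

import "fmt"

type FDTable struct{} // stand-in type; only its name matters

func main() {
	var obj *FDTable                        // nil pointer: nothing is ever copied
	fmt.Println(fmt.Sprintf("%T", obj))     // "*main.FDTable"
	fmt.Println(fmt.Sprintf("%T", obj)[1:]) // "main.FDTable" -- what RefType returns
}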
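The counting scheme itself is easiest to see in isolation. As the generated code reads, the stored value is the reference count minus one (the zero value already holds one reference, and DecRef destroys when the counter reaches -1), and TryIncRef parks a "speculative" reference in the upper 32 bits so that a concurrent final DecRef cannot observe the -1 sentinel while TryIncRef inspects the low 32 bits for a live reference. The toy below is a simplified re-implementation for illustration only, without the refsvfs2 logging and leak-check hooks:

package main

import (
	"fmt"
	"sync/atomic"
)

type toyRefs struct {
	refCount int64 // real count minus one; the zero value holds one reference
}

func (r *toyRefs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
		panic("IncRef on released object")
	}
}

func (r *toyRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
		// Low 32 bits were negative: no real reference remained. Back out.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

func (r *toyRefs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < -1:
		panic("DecRef on released object")
	case v == -1:
		destroy() // last reference dropped
	}
}

func main() {
	r := &toyRefs{}                               // one reference held
	fmt.Println(r.TryIncRef())                    // true: a live reference exists -> now two
	r.DecRef(func() { fmt.Println("destroy #1") }) // not called: one reference remains
	r.DecRef(func() { fmt.Println("destroyed") })  // final reference: prints "destroyed"
	fmt.Println(r.TryIncRef())                    // false: the count already hit zero
}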