author     gVisor bot <gvisor-bot@google.com>  2020-11-09 16:36:50 +0000
committer  gVisor bot <gvisor-bot@google.com>  2020-11-09 16:36:50 +0000
commit     7dd056ef81a67dc95b3d079f20e58e771498220c
tree       f8a73367fa610fb309e60c7b8ffdddd13e6a6835 /pkg/sentry/kernel
parent     9e848922ed33f78bddbb7a772dceba5406c185a2
parent     0fb5353e45f166460d5846576c20479072207a06
Merge release-20201030.0-53-g0fb5353e4 (automated)
Diffstat (limited to 'pkg/sentry/kernel')
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go      | 33
-rw-r--r--  pkg/sentry/kernel/fd_table_unsafe.go    |  2
-rw-r--r--  pkg/sentry/kernel/fs_context.go         |  6
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go    | 33
-rw-r--r--  pkg/sentry/kernel/ipc_namespace.go      |  2
-rw-r--r--  pkg/sentry/kernel/ipc_namespace_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/session_refs.go       | 33
-rw-r--r--  pkg/sentry/kernel/sessions.go           |  6
-rw-r--r--  pkg/sentry/kernel/shm/shm.go            |  2
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go       | 33
11 files changed, 135 insertions, 81 deletions
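
Every file below follows one pattern. The generated reference-counting types (the *_refs.go files) previously kept refCount with a +1 bias, so that a default zero-value object already held one reference; this change removes the bias. Each type gains an InitRefs() method that stores an explicit count of 1 and registers the object with the leak checker, every construction site swaps EnableLeakCheck() for InitRefs(), and the refsvfs2 log calls are now gated behind the per-type enableLogging flag. Below is a minimal, self-contained sketch of the resulting pattern, not the generated gVisor source: the names are illustrative and leak-checker registration is elided. TryIncRef and a construction-site usage are sketched after the relevant diffs further down, completing the program.

package main

import (
	"fmt"
	"sync/atomic"
)

// Refs keeps a true (unbiased) reference count, mirroring the shape of the
// generated code in this commit.
type Refs struct {
	refCount int64
}

// InitRefs must run before first use: a zero-value Refs now holds zero
// references, not one. (The generated code also registers r with the
// refsvfs2 leak checker here.)
func (r *Refs) InitRefs() {
	atomic.StoreInt64(&r.refCount, 1)
}

// IncRef adds a reference. v is the new count, so any result <= 1 means the
// caller incremented from a non-positive count.
func (r *Refs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 1 {
		panic(fmt.Sprintf("incrementing non-positive count %p", r))
	}
}

// DecRef drops a reference and runs destroy when the count hits zero.
func (r *Refs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < 0:
		panic(fmt.Sprintf("decrementing non-positive count %p", r))
	case v == 0:
		if destroy != nil {
			destroy()
		}
	}
}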
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index 992606f36..f540ba371 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -20,9 +20,6 @@ var FDTableobj *FDTable
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type FDTableRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FDTableRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FDTableRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *FDTableRefs) RefType() string {
 	return fmt.Sprintf("%T", FDTableobj)[1:]
@@ -58,8 +62,7 @@ func (r *FDTableRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *FDTableRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FDTableRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *FDTableRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if FDTableenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *FDTableRefs) IncRef() {
 //go:nosplit
 func (r *FDTableRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if FDTableenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *FDTableRefs) TryIncRef() bool {
 //go:nosplit
 func (r *FDTableRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if FDTableenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/kernel/fd_table_unsafe.go b/pkg/sentry/kernel/fd_table_unsafe.go
index 3476551f3..470d8bf83 100644
--- a/pkg/sentry/kernel/fd_table_unsafe.go
+++ b/pkg/sentry/kernel/fd_table_unsafe.go
@@ -43,7 +43,7 @@ func (f *FDTable) initNoLeakCheck() {
 // init initializes the table with leak checking.
 func (f *FDTable) init() {
 	f.initNoLeakCheck()
-	f.EnableLeakCheck()
+	f.InitRefs()
 }
 
 // get gets a file entry.
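
The TryIncRef hunk above is the subtle part of the bias removal. refCount packs two 32-bit halves, speculative references above and real references below. TryIncRef first stakes a speculative reference at 1 << 32; while it is outstanding, a concurrent final DecRef sees a non-zero total and will not run the destructor. It then inspects the low half: under the old biased scheme a dead object showed a negative low half, whereas with a true count the dead state is a low half of exactly zero, hence int32(v) < 0 becoming int32(v) == 0. A commented sketch, continuing the illustrative Refs type from the block after the diffstat (not the generated source):

// speculativeRef is one reference in the upper, speculative half of refCount.
const speculativeRef = 1 << 32

// TryIncRef attempts to take a reference without looping, failing if the
// count has already reached zero.
func (r *Refs) TryIncRef() bool {
	// Publish a speculative reference; a concurrent DecRef that drains the
	// real count now still sees a non-zero total and skips destruction.
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// Low half is zero: the last real reference is already gone.
		// Roll back the speculative reference and report failure.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}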
diff --git a/pkg/sentry/kernel/fs_context.go b/pkg/sentry/kernel/fs_context.go
index 41fb2a784..dfde4deee 100644
--- a/pkg/sentry/kernel/fs_context.go
+++ b/pkg/sentry/kernel/fs_context.go
@@ -63,7 +63,7 @@ func newFSContext(root, cwd *fs.Dirent, umask uint) *FSContext {
 		cwd:   cwd,
 		umask: umask,
 	}
-	f.EnableLeakCheck()
+	f.InitRefs()
 	return &f
 }
 
@@ -76,7 +76,7 @@ func NewFSContextVFS2(root, cwd vfs.VirtualDentry, umask uint) *FSContext {
 		cwdVFS2: cwd,
 		umask:   umask,
 	}
-	f.EnableLeakCheck()
+	f.InitRefs()
 	return &f
 }
 
@@ -137,7 +137,7 @@ func (f *FSContext) Fork() *FSContext {
 		rootVFS2: f.rootVFS2,
 		umask:    f.umask,
 	}
-	ctx.EnableLeakCheck()
+	ctx.InitRefs()
 	return ctx
 }
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index ff812ab16..6510157c7 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -20,9 +20,6 @@ var FSContextobj *FSContext
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type FSContextRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FSContextRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FSContextRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *FSContextRefs) RefType() string {
 	return fmt.Sprintf("%T", FSContextobj)[1:]
@@ -58,8 +62,7 @@ func (r *FSContextRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *FSContextRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FSContextRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *FSContextRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if FSContextenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *FSContextRefs) IncRef() {
 //go:nosplit
 func (r *FSContextRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if FSContextenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *FSContextRefs) TryIncRef() bool {
 //go:nosplit
 func (r *FSContextRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if FSContextenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/kernel/ipc_namespace.go b/pkg/sentry/kernel/ipc_namespace.go
index b87e40dd1..9545bb5ef 100644
--- a/pkg/sentry/kernel/ipc_namespace.go
+++ b/pkg/sentry/kernel/ipc_namespace.go
@@ -41,7 +41,7 @@ func NewIPCNamespace(userNS *auth.UserNamespace) *IPCNamespace {
 		semaphores: semaphore.NewRegistry(userNS),
 		shms:       shm.NewRegistry(userNS),
 	}
-	ns.EnableLeakCheck()
+	ns.InitRefs()
 	return ns
 }
diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go
index 5b37e617a..c0acf2f50 100644
--- a/pkg/sentry/kernel/ipc_namespace_refs.go
+++ b/pkg/sentry/kernel/ipc_namespace_refs.go
@@ -20,9 +20,6 @@ var IPCNamespaceobj *IPCNamespace
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type IPCNamespaceRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type IPCNamespaceRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *IPCNamespaceRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *IPCNamespaceRefs) RefType() string {
 	return fmt.Sprintf("%T", IPCNamespaceobj)[1:]
@@ -58,8 +62,7 @@ func (r *IPCNamespaceRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *IPCNamespaceRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *IPCNamespaceRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *IPCNamespaceRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if IPCNamespaceenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *IPCNamespaceRefs) IncRef() {
 //go:nosplit
 func (r *IPCNamespaceRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if IPCNamespaceenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *IPCNamespaceRefs) TryIncRef() bool {
 //go:nosplit
 func (r *IPCNamespaceRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if IPCNamespaceenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index 29bd0b80f..a9cc69b35 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -20,9 +20,6 @@ var ProcessGroupobj *ProcessGroup
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type ProcessGroupRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type ProcessGroupRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ProcessGroupRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *ProcessGroupRefs) RefType() string {
 	return fmt.Sprintf("%T", ProcessGroupobj)[1:]
@@ -58,8 +62,7 @@ func (r *ProcessGroupRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *ProcessGroupRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *ProcessGroupRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *ProcessGroupRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if ProcessGroupenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *ProcessGroupRefs) IncRef() {
 //go:nosplit
 func (r *ProcessGroupRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if ProcessGroupenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *ProcessGroupRefs) TryIncRef() bool {
 //go:nosplit
 func (r *ProcessGroupRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if ProcessGroupenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index 430cb131c..0856ff261 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -20,9 +20,6 @@ var Sessionobj *Session
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type SessionRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type SessionRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SessionRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *SessionRefs) RefType() string {
 	return fmt.Sprintf("%T", Sessionobj)[1:]
@@ -58,8 +62,7 @@ func (r *SessionRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *SessionRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *SessionRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *SessionRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if SessionenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *SessionRefs) IncRef() {
 //go:nosplit
 func (r *SessionRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if SessionenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *SessionRefs) TryIncRef() bool {
 //go:nosplit
 func (r *SessionRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if SessionenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/kernel/sessions.go b/pkg/sentry/kernel/sessions.go
index 5bddb0a36..0cd9e2533 100644
--- a/pkg/sentry/kernel/sessions.go
+++ b/pkg/sentry/kernel/sessions.go
@@ -295,7 +295,7 @@ func (tg *ThreadGroup) createSession() error {
 		id:     SessionID(id),
 		leader: tg,
 	}
-	s.EnableLeakCheck()
+	s.InitRefs()
 
 	// Create a new ProcessGroup, belonging to that Session.
 	// This also has a single reference (assigned below).
@@ -309,7 +309,7 @@ func (tg *ThreadGroup) createSession() error {
 		session:   s,
 		ancestors: 0,
 	}
-	pg.refs.EnableLeakCheck()
+	pg.refs.InitRefs()
 
 	// Tie them and return the result.
 	s.processGroups.PushBack(pg)
@@ -395,7 +395,7 @@ func (tg *ThreadGroup) CreateProcessGroup() error {
 		originator: tg,
 		session:    tg.processGroup.session,
 	}
-	pg.refs.EnableLeakCheck()
+	pg.refs.InitRefs()
 
 	if tg.leader.parent != nil && tg.leader.parent.tg.processGroup.session == pg.session {
 		pg.ancestors++
diff --git a/pkg/sentry/kernel/shm/shm.go b/pkg/sentry/kernel/shm/shm.go
index ebbebf46b..92d60ba78 100644
--- a/pkg/sentry/kernel/shm/shm.go
+++ b/pkg/sentry/kernel/shm/shm.go
@@ -251,7 +251,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi
 		creatorPID: pid,
 		changeTime: ktime.NowFromContext(ctx),
 	}
-	shm.EnableLeakCheck()
+	shm.InitRefs()
 
 	// Find the next available ID.
 	for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ {
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 5f148594c..82ca1ed06 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -20,9 +20,6 @@ var Shmobj *Shm
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type ShmRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type ShmRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ShmRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *ShmRefs) RefType() string {
 	return fmt.Sprintf("%T", Shmobj)[1:]
@@ -58,8 +62,7 @@ func (r *ShmRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *ShmRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *ShmRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *ShmRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if ShmenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *ShmRefs) IncRef() {
 //go:nosplit
 func (r *ShmRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if ShmenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *ShmRefs) TryIncRef() bool {
 //go:nosplit
 func (r *ShmRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if ShmenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
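
The call-site edits above (fd_table_unsafe.go, fs_context.go, ipc_namespace.go, sessions.go, shm.go) all follow from the removed bias: since a zero-value refs object no longer holds one reference, each constructor must now call InitRefs() explicitly where it previously only enabled leak checking. A hypothetical usage completing the illustrative sketch begun after the diffstat (the counts in comments track the true reference count):

func main() {
	r := &Refs{} // zero value holds no references; using it now would panic
	r.InitRefs() // count = 1; this replaces the old EnableLeakCheck() call
	r.IncRef()   // 1 -> 2
	if r.TryIncRef() { // 2 -> 3
		r.DecRef(nil) // 3 -> 2
	}
	r.DecRef(nil) // 2 -> 1
	r.DecRef(func() { fmt.Println("destroyed at zero") }) // 1 -> 0
}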