Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/aio_context.go            |  2
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go      | 33
-rw-r--r--  pkg/sentry/mm/special_mappable.go       |  2
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go  | 33
4 files changed, 44 insertions(+), 26 deletions(-)
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 7bf48cb2c..4c8cd38ed 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -252,7 +252,7 @@ func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
 		return nil, err
 	}
 	m := aioMappable{mfp: mfp, fr: fr}
-	m.EnableLeakCheck()
+	m.InitRefs()
 	return &m, nil
 }
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
index 9d94bf879..500477c1f 100644
--- a/pkg/sentry/mm/aio_mappable_refs.go
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -20,9 +20,6 @@ var aioMappableobj *aioMappable
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type aioMappableRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type aioMappableRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *aioMappableRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *aioMappableRefs) RefType() string {
 	return fmt.Sprintf("%T", aioMappableobj)[1:]
@@ -58,8 +62,7 @@ func (r *aioMappableRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *aioMappableRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *aioMappableRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *aioMappableRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if aioMappableenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *aioMappableRefs) IncRef() {
 //go:nosplit
 func (r *aioMappableRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 		// This object has already been freed.
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	// Turn into a real reference.
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if aioMappableenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *aioMappableRefs) TryIncRef() bool {
 //go:nosplit
 func (r *aioMappableRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if aioMappableenableLogging {
+		refsvfs2.LogDecRef(r, v)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 2dbe5b751..48d8b6a2b 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -44,7 +44,7 @@ type SpecialMappable struct {
 // Preconditions: fr.Length() != 0.
 func NewSpecialMappable(name string, mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *SpecialMappable {
 	m := SpecialMappable{mfp: mfp, fr: fr, name: name}
-	m.EnableLeakCheck()
+	m.InitRefs()
 	return &m
 }
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
index f17d4361f..60b4b7e92 100644
--- a/pkg/sentry/mm/special_mappable_refs.go
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -20,9 +20,6 @@ var SpecialMappableobj *SpecialMappable
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type SpecialMappableRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type SpecialMappableRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SpecialMappableRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *SpecialMappableRefs) RefType() string {
 	return fmt.Sprintf("%T", SpecialMappableobj)[1:]
@@ -58,8 +62,7 @@ func (r *SpecialMappableRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *SpecialMappableRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *SpecialMappableRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *SpecialMappableRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if SpecialMappableenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *SpecialMappableRefs) IncRef() {
 //go:nosplit
 func (r *SpecialMappableRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 		// This object has already been freed.
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	// Turn into a real reference.
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if SpecialMappableenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
@@ -107,12 +114,14 @@ func (r *SpecialMappableRefs) TryIncRef() bool {
 //go:nosplit
 func (r *SpecialMappableRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if SpecialMappableenableLogging {
+		refsvfs2.LogDecRef(r, v)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
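
The net effect of the change above is that the generated counters now store the real reference count: InitRefs stores 1 explicitly instead of a zero value implicitly meaning one reference, so every sentinel shifts by one (IncRef panics on v <= 1, TryIncRef bails on int32(v) == 0, DecRef checks v < 0 and v == 0), and ref-change logging runs only when the per-type enableLogging flag is set. The standalone Go sketch below illustrates the unbiased scheme, including TryIncRef's speculative reference carried in the high 32 bits of the same int64. It is a minimal illustration only: it omits gVisor's refsvfs2 registration and logging, and the type and helper names are made up for the example.

// refcount_sketch.go - an illustrative, standalone version of the unbiased
// counting scheme used by the generated *_refs.go files above.
package main

import (
	"fmt"
	"sync/atomic"
)

type refs struct {
	// Low 32 bits hold real references; high 32 bits hold transient
	// speculative references taken by TryIncRef.
	refCount int64
}

// InitRefs stores 1 directly: a freshly initialized object holds exactly
// one reference, with no +1 bias on the stored value.
func (r *refs) InitRefs() {
	atomic.StoreInt64(&r.refCount, 1)
}

// IncRef adds a real reference. v is the post-increment count, so v <= 1
// means the count was non-positive before the increment.
func (r *refs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 1 {
		panic(fmt.Sprintf("incrementing non-positive count %p", r))
	}
}

// TryIncRef acquires a reference without a CAS loop: it first takes a
// speculative reference in the high 32 bits, and if the low 32 bits (the
// real count) are zero, the object is already dead and the speculation is
// rolled back. Otherwise the speculation converts into a real reference.
func (r *refs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// Already freed: undo the speculative reference.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// DecRef drops a reference and runs destroy when the count reaches zero.
// With the bias gone, the sentinels are 0 and negative rather than -1.
func (r *refs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	switch {
	case v < 0:
		panic(fmt.Sprintf("decrementing non-positive count %p", r))
	case v == 0:
		if destroy != nil {
			destroy()
		}
	}
}

func main() {
	var r refs
	r.InitRefs()               // count = 1
	r.IncRef()                 // count = 2
	fmt.Println(r.TryIncRef()) // true; count = 3
	r.DecRef(nil)
	r.DecRef(nil)
	r.DecRef(func() { fmt.Println("destroyed") })
	fmt.Println(r.TryIncRef()) // false: object already destroyed
}

Running the sketch prints true, destroyed, false: once the real count in the low 32 bits hits zero, a later TryIncRef sees int32(v) == 0 and rolls its speculative reference back instead of resurrecting the object.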
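
InitRefs also takes over leak-check registration: an object is registered when its count is initialized and unregistered in DecRef when the count reaches zero, so anything still registered at checkpoint or exit is a leak. A toy registry along these lines (purely illustrative; gVisor's actual refsvfs2 package tracks typed objects and supports multiple leak-check modes) could look like:

// leakcheck_sketch.go - an illustrative stand-in for the Register/Unregister
// calls made by InitRefs and DecRef in the diff above.
package main

import (
	"fmt"
	"sync"
)

type checkedObject interface {
	RefType() string
}

var (
	mu   sync.Mutex
	live = map[checkedObject]struct{}{}
)

// Register records obj as live; InitRefs would call this.
func Register(obj checkedObject) {
	mu.Lock()
	defer mu.Unlock()
	live[obj] = struct{}{}
}

// Unregister removes obj; DecRef would call this when the count hits zero.
func Unregister(obj checkedObject) {
	mu.Lock()
	defer mu.Unlock()
	delete(live, obj)
}

// DoLeakCheck reports every object that was registered but never destroyed.
func DoLeakCheck() {
	mu.Lock()
	defer mu.Unlock()
	for obj := range live {
		fmt.Printf("leaked %s (%p)\n", obj.RefType(), obj)
	}
}

type widget struct{}

func (*widget) RefType() string { return "widget" }

func main() {
	w1, w2 := &widget{}, &widget{}
	Register(w1)  // as InitRefs would on creation
	Register(w2)
	Unregister(w1) // as DecRef would on destruction
	DoLeakCheck()  // reports w2 as leaked
}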