Diffstat (limited to 'pkg/sentry/mm')
-rw-r--r--  pkg/sentry/mm/aio_context.go             7
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go     118
-rw-r--r--  pkg/sentry/mm/mm_state_autogen.go       60
-rw-r--r--  pkg/sentry/mm/special_mappable.go        7
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go 118
5 files changed, 296 insertions(+), 14 deletions(-)
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 16fea53c4..7bf48cb2c 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -17,7 +17,6 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
@@ -239,7 +238,7 @@ func (ctx *AIOContext) Drain() {
//
// +stateify savable
type aioMappable struct {
- refs.AtomicRefCount
+ aioMappableRefs

mfp pgalloc.MemoryFileProvider
fr memmap.FileRange
@@ -253,13 +252,13 @@ func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
return nil, err
}
m := aioMappable{mfp: mfp, fr: fr}
- m.EnableLeakCheck("mm.aioMappable")
+ m.EnableLeakCheck()
return &m, nil
}

// DecRef implements refs.RefCounter.DecRef.
func (m *aioMappable) DecRef(ctx context.Context) {
- m.AtomicRefCount.DecRefWithDestructor(ctx, func(context.Context) {
+ m.aioMappableRefs.DecRef(func() {
m.mfp.MemoryFile().DecRef(m.fr)
})
}
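
The net effect of this hunk: aioMappable now embeds a generated, type-specialized reference counter instead of the shared refs.AtomicRefCount, and the destructor is passed to DecRef as a plain closure. A minimal sketch of that embedding pattern with a deliberately simplified, non-atomic counter (thingRefs and thing are hypothetical names, not part of this change):

package main

import "fmt"

// thingRefs stands in for a generated counter such as aioMappableRefs.
// The real generated code uses sync/atomic; this sketch is single-threaded.
type thingRefs struct {
	refCount int64 // references held, minus one
}

// IncRef takes a reference.
func (r *thingRefs) IncRef() { r.refCount++ }

// DecRef drops a reference and runs destroy once the count reaches zero.
func (r *thingRefs) DecRef(destroy func()) {
	r.refCount--
	if r.refCount == -1 && destroy != nil {
		destroy()
	}
}

// thing embeds the counter the way aioMappable embeds aioMappableRefs;
// a zero-value thing starts life holding exactly one reference.
type thing struct {
	thingRefs
	data string
}

func main() {
	t := &thing{data: "payload"}
	t.IncRef()    // two references
	t.DecRef(nil) // back to one
	t.DecRef(func() { fmt.Println("destroying", t.data) }) // zero: destructor runs
}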
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
new file mode 100644
index 000000000..b99909f07
--- /dev/null
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -0,0 +1,118 @@
+package mm
+
+import (
+ "fmt"
+ "runtime"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/log"
+ refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+)
+
+// ownerType is used to customize logging. Note that we use a pointer to T so
+// that we do not copy the entire object when passed as a format parameter.
+var aioMappableownerType *aioMappable
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// TODO(gvisor.dev/issue/1486): Store stack traces when leak check is enabled in
+// a map with 16-bit hashes, and store the hash in the top 16 bits of refCount.
+// This will allow us to add stack trace information to the leak messages
+// without growing the size of Refs.
+//
+// +stateify savable
+type aioMappableRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+func (r *aioMappableRefs) finalize() {
+ var note string
+ switch refs_vfs1.GetLeakMode() {
+ case refs_vfs1.NoLeakChecking:
+ return
+ case refs_vfs1.UninitializedLeakChecking:
+ note = "(Leak checker uninitialized): "
+ }
+ if n := r.ReadRefs(); n != 0 {
+ log.Warningf("%sRefs %p owned by %T garbage collected with ref count of %d (want 0)", note, r, aioMappableownerType, n)
+ }
+}
+
+// EnableLeakCheck checks for reference leaks when Refs gets garbage collected.
+func (r *aioMappableRefs) EnableLeakCheck() {
+ if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {
+ runtime.SetFinalizer(r, (*aioMappableRefs).finalize)
+ }
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *aioMappableRefs) ReadRefs() int64 {
+ // The stored refCount is offset by -1, so add one back.
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *aioMappableRefs) IncRef() {
+ if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive ref count %p owned by %T", r, aioMappableownerType))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *aioMappableRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ v := atomic.AddInt64(&r.refCount, speculativeRef)
+ if int32(v) < 0 {
+ // This object has already been freed.
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ // Turn the speculative reference into a real reference.
+ atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *aioMappableRefs) DecRef(destroy func()) {
+ switch v := atomic.AddInt64(&r.refCount, -1); {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, aioMappableownerType))
+
+ case v == -1:
+ // Call the destructor.
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
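
TryIncRef above avoids a CompareAndSwap loop by packing two counters into one int64: speculative references in the upper 32 bits, real references (offset by -1) in the lower 32. This standalone sketch replays that arithmetic with the same speculativeRef constant; the tryIncRef helper is illustrative, not the generated API:

package main

import (
	"fmt"
	"sync/atomic"
)

const speculativeRef = 1 << 32 // one reference in the upper half

func tryIncRef(refCount *int64) bool {
	// Stake a speculative claim in the upper 32 bits.
	v := atomic.AddInt64(refCount, speculativeRef)
	if int32(v) < 0 {
		// The lower 32 bits read -1: all real references are gone,
		// so back out the speculative reference and refuse.
		atomic.AddInt64(refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(refCount, -speculativeRef+1)
	return true
}

func main() {
	var live int64 = 0                  // zero value: one real reference
	fmt.Println(tryIncRef(&live), live) // true 1 (two references now held)

	var dead int64 = -1                 // every real reference was dropped
	fmt.Println(tryIncRef(&dead), dead) // false -1 (resurrection refused)
}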
diff --git a/pkg/sentry/mm/mm_state_autogen.go b/pkg/sentry/mm/mm_state_autogen.go
index 2f81a8240..8ab51450c 100644
--- a/pkg/sentry/mm/mm_state_autogen.go
+++ b/pkg/sentry/mm/mm_state_autogen.go
@@ -92,7 +92,7 @@ func (x *aioMappable) StateTypeName() string {
func (x *aioMappable) StateFields() []string {
return []string{
- "AtomicRefCount",
+ "aioMappableRefs",
"mfp",
"fr",
}
@@ -102,7 +102,7 @@ func (x *aioMappable) beforeSave() {}
func (x *aioMappable) StateSave(m state.Sink) {
x.beforeSave()
- m.Save(0, &x.AtomicRefCount)
+ m.Save(0, &x.aioMappableRefs)
m.Save(1, &x.mfp)
m.Save(2, &x.fr)
}
@@ -110,11 +110,34 @@ func (x *aioMappable) StateSave(m state.Sink) {
func (x *aioMappable) afterLoad() {}

func (x *aioMappable) StateLoad(m state.Source) {
- m.Load(0, &x.AtomicRefCount)
+ m.Load(0, &x.aioMappableRefs)
m.Load(1, &x.mfp)
m.Load(2, &x.fr)
}

+func (x *aioMappableRefs) StateTypeName() string {
+ return "pkg/sentry/mm.aioMappableRefs"
+}
+
+func (x *aioMappableRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (x *aioMappableRefs) beforeSave() {}
+
+func (x *aioMappableRefs) StateSave(m state.Sink) {
+ x.beforeSave()
+ m.Save(0, &x.refCount)
+}
+
+func (x *aioMappableRefs) afterLoad() {}
+
+func (x *aioMappableRefs) StateLoad(m state.Source) {
+ m.Load(0, &x.refCount)
+}
+
func (x *fileRefcountSet) StateTypeName() string {
return "pkg/sentry/mm.fileRefcountSet"
}
@@ -565,7 +588,7 @@ func (x *SpecialMappable) StateTypeName() string {
func (x *SpecialMappable) StateFields() []string {
return []string{
- "AtomicRefCount",
+ "SpecialMappableRefs",
"mfp",
"fr",
"name",
@@ -576,7 +599,7 @@ func (x *SpecialMappable) beforeSave() {}
func (x *SpecialMappable) StateSave(m state.Sink) {
x.beforeSave()
- m.Save(0, &x.AtomicRefCount)
+ m.Save(0, &x.SpecialMappableRefs)
m.Save(1, &x.mfp)
m.Save(2, &x.fr)
m.Save(3, &x.name)
@@ -585,12 +608,35 @@ func (x *SpecialMappable) StateSave(m state.Sink) {
func (x *SpecialMappable) afterLoad() {}

func (x *SpecialMappable) StateLoad(m state.Source) {
- m.Load(0, &x.AtomicRefCount)
+ m.Load(0, &x.SpecialMappableRefs)
m.Load(1, &x.mfp)
m.Load(2, &x.fr)
m.Load(3, &x.name)
}

+func (x *SpecialMappableRefs) StateTypeName() string {
+ return "pkg/sentry/mm.SpecialMappableRefs"
+}
+
+func (x *SpecialMappableRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (x *SpecialMappableRefs) beforeSave() {}
+
+func (x *SpecialMappableRefs) StateSave(m state.Sink) {
+ x.beforeSave()
+ m.Save(0, &x.refCount)
+}
+
+func (x *SpecialMappableRefs) afterLoad() {}
+
+func (x *SpecialMappableRefs) StateLoad(m state.Source) {
+ m.Load(0, &x.refCount)
+}
+
func (x *vmaSet) StateTypeName() string {
return "pkg/sentry/mm.vmaSet"
}
@@ -693,6 +739,7 @@ func init() {
state.Register((*ioResult)(nil))
state.Register((*AIOContext)(nil))
state.Register((*aioMappable)(nil))
+ state.Register((*aioMappableRefs)(nil))
state.Register((*fileRefcountSet)(nil))
state.Register((*fileRefcountnode)(nil))
state.Register((*fileRefcountSegmentDataSlices)(nil))
@@ -706,6 +753,7 @@ func init() {
state.Register((*pmanode)(nil))
state.Register((*pmaSegmentDataSlices)(nil))
state.Register((*SpecialMappable)(nil))
+ state.Register((*SpecialMappableRefs)(nil))
state.Register((*vmaSet)(nil))
state.Register((*vmanode)(nil))
state.Register((*vmaSegmentDataSlices)(nil))
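
Every type registered above implements the same generated contract: StateTypeName and StateFields describe the type, while StateSave and StateLoad map each field to a stable index. A hand-written sketch of that shape for a hypothetical single-field type, using only the state.Sink/state.Source calls visible in this diff (exampleRefs does not exist in the tree):

package example

import "gvisor.dev/gvisor/pkg/state"

// exampleRefs is a hypothetical stand-in showing the shape stateify
// generates for a single-field type like aioMappableRefs.
type exampleRefs struct {
	refCount int64
}

func (x *exampleRefs) StateTypeName() string {
	return "example.exampleRefs"
}

func (x *exampleRefs) StateFields() []string {
	return []string{
		"refCount",
	}
}

func (x *exampleRefs) beforeSave() {}

// StateSave writes each field at the index assigned by StateFields.
func (x *exampleRefs) StateSave(m state.Sink) {
	x.beforeSave()
	m.Save(0, &x.refCount)
}

func (x *exampleRefs) afterLoad() {}

// StateLoad reads fields back by the same indices.
func (x *exampleRefs) StateLoad(m state.Source) {
	m.Load(0, &x.refCount)
}

func init() {
	// Registration makes the type known to the save/restore machinery.
	state.Register((*exampleRefs)(nil))
}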
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 4cdb52eb6..f4c93baeb 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -16,7 +16,6 @@ package mm

import (
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
@@ -31,7 +30,7 @@ import (
//
// +stateify savable
type SpecialMappable struct {
- refs.AtomicRefCount
+ SpecialMappableRefs

mfp pgalloc.MemoryFileProvider
fr memmap.FileRange
@@ -45,13 +44,13 @@ type SpecialMappable struct {
// Preconditions: fr.Length() != 0.
func NewSpecialMappable(name string, mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *SpecialMappable {
m := SpecialMappable{mfp: mfp, fr: fr, name: name}
- m.EnableLeakCheck("mm.SpecialMappable")
+ m.EnableLeakCheck()
return &m
}

// DecRef implements refs.RefCounter.DecRef.
func (m *SpecialMappable) DecRef(ctx context.Context) {
- m.AtomicRefCount.DecRefWithDestructor(ctx, func(context.Context) {
+ m.SpecialMappableRefs.DecRef(func() {
m.mfp.MemoryFile().DecRef(m.fr)
})
}
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
new file mode 100644
index 000000000..035bbe690
--- /dev/null
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -0,0 +1,118 @@
+package mm
+
+import (
+ "fmt"
+ "runtime"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/log"
+ refs_vfs1 "gvisor.dev/gvisor/pkg/refs"
+)
+
+// ownerType is used to customize logging. Note that we use a pointer to T so
+// that we do not copy the entire object when passed as a format parameter.
+var SpecialMappableownerType *SpecialMappable
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// TODO(gvisor.dev/issue/1486): Store stack traces when leak check is enabled in
+// a map with 16-bit hashes, and store the hash in the top 16 bits of refCount.
+// This will allow us to add stack trace information to the leak messages
+// without growing the size of Refs.
+//
+// +stateify savable
+type SpecialMappableRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+func (r *SpecialMappableRefs) finalize() {
+ var note string
+ switch refs_vfs1.GetLeakMode() {
+ case refs_vfs1.NoLeakChecking:
+ return
+ case refs_vfs1.UninitializedLeakChecking:
+ note = "(Leak checker uninitialized): "
+ }
+ if n := r.ReadRefs(); n != 0 {
+ log.Warningf("%sRefs %p owned by %T garbage collected with ref count of %d (want 0)", note, r, SpecialMappableownerType, n)
+ }
+}
+
+// EnableLeakCheck checks for reference leaks when Refs gets garbage collected.
+func (r *SpecialMappableRefs) EnableLeakCheck() {
+ if refs_vfs1.GetLeakMode() != refs_vfs1.NoLeakChecking {
+ runtime.SetFinalizer(r, (*SpecialMappableRefs).finalize)
+ }
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *SpecialMappableRefs) ReadRefs() int64 {
+ // The stored refCount is offset by -1, so add one back.
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *SpecialMappableRefs) IncRef() {
+ if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive ref count %p owned by %T", r, SpecialMappableownerType))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *SpecialMappableRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ v := atomic.AddInt64(&r.refCount, speculativeRef)
+ if int32(v) < 0 {
+ // This object has already been freed.
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ // Turn the speculative reference into a real reference.
+ atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *SpecialMappableRefs) DecRef(destroy func()) {
+ switch v := atomic.AddInt64(&r.refCount, -1); {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %T", r, SpecialMappableownerType))
+
+ case v == -1:
+ // Call the destructor.
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
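
Finally, EnableLeakCheck works by attaching a runtime finalizer that complains if the object is garbage collected while references remain. A simplified, self-contained sketch of that mechanism (it omits the refs.GetLeakMode gating the real code performs, and finalizer timing is inherently best-effort):

package main

import (
	"fmt"
	"runtime"
	"time"
)

type leakChecked struct {
	refCount int64 // references held, minus one
}

// enableLeakCheck registers finalize to run when the GC collects r.
func (r *leakChecked) enableLeakCheck() {
	runtime.SetFinalizer(r, (*leakChecked).finalize)
}

// finalize warns if the object still holds references at collection time.
func (r *leakChecked) finalize() {
	if n := r.refCount + 1; n != 0 {
		fmt.Printf("leakChecked %p garbage collected with ref count of %d (want 0)\n", r, n)
	}
}

func main() {
	r := &leakChecked{} // zero value: one outstanding reference
	r.enableLeakCheck()
	r = nil // drop the last pointer without a matching DecRef
	_ = r

	runtime.GC()                       // trigger collection
	time.Sleep(100 * time.Millisecond) // let the finalizer goroutine run
}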