author     gVisor bot <gvisor-bot@google.com>   2020-11-09 16:36:50 +0000
committer  gVisor bot <gvisor-bot@google.com>   2020-11-09 16:36:50 +0000
commit     7dd056ef81a67dc95b3d079f20e58e771498220c (patch)
tree       f8a73367fa610fb309e60c7b8ffdddd13e6a6835 /pkg
parent     9e848922ed33f78bddbb7a772dceba5406c185a2 (diff)
parent     0fb5353e45f166460d5846576c20479072207a06 (diff)

Merge release-20201030.0-53-g0fb5353e4 (automated)
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/fsimpl/devpts/devpts.go |  2
-rw-r--r--  pkg/sentry/fsimpl/devpts/root_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/fuse/fusefs.go |  8
-rw-r--r--  pkg/sentry/fsimpl/fuse/inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go | 12
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/host/host.go |  2
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/host/socket.go |  6
-rw-r--r--  pkg/sentry/fsimpl/kernfs/inode_impl_util.go |  2
-rw-r--r--  pkg/sentry/fsimpl/kernfs/kernfs.go | 16
-rw-r--r--  pkg/sentry/fsimpl/kernfs/static_directory_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/overlay/overlay.go | 16
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks.go |  2
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/proc/task.go |  2
-rw-r--r--  pkg/sentry/fsimpl/proc/task_fds.go |  4
-rw-r--r--  pkg/sentry/fsimpl/proc/task_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks.go |  2
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/sys/dir_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/sys/sys.go |  2
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go | 33
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/tmpfs.go |  2
-rw-r--r--  pkg/sentry/kernel/fd_table_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/fd_table_unsafe.go |  2
-rw-r--r--  pkg/sentry/kernel/fs_context.go |  6
-rw-r--r--  pkg/sentry/kernel/fs_context_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/ipc_namespace.go |  2
-rw-r--r--  pkg/sentry/kernel/ipc_namespace_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/process_group_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/session_refs.go | 33
-rw-r--r--  pkg/sentry/kernel/sessions.go |  6
-rw-r--r--  pkg/sentry/kernel/shm/shm.go |  2
-rw-r--r--  pkg/sentry/kernel/shm/shm_refs.go | 33
-rw-r--r--  pkg/sentry/mm/aio_context.go |  2
-rw-r--r--  pkg/sentry/mm/aio_mappable_refs.go | 33
-rw-r--r--  pkg/sentry/mm/special_mappable.go |  2
-rw-r--r--  pkg/sentry/mm/special_mappable_refs.go | 33
-rw-r--r--  pkg/sentry/socket/unix/socket_refs.go | 33
-rw-r--r--  pkg/sentry/socket/unix/socket_vfs2_refs.go | 33
-rw-r--r--  pkg/sentry/socket/unix/transport/connectioned.go |  8
-rw-r--r--  pkg/sentry/socket/unix/transport/connectionless.go |  2
-rw-r--r--  pkg/sentry/socket/unix/transport/queue_refs.go | 33
-rw-r--r--  pkg/sentry/socket/unix/unix.go |  2
-rw-r--r--  pkg/sentry/socket/unix/unix_vfs2.go |  2
-rw-r--r--  pkg/sentry/vfs/file_description.go |  2
-rw-r--r--  pkg/sentry/vfs/file_description_refs.go | 33
-rw-r--r--  pkg/sentry/vfs/filesystem.go |  2
-rw-r--r--  pkg/sentry/vfs/filesystem_refs.go | 33
-rw-r--r--  pkg/sentry/vfs/mount.go | 13
-rw-r--r--  pkg/sentry/vfs/mount_namespace_refs.go | 33
-rw-r--r--  pkg/tcpip/link/tun/device.go |  2
-rw-r--r--  pkg/tcpip/link/tun/tun_endpoint_refs.go | 33
57 files changed, 667 insertions(+), 390 deletions(-)
diff --git a/pkg/sentry/fsimpl/devpts/devpts.go b/pkg/sentry/fsimpl/devpts/devpts.go
index 346cca558..d8c237753 100644
--- a/pkg/sentry/fsimpl/devpts/devpts.go
+++ b/pkg/sentry/fsimpl/devpts/devpts.go
@@ -110,7 +110,7 @@ func (fstype *FilesystemType) newFilesystem(ctx context.Context, vfsObj *vfs.Vir
}
root.InodeAttrs.Init(ctx, creds, linux.UNNAMED_MAJOR, devMinor, 1, linux.ModeDirectory|0555)
root.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- root.EnableLeakCheck()
+ root.InitRefs()
var rootD kernfs.Dentry
rootD.InitRoot(&fs.Filesystem, root)
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
index cbafd113a..9246cf66e 100644
--- a/pkg/sentry/fsimpl/devpts/root_inode_refs.go
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -20,9 +20,6 @@ var rootInodeobj *rootInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type rootInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type rootInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *rootInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *rootInodeRefs) RefType() string {
return fmt.Sprintf("%T", rootInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *rootInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *rootInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *rootInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *rootInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if rootInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *rootInodeRefs) IncRef() {
//go:nosplit
func (r *rootInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if rootInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *rootInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *rootInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if rootInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
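The generated change above is easier to read side by side. Under the old convention the stored refCount was biased by one, so a zero-value object already held a reference and every log call had to report v+1; the new convention stores the true count and requires an explicit InitRefs() before the object is used. A minimal standalone sketch of the two conventions (toy types, not the generated gVisor code):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// oldRefs models the old convention: the true count is refCount + 1, so a
// zero-value object already owns one reference and needs no initializer.
type oldRefs struct{ refCount int64 }

func (r *oldRefs) read() int64 { return atomic.LoadInt64(&r.refCount) + 1 }

// newRefs models the new convention: the stored value is the true count,
// and InitRefs must run before the object is shared.
type newRefs struct{ refCount int64 }

func (r *newRefs) InitRefs()   { atomic.StoreInt64(&r.refCount, 1) }
func (r *newRefs) read() int64 { return atomic.LoadInt64(&r.refCount) }

func main() {
	var o oldRefs
	var n newRefs
	n.InitRefs()
	fmt.Println(o.read(), n.read()) // both print 1: same semantics, simpler arithmetic
}
```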
diff --git a/pkg/sentry/fsimpl/fuse/fusefs.go b/pkg/sentry/fsimpl/fuse/fusefs.go
index 6de416da0..cd0eb56e5 100644
--- a/pkg/sentry/fsimpl/fuse/fusefs.go
+++ b/pkg/sentry/fsimpl/fuse/fusefs.go
@@ -219,16 +219,12 @@ func newFUSEFilesystem(ctx context.Context, devMinor uint32, opts *filesystemOpt
}
fuseFD := device.Impl().(*DeviceFD)
-
fs := &filesystem{
devMinor: devMinor,
opts: opts,
conn: conn,
}
-
- fs.VFSFilesystem().IncRef()
fuseFD.fs = fs
-
return fs, nil
}
@@ -288,7 +284,7 @@ func (fs *filesystem) newRoot(ctx context.Context, creds *auth.Credentials, mode
i := &inode{fs: fs, nodeID: 1}
i.InodeAttrs.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, 1, linux.ModeDirectory|0755)
i.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- i.EnableLeakCheck()
+ i.InitRefs()
var d kernfs.Dentry
d.InitRoot(&fs.Filesystem, i)
@@ -301,7 +297,7 @@ func (fs *filesystem) newInode(ctx context.Context, nodeID uint64, attr linux.FU
i.InodeAttrs.Init(ctx, &creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.FileMode(attr.Mode))
atomic.StoreUint64(&i.size, attr.Size)
i.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- i.EnableLeakCheck()
+ i.InitRefs()
return i
}
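For context on the fusefs hunk above: the deleted fs.VFSFilesystem().IncRef() appears to have held an extra filesystem reference that nothing released, which this commit removes alongside the switch to InitRefs. A toy model of a leak of that shape and its fix (hypothetical names, not the fuse code):

```go
package main

import "fmt"

type fs struct{ refs int64 }

func (f *fs) IncRef() { f.refs++ }

func (f *fs) DecRef() {
	f.refs--
	if f.refs == 0 {
		fmt.Println("filesystem released")
	}
}

// newLeaky models the removed pattern: the constructor takes an extra
// reference that no caller is responsible for dropping.
func newLeaky() *fs { f := &fs{refs: 1}; f.IncRef(); return f }

// newFixed hands the caller exactly the initial reference.
func newFixed() *fs { return &fs{refs: 1} }

func main() {
	newFixed().DecRef() // prints "filesystem released"
	newLeaky().DecRef() // never releases: one reference has leaked
}
```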
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
index 31cb3791f..37a39e976 100644
--- a/pkg/sentry/fsimpl/fuse/inode_refs.go
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -20,9 +20,6 @@ var inodeobj *inode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type inodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type inodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *inodeRefs) RefType() string {
return fmt.Sprintf("%T", inodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *inodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *inodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *inodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *inodeRefs) IncRef() {
//go:nosplit
func (r *inodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *inodeRefs) TryIncRef() bool {
//go:nosplit
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index b59f62652..53bcc9986 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -1236,7 +1236,9 @@ func (d *dentry) IncRef() {
// d.refs may be 0 if d.fs.renameMu is locked, which serializes against
// d.checkCachingLocked().
r := atomic.AddInt64(&d.refs, 1)
- refsvfs2.LogIncRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogIncRef(d, r)
+ }
}
// TryIncRef implements vfs.DentryImpl.TryIncRef.
@@ -1247,7 +1249,9 @@ func (d *dentry) TryIncRef() bool {
return false
}
if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
- refsvfs2.LogTryIncRef(d, r+1)
+ if d.LogRefs() {
+ refsvfs2.LogTryIncRef(d, r+1)
+ }
return true
}
}
@@ -1267,7 +1271,9 @@ func (d *dentry) DecRef(ctx context.Context) {
// responsible for ensuring that d.checkCachingLocked will be called later.
func (d *dentry) decRefNoCaching() int64 {
r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogDecRef(d, r)
+ }
if r < 0 {
panic("gofer.dentry.decRefNoCaching() called without holding a reference")
}
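The gofer hunks above show the other half of the change, repeated below for kernfs and overlay dentries: the unconditional refsvfs2.Log* calls are wrapped in a boolean guard, so disabled logging costs one predictable branch instead of a function call on every reference operation. A generic sketch of the guard (logRefs is a stand-in for gVisor's per-object LogRefs() test):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// logRefs stands in for a per-object "should I log?" flag.
var logRefs = false

type dentry struct{ refs int64 }

func logIncRef(d *dentry, n int64) { fmt.Printf("IncRef %p -> %d\n", d, n) }

// IncRef bumps the count; when logging is disabled the cost is one atomic
// add plus a single branch.
func (d *dentry) IncRef() {
	r := atomic.AddInt64(&d.refs, 1)
	if logRefs {
		logIncRef(d, r)
	}
}

func main() {
	d := &dentry{refs: 1}
	d.IncRef()
	fmt.Println(atomic.LoadInt64(&d.refs)) // 2
}
```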
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
index faa9d2fd2..3f5f4ebc3 100644
--- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -20,9 +20,6 @@ var ConnectedEndpointobj *ConnectedEndpoint
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type ConnectedEndpointRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type ConnectedEndpointRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ConnectedEndpointRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *ConnectedEndpointRefs) RefType() string {
return fmt.Sprintf("%T", ConnectedEndpointobj)[1:]
@@ -58,8 +62,7 @@ func (r *ConnectedEndpointRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ConnectedEndpointRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *ConnectedEndpointRefs) ReadRefs() int64 {
//go:nosplit
func (r *ConnectedEndpointRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *ConnectedEndpointRefs) IncRef() {
//go:nosplit
func (r *ConnectedEndpointRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *ConnectedEndpointRefs) TryIncRef() bool {
//go:nosplit
func (r *ConnectedEndpointRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if ConnectedEndpointenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/host/host.go b/pkg/sentry/fsimpl/host/host.go
index 39b902a3e..435a21d77 100644
--- a/pkg/sentry/fsimpl/host/host.go
+++ b/pkg/sentry/fsimpl/host/host.go
@@ -126,8 +126,8 @@ func newInode(ctx context.Context, fs *filesystem, hostFD int, savable bool, fil
isTTY: isTTY,
savable: savable,
}
+ i.InitRefs()
i.CachedMappable.Init(hostFD)
- i.EnableLeakCheck()
// If the hostFD can return EWOULDBLOCK when set to non-blocking, do so and
// handle blocking behavior in the sentry.
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
index ef2f56522..4c850a7ac 100644
--- a/pkg/sentry/fsimpl/host/inode_refs.go
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -20,9 +20,6 @@ var inodeobj *inode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type inodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type inodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *inodeRefs) RefType() string {
return fmt.Sprintf("%T", inodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *inodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *inodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *inodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *inodeRefs) IncRef() {
//go:nosplit
func (r *inodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *inodeRefs) TryIncRef() bool {
//go:nosplit
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/host/socket.go b/pkg/sentry/fsimpl/host/socket.go
index 8a447e29f..60acc367f 100644
--- a/pkg/sentry/fsimpl/host/socket.go
+++ b/pkg/sentry/fsimpl/host/socket.go
@@ -84,6 +84,8 @@ type ConnectedEndpoint struct {
// init performs initialization required for creating new ConnectedEndpoints and
// for restoring them.
func (c *ConnectedEndpoint) init() *syserr.Error {
+ c.InitRefs()
+
family, err := syscall.GetsockoptInt(c.fd, syscall.SOL_SOCKET, syscall.SO_DOMAIN)
if err != nil {
return syserr.FromError(err)
@@ -132,7 +134,6 @@ func NewConnectedEndpoint(ctx context.Context, hostFD int, addr string, saveable
// ConnectedEndpointRefs start off with a single reference. We need two.
e.IncRef()
- e.EnableLeakCheck()
return &e, nil
}
@@ -376,8 +377,7 @@ func NewSCMEndpoint(ctx context.Context, hostFD int, queue *waiter.Queue, addr s
return nil, err
}
- // ConnectedEndpointRefs start off with a single reference. We need two.
+ // e starts off with a single reference. We need two.
e.IncRef()
- e.EnableLeakCheck()
return &e, nil
}
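With InitRefs the endpoint is created already holding its initial reference, so the constructors above keep only the single extra IncRef for the second holder, and the comment is reworded to match. A toy sketch of the one-object, two-owners pattern:

```go
package main

import "fmt"

type endpoint struct{ refs int64 }

func (e *endpoint) InitRefs() { e.refs = 1 }
func (e *endpoint) IncRef()   { e.refs++ }

// DecRef reports whether the last reference was just dropped.
func (e *endpoint) DecRef() bool {
	e.refs--
	return e.refs == 0
}

func main() {
	var e endpoint
	e.InitRefs()            // first owner: the returned endpoint
	e.IncRef()              // second owner: e.g. the connected peer
	fmt.Println(e.DecRef()) // false: one owner remains
	fmt.Println(e.DecRef()) // true: last reference dropped
}
```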
diff --git a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
index d83c17f83..eac578f25 100644
--- a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
+++ b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
@@ -660,7 +660,7 @@ var _ Inode = (*StaticDirectory)(nil)
func NewStaticDir(ctx context.Context, creds *auth.Credentials, devMajor, devMinor uint32, ino uint64, perm linux.FileMode, children map[string]Inode, fdOpts GenericDirectoryFDOptions) Inode {
inode := &StaticDirectory{}
inode.Init(ctx, creds, devMajor, devMinor, ino, perm, fdOpts)
- inode.EnableLeakCheck()
+ inode.InitRefs()
inode.OrderedChildren.Init(OrderedChildrenOptions{})
links := inode.OrderedChildren.Populate(children)
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs.go b/pkg/sentry/fsimpl/kernfs/kernfs.go
index abb477c7d..c14abcff4 100644
--- a/pkg/sentry/fsimpl/kernfs/kernfs.go
+++ b/pkg/sentry/fsimpl/kernfs/kernfs.go
@@ -222,7 +222,9 @@ func (d *Dentry) IncRef() {
// d.refs may be 0 if d.fs.mu is locked, which serializes against
// d.cacheLocked().
r := atomic.AddInt64(&d.refs, 1)
- refsvfs2.LogIncRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogIncRef(d, r)
+ }
}
// TryIncRef implements vfs.DentryImpl.TryIncRef.
@@ -233,7 +235,9 @@ func (d *Dentry) TryIncRef() bool {
return false
}
if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
- refsvfs2.LogTryIncRef(d, r+1)
+ if d.LogRefs() {
+ refsvfs2.LogTryIncRef(d, r+1)
+ }
return true
}
}
@@ -242,7 +246,9 @@ func (d *Dentry) TryIncRef() bool {
// DecRef implements vfs.DentryImpl.DecRef.
func (d *Dentry) DecRef(ctx context.Context) {
r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogDecRef(d, r)
+ }
if r == 0 {
d.fs.mu.Lock()
d.cacheLocked(ctx)
@@ -254,7 +260,9 @@ func (d *Dentry) DecRef(ctx context.Context) {
func (d *Dentry) decRefLocked(ctx context.Context) {
r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogDecRef(d, r)
+ }
if r == 0 {
d.cacheLocked(ctx)
} else if r < 0 {
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
index d86cf76fe..cdf6374a3 100644
--- a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -20,9 +20,6 @@ var StaticDirectoryobj *StaticDirectory
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type StaticDirectoryRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type StaticDirectoryRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *StaticDirectoryRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *StaticDirectoryRefs) RefType() string {
return fmt.Sprintf("%T", StaticDirectoryobj)[1:]
@@ -58,8 +62,7 @@ func (r *StaticDirectoryRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *StaticDirectoryRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *StaticDirectoryRefs) ReadRefs() int64 {
//go:nosplit
func (r *StaticDirectoryRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *StaticDirectoryRefs) IncRef() {
//go:nosplit
func (r *StaticDirectoryRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *StaticDirectoryRefs) TryIncRef() bool {
//go:nosplit
func (r *StaticDirectoryRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if StaticDirectoryenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
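The TryIncRef hunks rely on the split encoding described in the struct comment these diffs leave unchanged: the low 32 bits of refCount hold real references and the high bits hold speculative ones, so a single 64-bit add of 1<<32 reserves a reference without touching the real count. Under the new scheme the failure test becomes int32(v) == 0, meaning the real count already reached zero and the object is being destroyed. A self-contained sketch of the technique:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const speculativeRef = 1 << 32

// refs packs two counters into one int64: the low 32 bits are real
// references, the high bits are in-flight speculative references.
type refs struct{ refCount int64 }

func (r *refs) TryIncRef() bool {
	// Reserve a speculative reference; int32(v) still reads the real count.
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		// Real count already hit zero: roll back and fail.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

func main() {
	live := &refs{refCount: 1}
	dead := &refs{refCount: 0}
	fmt.Println(live.TryIncRef()) // true: count is now 2
	fmt.Println(dead.TryIncRef()) // false: object already destroyed
}
```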
diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
index 446837e30..69b41668a 100644
--- a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
+++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
@@ -20,9 +20,6 @@ var syntheticDirectoryobj *syntheticDirectory
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type syntheticDirectoryRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type syntheticDirectoryRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *syntheticDirectoryRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *syntheticDirectoryRefs) RefType() string {
return fmt.Sprintf("%T", syntheticDirectoryobj)[1:]
@@ -58,8 +62,7 @@ func (r *syntheticDirectoryRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *syntheticDirectoryRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *syntheticDirectoryRefs) ReadRefs() int64 {
//go:nosplit
func (r *syntheticDirectoryRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *syntheticDirectoryRefs) IncRef() {
//go:nosplit
func (r *syntheticDirectoryRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *syntheticDirectoryRefs) TryIncRef() bool {
//go:nosplit
func (r *syntheticDirectoryRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if syntheticDirectoryenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/overlay/overlay.go b/pkg/sentry/fsimpl/overlay/overlay.go
index 73130bc8d..3492409b2 100644
--- a/pkg/sentry/fsimpl/overlay/overlay.go
+++ b/pkg/sentry/fsimpl/overlay/overlay.go
@@ -514,7 +514,9 @@ func (d *dentry) IncRef() {
// d.refs may be 0 if d.fs.renameMu is locked, which serializes against
// d.checkDropLocked().
r := atomic.AddInt64(&d.refs, 1)
- refsvfs2.LogIncRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogIncRef(d, r)
+ }
}
// TryIncRef implements vfs.DentryImpl.TryIncRef.
@@ -525,7 +527,9 @@ func (d *dentry) TryIncRef() bool {
return false
}
if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
- refsvfs2.LogTryIncRef(d, r+1)
+ if d.LogRefs() {
+ refsvfs2.LogTryIncRef(d, r+1)
+ }
return true
}
}
@@ -534,7 +538,9 @@ func (d *dentry) TryIncRef() bool {
// DecRef implements vfs.DentryImpl.DecRef.
func (d *dentry) DecRef(ctx context.Context) {
r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogDecRef(d, r)
+ }
if r == 0 {
d.fs.renameMu.Lock()
d.checkDropLocked(ctx)
@@ -546,7 +552,9 @@ func (d *dentry) DecRef(ctx context.Context) {
func (d *dentry) decRefLocked(ctx context.Context) {
r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
+ if d.LogRefs() {
+ refsvfs2.LogDecRef(d, r)
+ }
if r == 0 {
d.checkDropLocked(ctx)
} else if r < 0 {
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
index 895298eb9..4644809bd 100644
--- a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -20,9 +20,6 @@ var fdDirInodeobj *fdDirInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type fdDirInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type fdDirInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *fdDirInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *fdDirInodeRefs) RefType() string {
return fmt.Sprintf("%T", fdDirInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *fdDirInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *fdDirInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *fdDirInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *fdDirInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if fdDirInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *fdDirInodeRefs) IncRef() {
//go:nosplit
func (r *fdDirInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if fdDirInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *fdDirInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *fdDirInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if fdDirInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
index 8e3458485..dbc7e3f5a 100644
--- a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -20,9 +20,6 @@ var fdInfoDirInodeobj *fdInfoDirInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type fdInfoDirInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type fdInfoDirInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *fdInfoDirInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *fdInfoDirInodeRefs) RefType() string {
return fmt.Sprintf("%T", fdInfoDirInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *fdInfoDirInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *fdInfoDirInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *fdInfoDirInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *fdInfoDirInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *fdInfoDirInodeRefs) IncRef() {
//go:nosplit
func (r *fdInfoDirInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *fdInfoDirInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *fdInfoDirInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if fdInfoDirInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/proc/subtasks.go b/pkg/sentry/fsimpl/proc/subtasks.go
index cb3c5e0fd..e001d5032 100644
--- a/pkg/sentry/fsimpl/proc/subtasks.go
+++ b/pkg/sentry/fsimpl/proc/subtasks.go
@@ -60,7 +60,7 @@ func (fs *filesystem) newSubtasks(task *kernel.Task, pidns *kernel.PIDNamespace,
// Note: credentials are overridden by taskOwnedInode.
subInode.InodeAttrs.Init(task, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0555)
subInode.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- subInode.EnableLeakCheck()
+ subInode.InitRefs()
inode := &taskOwnedInode{Inode: subInode, owner: task}
return inode
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
index c609e618a..993251646 100644
--- a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -20,9 +20,6 @@ var subtasksInodeobj *subtasksInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type subtasksInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type subtasksInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *subtasksInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *subtasksInodeRefs) RefType() string {
return fmt.Sprintf("%T", subtasksInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *subtasksInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *subtasksInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *subtasksInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *subtasksInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if subtasksInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *subtasksInodeRefs) IncRef() {
//go:nosplit
func (r *subtasksInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if subtasksInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *subtasksInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *subtasksInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if subtasksInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/proc/task.go b/pkg/sentry/fsimpl/proc/task.go
index 19011b010..dc46a09bc 100644
--- a/pkg/sentry/fsimpl/proc/task.go
+++ b/pkg/sentry/fsimpl/proc/task.go
@@ -91,7 +91,7 @@ func (fs *filesystem) newTaskInode(task *kernel.Task, pidns *kernel.PIDNamespace
taskInode := &taskInode{task: task}
// Note: credentials are overridden by taskOwnedInode.
taskInode.InodeAttrs.Init(task, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0555)
- taskInode.EnableLeakCheck()
+ taskInode.InitRefs()
inode := &taskOwnedInode{Inode: taskInode, owner: task}
diff --git a/pkg/sentry/fsimpl/proc/task_fds.go b/pkg/sentry/fsimpl/proc/task_fds.go
index d268b44be..3ec4471f5 100644
--- a/pkg/sentry/fsimpl/proc/task_fds.go
+++ b/pkg/sentry/fsimpl/proc/task_fds.go
@@ -128,7 +128,7 @@ func (fs *filesystem) newFDDirInode(task *kernel.Task) kernfs.Inode {
},
}
inode.InodeAttrs.Init(task, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0555)
- inode.EnableLeakCheck()
+ inode.InitRefs()
inode.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
return inode
}
@@ -265,7 +265,7 @@ func (fs *filesystem) newFDInfoDirInode(task *kernel.Task) kernfs.Inode {
},
}
inode.InodeAttrs.Init(task, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0555)
- inode.EnableLeakCheck()
+ inode.InitRefs()
inode.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
return inode
}
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
index 2ee58bd62..632251e75 100644
--- a/pkg/sentry/fsimpl/proc/task_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -20,9 +20,6 @@ var taskInodeobj *taskInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type taskInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type taskInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *taskInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *taskInodeRefs) RefType() string {
return fmt.Sprintf("%T", taskInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *taskInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *taskInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *taskInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *taskInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if taskInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *taskInodeRefs) IncRef() {
//go:nosplit
func (r *taskInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if taskInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *taskInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *taskInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if taskInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/proc/tasks.go b/pkg/sentry/fsimpl/proc/tasks.go
index b81ea14bf..151d1f10d 100644
--- a/pkg/sentry/fsimpl/proc/tasks.go
+++ b/pkg/sentry/fsimpl/proc/tasks.go
@@ -83,7 +83,7 @@ func (fs *filesystem) newTasksInode(ctx context.Context, k *kernel.Kernel, pidns
cgroupControllers: cgroupControllers,
}
inode.InodeAttrs.Init(ctx, root, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0555)
- inode.EnableLeakCheck()
+ inode.InitRefs()
inode.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
links := inode.OrderedChildren.Populate(contents)
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
index 71dcaed9e..0b2af4269 100644
--- a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -20,9 +20,6 @@ var tasksInodeobj *tasksInode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type tasksInodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type tasksInodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *tasksInodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *tasksInodeRefs) RefType() string {
return fmt.Sprintf("%T", tasksInodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *tasksInodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *tasksInodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *tasksInodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *tasksInodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if tasksInodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *tasksInodeRefs) IncRef() {
//go:nosplit
func (r *tasksInodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if tasksInodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *tasksInodeRefs) TryIncRef() bool {
//go:nosplit
func (r *tasksInodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if tasksInodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
index 176a4bf98..a45aa7f78 100644
--- a/pkg/sentry/fsimpl/sys/dir_refs.go
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -20,9 +20,6 @@ var dirobj *dir
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type dirRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type dirRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *dirRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *dirRefs) RefType() string {
return fmt.Sprintf("%T", dirobj)[1:]
@@ -58,8 +62,7 @@ func (r *dirRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *dirRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *dirRefs) ReadRefs() int64 {
//go:nosplit
func (r *dirRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if direnableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *dirRefs) IncRef() {
//go:nosplit
func (r *dirRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if direnableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *dirRefs) TryIncRef() bool {
//go:nosplit
func (r *dirRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if direnableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/sys/sys.go b/pkg/sentry/fsimpl/sys/sys.go
index 506a2a0f0..79bc3fe88 100644
--- a/pkg/sentry/fsimpl/sys/sys.go
+++ b/pkg/sentry/fsimpl/sys/sys.go
@@ -160,7 +160,7 @@ func (fs *filesystem) newDir(ctx context.Context, creds *auth.Credentials, mode
d := &dir{}
d.InodeAttrs.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|0755)
d.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- d.EnableLeakCheck()
+ d.InitRefs()
d.IncLinks(d.OrderedChildren.Populate(contents))
return d
}
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
index eff30ca09..51ee15409 100644
--- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -20,9 +20,6 @@ var inodeobj *inode
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type inodeRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type inodeRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *inodeRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *inodeRefs) RefType() string {
return fmt.Sprintf("%T", inodeobj)[1:]
@@ -58,8 +62,7 @@ func (r *inodeRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *inodeRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *inodeRefs) ReadRefs() int64 {
//go:nosplit
func (r *inodeRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if inodeenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *inodeRefs) IncRef() {
//go:nosplit
func (r *inodeRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *inodeRefs) TryIncRef() bool {
//go:nosplit
func (r *inodeRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if inodeenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index 4ce859d57..85a3dfe20 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -402,7 +402,7 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth
i.mtime = now
// i.nlink initialized by caller
i.impl = impl
- i.refs.EnableLeakCheck()
+ i.refs.InitRefs()
}
// incLinksLocked increments i's link count.
diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go
index 992606f36..f540ba371 100644
--- a/pkg/sentry/kernel/fd_table_refs.go
+++ b/pkg/sentry/kernel/fd_table_refs.go
@@ -20,9 +20,6 @@ var FDTableobj *FDTable
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type FDTableRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FDTableRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FDTableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *FDTableRefs) RefType() string {
return fmt.Sprintf("%T", FDTableobj)[1:]
@@ -58,8 +62,7 @@ func (r *FDTableRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FDTableRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FDTableRefs) ReadRefs() int64 {
//go:nosplit
func (r *FDTableRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if FDTableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *FDTableRefs) IncRef() {
//go:nosplit
func (r *FDTableRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if FDTableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *FDTableRefs) TryIncRef() bool {
//go:nosplit
func (r *FDTableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if FDTableenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/kernel/fd_table_unsafe.go b/pkg/sentry/kernel/fd_table_unsafe.go
index 3476551f3..470d8bf83 100644
--- a/pkg/sentry/kernel/fd_table_unsafe.go
+++ b/pkg/sentry/kernel/fd_table_unsafe.go
@@ -43,7 +43,7 @@ func (f *FDTable) initNoLeakCheck() {
// init initializes the table with leak checking.
func (f *FDTable) init() {
f.initNoLeakCheck()
- f.EnableLeakCheck()
+ f.InitRefs()
}
// get gets a file entry.
diff --git a/pkg/sentry/kernel/fs_context.go b/pkg/sentry/kernel/fs_context.go
index 41fb2a784..dfde4deee 100644
--- a/pkg/sentry/kernel/fs_context.go
+++ b/pkg/sentry/kernel/fs_context.go
@@ -63,7 +63,7 @@ func newFSContext(root, cwd *fs.Dirent, umask uint) *FSContext {
cwd: cwd,
umask: umask,
}
- f.EnableLeakCheck()
+ f.InitRefs()
return &f
}
@@ -76,7 +76,7 @@ func NewFSContextVFS2(root, cwd vfs.VirtualDentry, umask uint) *FSContext {
cwdVFS2: cwd,
umask: umask,
}
- f.EnableLeakCheck()
+ f.InitRefs()
return &f
}
@@ -137,7 +137,7 @@ func (f *FSContext) Fork() *FSContext {
rootVFS2: f.rootVFS2,
umask: f.umask,
}
- ctx.EnableLeakCheck()
+ ctx.InitRefs()
return ctx
}
diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go
index ff812ab16..6510157c7 100644
--- a/pkg/sentry/kernel/fs_context_refs.go
+++ b/pkg/sentry/kernel/fs_context_refs.go
@@ -20,9 +20,6 @@ var FSContextobj *FSContext
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type FSContextRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FSContextRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FSContextRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *FSContextRefs) RefType() string {
return fmt.Sprintf("%T", FSContextobj)[1:]
@@ -58,8 +62,7 @@ func (r *FSContextRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FSContextRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FSContextRefs) ReadRefs() int64 {
//go:nosplit
func (r *FSContextRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if FSContextenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *FSContextRefs) IncRef() {
//go:nosplit
func (r *FSContextRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if FSContextenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *FSContextRefs) TryIncRef() bool {
//go:nosplit
func (r *FSContextRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if FSContextenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
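
The TryIncRef change is the subtlest one: the speculative reference lives entirely in the upper 32 bits (1 << 32), so int32(v) extracts the real count from the lower 32 bits, and the failure test moves from int32(v) < 0 (biased zero was -1) to int32(v) == 0 (direct zero). A worked sketch on a bare int64:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const speculativeRef = 1 << 32 // sits above the low 32 bits

    func tryIncRef(refCount *int64) bool {
        // Stage a speculative reference without disturbing the real count.
        if v := atomic.AddInt64(refCount, speculativeRef); int32(v) == 0 {
            // The real count is already zero: back out and fail.
            atomic.AddInt64(refCount, -speculativeRef)
            return false
        }
        // Convert the speculative reference into a real one.
        atomic.AddInt64(refCount, -speculativeRef+1)
        return true
    }

    func main() {
        live := int64(1)
        fmt.Println(tryIncRef(&live), live) // true 2
        dead := int64(0)
        fmt.Println(tryIncRef(&dead), dead) // false 0
    }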
diff --git a/pkg/sentry/kernel/ipc_namespace.go b/pkg/sentry/kernel/ipc_namespace.go
index b87e40dd1..9545bb5ef 100644
--- a/pkg/sentry/kernel/ipc_namespace.go
+++ b/pkg/sentry/kernel/ipc_namespace.go
@@ -41,7 +41,7 @@ func NewIPCNamespace(userNS *auth.UserNamespace) *IPCNamespace {
semaphores: semaphore.NewRegistry(userNS),
shms: shm.NewRegistry(userNS),
}
- ns.EnableLeakCheck()
+ ns.InitRefs()
return ns
}
diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go
index 5b37e617a..c0acf2f50 100644
--- a/pkg/sentry/kernel/ipc_namespace_refs.go
+++ b/pkg/sentry/kernel/ipc_namespace_refs.go
@@ -20,9 +20,6 @@ var IPCNamespaceobj *IPCNamespace
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type IPCNamespaceRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type IPCNamespaceRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *IPCNamespaceRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *IPCNamespaceRefs) RefType() string {
return fmt.Sprintf("%T", IPCNamespaceobj)[1:]
@@ -58,8 +62,7 @@ func (r *IPCNamespaceRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *IPCNamespaceRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *IPCNamespaceRefs) ReadRefs() int64 {
//go:nosplit
func (r *IPCNamespaceRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *IPCNamespaceRefs) IncRef() {
//go:nosplit
func (r *IPCNamespaceRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *IPCNamespaceRefs) TryIncRef() bool {
//go:nosplit
func (r *IPCNamespaceRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if IPCNamespaceenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go
index 29bd0b80f..a9cc69b35 100644
--- a/pkg/sentry/kernel/process_group_refs.go
+++ b/pkg/sentry/kernel/process_group_refs.go
@@ -20,9 +20,6 @@ var ProcessGroupobj *ProcessGroup
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type ProcessGroupRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type ProcessGroupRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ProcessGroupRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *ProcessGroupRefs) RefType() string {
return fmt.Sprintf("%T", ProcessGroupobj)[1:]
@@ -58,8 +62,7 @@ func (r *ProcessGroupRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ProcessGroupRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *ProcessGroupRefs) ReadRefs() int64 {
//go:nosplit
func (r *ProcessGroupRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if ProcessGroupenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *ProcessGroupRefs) IncRef() {
//go:nosplit
func (r *ProcessGroupRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if ProcessGroupenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *ProcessGroupRefs) TryIncRef() bool {
//go:nosplit
func (r *ProcessGroupRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if ProcessGroupenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go
index 430cb131c..0856ff261 100644
--- a/pkg/sentry/kernel/session_refs.go
+++ b/pkg/sentry/kernel/session_refs.go
@@ -20,9 +20,6 @@ var Sessionobj *Session
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type SessionRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type SessionRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SessionRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *SessionRefs) RefType() string {
return fmt.Sprintf("%T", Sessionobj)[1:]
@@ -58,8 +62,7 @@ func (r *SessionRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *SessionRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *SessionRefs) ReadRefs() int64 {
//go:nosplit
func (r *SessionRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if SessionenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *SessionRefs) IncRef() {
//go:nosplit
func (r *SessionRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if SessionenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *SessionRefs) TryIncRef() bool {
//go:nosplit
func (r *SessionRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if SessionenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/kernel/sessions.go b/pkg/sentry/kernel/sessions.go
index 5bddb0a36..0cd9e2533 100644
--- a/pkg/sentry/kernel/sessions.go
+++ b/pkg/sentry/kernel/sessions.go
@@ -295,7 +295,7 @@ func (tg *ThreadGroup) createSession() error {
id: SessionID(id),
leader: tg,
}
- s.EnableLeakCheck()
+ s.InitRefs()
// Create a new ProcessGroup, belonging to that Session.
// This also has a single reference (assigned below).
@@ -309,7 +309,7 @@ func (tg *ThreadGroup) createSession() error {
session: s,
ancestors: 0,
}
- pg.refs.EnableLeakCheck()
+ pg.refs.InitRefs()
// Tie them and return the result.
s.processGroups.PushBack(pg)
@@ -395,7 +395,7 @@ func (tg *ThreadGroup) CreateProcessGroup() error {
originator: tg,
session: tg.processGroup.session,
}
- pg.refs.EnableLeakCheck()
+ pg.refs.InitRefs()
if tg.leader.parent != nil && tg.leader.parent.tg.processGroup.session == pg.session {
pg.ancestors++
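
Two call shapes appear in this file: Session embeds its refs type, so s.InitRefs() resolves to the promoted method, while ProcessGroup keeps the counter in a refs field and spells it pg.refs.InitRefs(). A reduced sketch of the two shapes (the types here are skeletons, not the kernel ones):

    package main

    import "sync/atomic"

    type sessionRefs struct{ refCount int64 }

    func (r *sessionRefs) InitRefs() { atomic.StoreInt64(&r.refCount, 1) }

    // session embeds the counter; InitRefs is promoted onto it.
    type session struct {
        sessionRefs
        id int
    }

    // processGroup names the counter as a field instead.
    type processGroup struct {
        refs sessionRefs
        id   int
    }

    func main() {
        s := &session{id: 1}
        s.InitRefs() // promoted, as in createSession
        pg := &processGroup{id: 2}
        pg.refs.InitRefs() // explicit, as in CreateProcessGroup
    }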
diff --git a/pkg/sentry/kernel/shm/shm.go b/pkg/sentry/kernel/shm/shm.go
index ebbebf46b..92d60ba78 100644
--- a/pkg/sentry/kernel/shm/shm.go
+++ b/pkg/sentry/kernel/shm/shm.go
@@ -251,7 +251,7 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi
creatorPID: pid,
changeTime: ktime.NowFromContext(ctx),
}
- shm.EnableLeakCheck()
+ shm.InitRefs()
// Find the next available ID.
for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ {
diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go
index 5f148594c..82ca1ed06 100644
--- a/pkg/sentry/kernel/shm/shm_refs.go
+++ b/pkg/sentry/kernel/shm/shm_refs.go
@@ -20,9 +20,6 @@ var Shmobj *Shm
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type ShmRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type ShmRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *ShmRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *ShmRefs) RefType() string {
return fmt.Sprintf("%T", Shmobj)[1:]
@@ -58,8 +62,7 @@ func (r *ShmRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *ShmRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *ShmRefs) ReadRefs() int64 {
//go:nosplit
func (r *ShmRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if ShmenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *ShmRefs) IncRef() {
//go:nosplit
func (r *ShmRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if ShmenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *ShmRefs) TryIncRef() bool {
//go:nosplit
func (r *ShmRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if ShmenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 7bf48cb2c..4c8cd38ed 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -252,7 +252,7 @@ func newAIOMappable(mfp pgalloc.MemoryFileProvider) (*aioMappable, error) {
return nil, err
}
m := aioMappable{mfp: mfp, fr: fr}
- m.EnableLeakCheck()
+ m.InitRefs()
return &m, nil
}
diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go
index 9d94bf879..500477c1f 100644
--- a/pkg/sentry/mm/aio_mappable_refs.go
+++ b/pkg/sentry/mm/aio_mappable_refs.go
@@ -20,9 +20,6 @@ var aioMappableobj *aioMappable
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type aioMappableRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type aioMappableRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *aioMappableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *aioMappableRefs) RefType() string {
return fmt.Sprintf("%T", aioMappableobj)[1:]
@@ -58,8 +62,7 @@ func (r *aioMappableRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *aioMappableRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *aioMappableRefs) ReadRefs() int64 {
//go:nosplit
func (r *aioMappableRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if aioMappableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *aioMappableRefs) IncRef() {
//go:nosplit
func (r *aioMappableRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if aioMappableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *aioMappableRefs) TryIncRef() bool {
//go:nosplit
func (r *aioMappableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if aioMappableenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index 2dbe5b751..48d8b6a2b 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -44,7 +44,7 @@ type SpecialMappable struct {
// Preconditions: fr.Length() != 0.
func NewSpecialMappable(name string, mfp pgalloc.MemoryFileProvider, fr memmap.FileRange) *SpecialMappable {
m := SpecialMappable{mfp: mfp, fr: fr, name: name}
- m.EnableLeakCheck()
+ m.InitRefs()
return &m
}
diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go
index f17d4361f..60b4b7e92 100644
--- a/pkg/sentry/mm/special_mappable_refs.go
+++ b/pkg/sentry/mm/special_mappable_refs.go
@@ -20,9 +20,6 @@ var SpecialMappableobj *SpecialMappable
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type SpecialMappableRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type SpecialMappableRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *SpecialMappableRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *SpecialMappableRefs) RefType() string {
return fmt.Sprintf("%T", SpecialMappableobj)[1:]
@@ -58,8 +62,7 @@ func (r *SpecialMappableRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *SpecialMappableRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *SpecialMappableRefs) ReadRefs() int64 {
//go:nosplit
func (r *SpecialMappableRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if SpecialMappableenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *SpecialMappableRefs) IncRef() {
//go:nosplit
func (r *SpecialMappableRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if SpecialMappableenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *SpecialMappableRefs) TryIncRef() bool {
//go:nosplit
func (r *SpecialMappableRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if SpecialMappableenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index aec464529..e69a17ca8 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -20,9 +20,6 @@ var socketOperationsobj *SocketOperations
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type socketOperationsRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type socketOperationsRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketOperationsRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *socketOperationsRefs) RefType() string {
return fmt.Sprintf("%T", socketOperationsobj)[1:]
@@ -58,8 +62,7 @@ func (r *socketOperationsRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *socketOperationsRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *socketOperationsRefs) ReadRefs() int64 {
//go:nosplit
func (r *socketOperationsRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if socketOperationsenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *socketOperationsRefs) IncRef() {
//go:nosplit
func (r *socketOperationsRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if socketOperationsenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *socketOperationsRefs) TryIncRef() bool {
//go:nosplit
func (r *socketOperationsRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if socketOperationsenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/socket/unix/socket_vfs2_refs.go b/pkg/sentry/socket/unix/socket_vfs2_refs.go
index 8794375b3..d9bdba0b3 100644
--- a/pkg/sentry/socket/unix/socket_vfs2_refs.go
+++ b/pkg/sentry/socket/unix/socket_vfs2_refs.go
@@ -20,9 +20,6 @@ var socketVFS2obj *SocketVFS2
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type socketVFS2Refs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type socketVFS2Refs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketVFS2Refs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *socketVFS2Refs) RefType() string {
return fmt.Sprintf("%T", socketVFS2obj)[1:]
@@ -58,8 +62,7 @@ func (r *socketVFS2Refs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *socketVFS2Refs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *socketVFS2Refs) ReadRefs() int64 {
//go:nosplit
func (r *socketVFS2Refs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if socketVFS2enableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *socketVFS2Refs) IncRef() {
//go:nosplit
func (r *socketVFS2Refs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if socketVFS2enableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *socketVFS2Refs) TryIncRef() bool {
//go:nosplit
func (r *socketVFS2Refs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if socketVFS2enableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/socket/unix/transport/connectioned.go b/pkg/sentry/socket/unix/transport/connectioned.go
index aa4f3c04d..6d9e502bd 100644
--- a/pkg/sentry/socket/unix/transport/connectioned.go
+++ b/pkg/sentry/socket/unix/transport/connectioned.go
@@ -142,9 +142,9 @@ func NewPair(ctx context.Context, stype linux.SockType, uid UniqueIDProvider) (E
}
q1 := &queue{ReaderQueue: a.Queue, WriterQueue: b.Queue, limit: initialLimit}
- q1.EnableLeakCheck()
+ q1.InitRefs()
q2 := &queue{ReaderQueue: b.Queue, WriterQueue: a.Queue, limit: initialLimit}
- q2.EnableLeakCheck()
+ q2.InitRefs()
if stype == linux.SOCK_STREAM {
a.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{q1}}
@@ -300,14 +300,14 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn
}
readQueue := &queue{ReaderQueue: ce.WaiterQueue(), WriterQueue: ne.Queue, limit: initialLimit}
- readQueue.EnableLeakCheck()
+ readQueue.InitRefs()
ne.connected = &connectedEndpoint{
endpoint: ce,
writeQueue: readQueue,
}
writeQueue := &queue{ReaderQueue: ne.Queue, WriterQueue: ce.WaiterQueue(), limit: initialLimit}
- writeQueue.EnableLeakCheck()
+ writeQueue.InitRefs()
if e.stype == linux.SOCK_STREAM {
ne.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{readQueue: writeQueue}}
} else {
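
In NewPair and BidirectionalConnect each direction gets its own queue, and each queue needs its own InitRefs before either endpoint takes references on it. A reduced sketch of the pairing (queue is a stub and the limit value is illustrative):

    package main

    import "sync/atomic"

    type queue struct {
        refCount int64
        limit    int
    }

    func (q *queue) InitRefs() { atomic.StoreInt64(&q.refCount, 1) }

    func main() {
        const initialLimit = 16 * 1024 // illustrative
        // One queue per direction, each starting with one reference.
        q1 := &queue{limit: initialLimit}
        q1.InitRefs()
        q2 := &queue{limit: initialLimit}
        q2.InitRefs()
        _, _ = q1, q2
    }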
diff --git a/pkg/sentry/socket/unix/transport/connectionless.go b/pkg/sentry/socket/unix/transport/connectionless.go
index f8aacca13..1406971bc 100644
--- a/pkg/sentry/socket/unix/transport/connectionless.go
+++ b/pkg/sentry/socket/unix/transport/connectionless.go
@@ -42,7 +42,7 @@ var (
func NewConnectionless(ctx context.Context) Endpoint {
ep := &connectionlessEndpoint{baseEndpoint{Queue: &waiter.Queue{}}}
q := queue{ReaderQueue: ep.Queue, WriterQueue: &waiter.Queue{}, limit: initialLimit}
- q.EnableLeakCheck()
+ q.InitRefs()
ep.receiver = &queueReceiver{readQueue: &q}
return ep
}
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
index ec67d4b14..679cb40e4 100644
--- a/pkg/sentry/socket/unix/transport/queue_refs.go
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -20,9 +20,6 @@ var queueobj *queue
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type queueRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type queueRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *queueRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *queueRefs) RefType() string {
return fmt.Sprintf("%T", queueobj)[1:]
@@ -58,8 +62,7 @@ func (r *queueRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *queueRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *queueRefs) ReadRefs() int64 {
//go:nosplit
func (r *queueRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if queueenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *queueRefs) IncRef() {
//go:nosplit
func (r *queueRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if queueenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *queueRefs) TryIncRef() bool {
//go:nosplit
func (r *queueRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if queueenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go
index adad485a9..b32bb7ba8 100644
--- a/pkg/sentry/socket/unix/unix.go
+++ b/pkg/sentry/socket/unix/unix.go
@@ -80,7 +80,7 @@ func NewWithDirent(ctx context.Context, d *fs.Dirent, ep transport.Endpoint, sty
stype: stype,
},
}
- s.EnableLeakCheck()
+ s.InitRefs()
return fs.NewFile(ctx, d, flags, &s)
}
diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go
index 7a78444dc..eaf0b0d26 100644
--- a/pkg/sentry/socket/unix/unix_vfs2.go
+++ b/pkg/sentry/socket/unix/unix_vfs2.go
@@ -80,7 +80,7 @@ func NewFileDescription(ep transport.Endpoint, stype linux.SockType, flags uint3
stype: stype,
},
}
- sock.EnableLeakCheck()
+ sock.InitRefs()
sock.LockFD.Init(locks)
vfsfd := &sock.vfsfd
if err := vfsfd.Init(sock, flags, mnt, d, &vfs.FileDescriptionOptions{
diff --git a/pkg/sentry/vfs/file_description.go b/pkg/sentry/vfs/file_description.go
index 546e445aa..936f9fc71 100644
--- a/pkg/sentry/vfs/file_description.go
+++ b/pkg/sentry/vfs/file_description.go
@@ -133,7 +133,7 @@ func (fd *FileDescription) Init(impl FileDescriptionImpl, flags uint32, mnt *Mou
}
}
- fd.EnableLeakCheck()
+ fd.InitRefs()
// Remove "file creation flags" to mirror the behavior from file.f_flags in
// fs/open.c:do_dentry_open.
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
index 5cc5fe104..1e6d3f5af 100644
--- a/pkg/sentry/vfs/file_description_refs.go
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -20,9 +20,6 @@ var FileDescriptionobj *FileDescription
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type FileDescriptionRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FileDescriptionRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FileDescriptionRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *FileDescriptionRefs) RefType() string {
return fmt.Sprintf("%T", FileDescriptionobj)[1:]
@@ -58,8 +62,7 @@ func (r *FileDescriptionRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FileDescriptionRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FileDescriptionRefs) ReadRefs() int64 {
//go:nosplit
func (r *FileDescriptionRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if FileDescriptionenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *FileDescriptionRefs) IncRef() {
//go:nosplit
func (r *FileDescriptionRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *FileDescriptionRefs) TryIncRef() bool {
//go:nosplit
func (r *FileDescriptionRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/vfs/filesystem.go b/pkg/sentry/vfs/filesystem.go
index c93d94634..2c4b81e78 100644
--- a/pkg/sentry/vfs/filesystem.go
+++ b/pkg/sentry/vfs/filesystem.go
@@ -48,7 +48,7 @@ type Filesystem struct {
// Init must be called before first use of fs.
func (fs *Filesystem) Init(vfsObj *VirtualFilesystem, fsType FilesystemType, impl FilesystemImpl) {
- fs.EnableLeakCheck()
+ fs.InitRefs()
fs.vfs = vfsObj
fs.fsType = fsType
fs.impl = impl
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
index 6b403c04a..75da47bef 100644
--- a/pkg/sentry/vfs/filesystem_refs.go
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -20,9 +20,6 @@ var Filesystemobj *Filesystem
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type FilesystemRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type FilesystemRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FilesystemRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *FilesystemRefs) RefType() string {
return fmt.Sprintf("%T", Filesystemobj)[1:]
@@ -58,8 +62,7 @@ func (r *FilesystemRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *FilesystemRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *FilesystemRefs) ReadRefs() int64 {
//go:nosplit
func (r *FilesystemRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if FilesystemenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *FilesystemRefs) IncRef() {
//go:nosplit
func (r *FilesystemRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if FilesystemenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *FilesystemRefs) TryIncRef() bool {
//go:nosplit
func (r *FilesystemRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if FilesystemenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index 3ea981ad4..d865fd603 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -169,7 +169,7 @@ func (vfs *VirtualFilesystem) NewMountNamespace(ctx context.Context, creds *auth
Owner: creds.UserNamespace,
mountpoints: make(map[*Dentry]uint32),
}
- mntns.EnableLeakCheck()
+ mntns.InitRefs()
mntns.root = newMount(vfs, fs, root, mntns, opts)
return mntns, nil
}
@@ -477,7 +477,9 @@ func (mnt *Mount) tryIncMountedRef() bool {
return false
}
if atomic.CompareAndSwapInt64(&mnt.refs, r, r+1) {
- refsvfs2.LogTryIncRef(mnt, r+1)
+ if mnt.LogRefs() {
+ refsvfs2.LogTryIncRef(mnt, r+1)
+ }
return true
}
}
@@ -488,12 +490,17 @@ func (mnt *Mount) IncRef() {
// In general, negative values for mnt.refs are valid because the MSB is
// the eager-unmount bit.
r := atomic.AddInt64(&mnt.refs, 1)
- refsvfs2.LogIncRef(mnt, r)
+ if mnt.LogRefs() {
+ refsvfs2.LogIncRef(mnt, r)
+ }
}
// DecRef decrements mnt's reference count.
func (mnt *Mount) DecRef(ctx context.Context) {
r := atomic.AddInt64(&mnt.refs, -1)
+ if mnt.LogRefs() {
+ refsvfs2.LogDecRef(mnt, r)
+ }
if r&^math.MinInt64 == 0 { // mask out MSB
refsvfs2.Unregister(mnt)
mnt.destroy(ctx)
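
Mount is the one refcounted type here that does not use the generated template: the MSB of mnt.refs is an eager-unmount flag, so a negative value can still be a live mount, and DecRef masks the MSB out before testing for zero. Logging is gated per mount via LogRefs() rather than a per-type flag. A standalone sketch of the mask test:

    package main

    import (
        "fmt"
        "math"
        "sync/atomic"
    )

    func main() {
        // MSB set marks an eagerly-unmounted mount; the low bits count refs.
        refs := int64(math.MinInt64) | 2 // unmount bit + two references

        for i := 0; i < 2; i++ {
            r := atomic.AddInt64(&refs, -1)
            if r&^math.MinInt64 == 0 { // mask out the MSB, as in Mount.DecRef
                fmt.Println("last reference dropped; destroy the mount")
            }
        }
    }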
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
index 7c39d445f..bd79fb8a7 100644
--- a/pkg/sentry/vfs/mount_namespace_refs.go
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -20,9 +20,6 @@ var MountNamespaceobj *MountNamespace
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type MountNamespaceRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type MountNamespaceRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *MountNamespaceRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *MountNamespaceRefs) RefType() string {
return fmt.Sprintf("%T", MountNamespaceobj)[1:]
@@ -58,8 +62,7 @@ func (r *MountNamespaceRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *MountNamespaceRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *MountNamespaceRefs) ReadRefs() int64 {
//go:nosplit
func (r *MountNamespaceRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if MountNamespaceenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *MountNamespaceRefs) IncRef() {
//go:nosplit
func (r *MountNamespaceRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *MountNamespaceRefs) TryIncRef() bool {
//go:nosplit
func (r *MountNamespaceRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
diff --git a/pkg/tcpip/link/tun/device.go b/pkg/tcpip/link/tun/device.go
index cda6328a2..4c14f55d3 100644
--- a/pkg/tcpip/link/tun/device.go
+++ b/pkg/tcpip/link/tun/device.go
@@ -157,7 +157,7 @@ func attachOrCreateNIC(s *stack.Stack, name, prefix string, linkCaps stack.LinkE
name: name,
isTap: prefix == "tap",
}
- endpoint.EnableLeakCheck()
+ endpoint.InitRefs()
endpoint.Endpoint.LinkEPCapabilities = linkCaps
if endpoint.name == "" {
endpoint.name = fmt.Sprintf("%s%d", prefix, id)
diff --git a/pkg/tcpip/link/tun/tun_endpoint_refs.go b/pkg/tcpip/link/tun/tun_endpoint_refs.go
index 850d05c38..dc164902b 100644
--- a/pkg/tcpip/link/tun/tun_endpoint_refs.go
+++ b/pkg/tcpip/link/tun/tun_endpoint_refs.go
@@ -20,9 +20,6 @@ var tunEndpointobj *tunEndpoint
// Refs implements refs.RefCounter. It keeps a reference count using atomic
// operations and calls the destructor when the count reaches zero.
//
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
// +stateify savable
type tunEndpointRefs struct {
// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type tunEndpointRefs struct {
refCount int64
}
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *tunEndpointRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
// RefType implements refsvfs2.CheckedObject.RefType.
func (r *tunEndpointRefs) RefType() string {
return fmt.Sprintf("%T", tunEndpointobj)[1:]
@@ -58,8 +62,7 @@ func (r *tunEndpointRefs) EnableLeakCheck() {
// ReadRefs returns the current number of references. The returned count is
// inherently racy and is unsafe to use without external synchronization.
func (r *tunEndpointRefs) ReadRefs() int64 {
-
- return atomic.LoadInt64(&r.refCount) + 1
+ return atomic.LoadInt64(&r.refCount)
}
// IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *tunEndpointRefs) ReadRefs() int64 {
//go:nosplit
func (r *tunEndpointRefs) IncRef() {
v := atomic.AddInt64(&r.refCount, 1)
- refsvfs2.LogIncRef(r, v+1)
- if v <= 0 {
+ if tunEndpointenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
}
}
@@ -82,14 +87,16 @@ func (r *tunEndpointRefs) IncRef() {
//go:nosplit
func (r *tunEndpointRefs) TryIncRef() bool {
const speculativeRef = 1 << 32
- if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
atomic.AddInt64(&r.refCount, -speculativeRef)
return false
}
v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
- refsvfs2.LogTryIncRef(r, v+1)
+ if tunEndpointenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
return true
}
@@ -107,12 +114,14 @@ func (r *tunEndpointRefs) TryIncRef() bool {
//go:nosplit
func (r *tunEndpointRefs) DecRef(destroy func()) {
v := atomic.AddInt64(&r.refCount, -1)
- refsvfs2.LogDecRef(r, v+1)
+ if tunEndpointenableLogging {
+ refsvfs2.LogDecRef(r, v+1)
+ }
switch {
- case v < -1:
+ case v < 0:
panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
- case v == -1:
+ case v == 0:
refsvfs2.Unregister(r)
if destroy != nil {
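
Taken together, the template change leaves every observable count unchanged: ReadRefs drops its +1 because the stored value now is the count. A trivial check of the identity (standalone sketch):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var refCount int64
        atomic.StoreInt64(&refCount, 1) // InitRefs
        atomic.AddInt64(&refCount, 1)   // IncRef
        // The same state was previously stored as 1 and reported as 1+1;
        // now it is stored and reported directly as 2.
        fmt.Println(atomic.LoadInt64(&refCount)) // 2
    }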