Diffstat (limited to 'pkg/sentry/socket/unix')
-rw-r--r-- | pkg/sentry/socket/unix/socket_refs.go               | 33
-rw-r--r-- | pkg/sentry/socket/unix/socket_vfs2_refs.go          | 33
-rw-r--r-- | pkg/sentry/socket/unix/transport/connectioned.go    |  8
-rw-r--r-- | pkg/sentry/socket/unix/transport/connectionless.go  |  2
-rw-r--r-- | pkg/sentry/socket/unix/transport/queue_refs.go      | 33
-rw-r--r-- | pkg/sentry/socket/unix/unix.go                      |  2
-rw-r--r-- | pkg/sentry/socket/unix/unix_vfs2.go                 |  2
7 files changed, 70 insertions, 43 deletions
diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go
index aec464529..e69a17ca8 100644
--- a/pkg/sentry/socket/unix/socket_refs.go
+++ b/pkg/sentry/socket/unix/socket_refs.go
@@ -20,9 +20,6 @@ var socketOperationsobj *SocketOperations
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type socketOperationsRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type socketOperationsRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketOperationsRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *socketOperationsRefs) RefType() string {
 	return fmt.Sprintf("%T", socketOperationsobj)[1:]
@@ -58,8 +62,7 @@ func (r *socketOperationsRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *socketOperationsRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *socketOperationsRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *socketOperationsRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if socketOperationsenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *socketOperationsRefs) IncRef() {
 //go:nosplit
 func (r *socketOperationsRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if socketOperationsenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
 
@@ -107,12 +114,14 @@ func (r *socketOperationsRefs) TryIncRef() bool {
 //go:nosplit
 func (r *socketOperationsRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if socketOperationsenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/socket/unix/socket_vfs2_refs.go b/pkg/sentry/socket/unix/socket_vfs2_refs.go
index 8794375b3..d9bdba0b3 100644
--- a/pkg/sentry/socket/unix/socket_vfs2_refs.go
+++ b/pkg/sentry/socket/unix/socket_vfs2_refs.go
@@ -20,9 +20,6 @@ var socketVFS2obj *SocketVFS2
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type socketVFS2Refs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type socketVFS2Refs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *socketVFS2Refs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *socketVFS2Refs) RefType() string {
 	return fmt.Sprintf("%T", socketVFS2obj)[1:]
@@ -58,8 +62,7 @@ func (r *socketVFS2Refs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *socketVFS2Refs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *socketVFS2Refs) ReadRefs() int64 {
 //go:nosplit
 func (r *socketVFS2Refs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if socketVFS2enableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *socketVFS2Refs) IncRef() {
 //go:nosplit
 func (r *socketVFS2Refs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if socketVFS2enableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
 
@@ -107,12 +114,14 @@ func (r *socketVFS2Refs) TryIncRef() bool {
 //go:nosplit
 func (r *socketVFS2Refs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if socketVFS2enableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/socket/unix/transport/connectioned.go b/pkg/sentry/socket/unix/transport/connectioned.go
index aa4f3c04d..6d9e502bd 100644
--- a/pkg/sentry/socket/unix/transport/connectioned.go
+++ b/pkg/sentry/socket/unix/transport/connectioned.go
@@ -142,9 +142,9 @@ func NewPair(ctx context.Context, stype linux.SockType, uid UniqueIDProvider) (E
 	}
 
 	q1 := &queue{ReaderQueue: a.Queue, WriterQueue: b.Queue, limit: initialLimit}
-	q1.EnableLeakCheck()
+	q1.InitRefs()
 	q2 := &queue{ReaderQueue: b.Queue, WriterQueue: a.Queue, limit: initialLimit}
-	q2.EnableLeakCheck()
+	q2.InitRefs()
 
 	if stype == linux.SOCK_STREAM {
 		a.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{q1}}
@@ -300,14 +300,14 @@ func (e *connectionedEndpoint) BidirectionalConnect(ctx context.Context, ce Conn
 	}
 
 	readQueue := &queue{ReaderQueue: ce.WaiterQueue(), WriterQueue: ne.Queue, limit: initialLimit}
-	readQueue.EnableLeakCheck()
+	readQueue.InitRefs()
 	ne.connected = &connectedEndpoint{
 		endpoint:   ce,
 		writeQueue: readQueue,
 	}
 
 	writeQueue := &queue{ReaderQueue: ne.Queue, WriterQueue: ce.WaiterQueue(), limit: initialLimit}
-	writeQueue.EnableLeakCheck()
+	writeQueue.InitRefs()
 	if e.stype == linux.SOCK_STREAM {
 		ne.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{readQueue: writeQueue}}
 	} else {
diff --git a/pkg/sentry/socket/unix/transport/connectionless.go b/pkg/sentry/socket/unix/transport/connectionless.go
index f8aacca13..1406971bc 100644
--- a/pkg/sentry/socket/unix/transport/connectionless.go
+++ b/pkg/sentry/socket/unix/transport/connectionless.go
@@ -42,7 +42,7 @@ var (
 func NewConnectionless(ctx context.Context) Endpoint {
 	ep := &connectionlessEndpoint{baseEndpoint{Queue: &waiter.Queue{}}}
 	q := queue{ReaderQueue: ep.Queue, WriterQueue: &waiter.Queue{}, limit: initialLimit}
-	q.EnableLeakCheck()
+	q.InitRefs()
 	ep.receiver = &queueReceiver{readQueue: &q}
 	return ep
 }
diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go
index ec67d4b14..679cb40e4 100644
--- a/pkg/sentry/socket/unix/transport/queue_refs.go
+++ b/pkg/sentry/socket/unix/transport/queue_refs.go
@@ -20,9 +20,6 @@ var queueobj *queue
 // Refs implements refs.RefCounter. It keeps a reference count using atomic
 // operations and calls the destructor when the count reaches zero.
 //
-// Note that the number of references is actually refCount + 1 so that a default
-// zero-value Refs object contains one reference.
-//
 // +stateify savable
 type queueRefs struct {
 	// refCount is composed of two fields:
@@ -35,6 +32,13 @@ type queueRefs struct {
 	refCount int64
 }
 
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *queueRefs) InitRefs() {
+	atomic.StoreInt64(&r.refCount, 1)
+	refsvfs2.Register(r)
+}
+
 // RefType implements refsvfs2.CheckedObject.RefType.
 func (r *queueRefs) RefType() string {
 	return fmt.Sprintf("%T", queueobj)[1:]
@@ -58,8 +62,7 @@ func (r *queueRefs) EnableLeakCheck() {
 // ReadRefs returns the current number of references. The returned count is
 // inherently racy and is unsafe to use without external synchronization.
 func (r *queueRefs) ReadRefs() int64 {
-
-	return atomic.LoadInt64(&r.refCount) + 1
+	return atomic.LoadInt64(&r.refCount)
 }
 
 // IncRef implements refs.RefCounter.IncRef.
@@ -67,8 +70,10 @@ func (r *queueRefs) ReadRefs() int64 {
 //go:nosplit
 func (r *queueRefs) IncRef() {
 	v := atomic.AddInt64(&r.refCount, 1)
-	refsvfs2.LogIncRef(r, v+1)
-	if v <= 0 {
+	if queueenableLogging {
+		refsvfs2.LogIncRef(r, v)
+	}
+	if v <= 1 {
 		panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
 	}
 }
@@ -82,14 +87,16 @@ func (r *queueRefs) IncRef() {
 //go:nosplit
 func (r *queueRefs) TryIncRef() bool {
 	const speculativeRef = 1 << 32
-	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
 
 		atomic.AddInt64(&r.refCount, -speculativeRef)
 		return false
 	}
 
 	v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
-	refsvfs2.LogTryIncRef(r, v+1)
+	if queueenableLogging {
+		refsvfs2.LogTryIncRef(r, v)
+	}
 	return true
 }
 
@@ -107,12 +114,14 @@ func (r *queueRefs) TryIncRef() bool {
 //go:nosplit
 func (r *queueRefs) DecRef(destroy func()) {
 	v := atomic.AddInt64(&r.refCount, -1)
-	refsvfs2.LogDecRef(r, v+1)
+	if queueenableLogging {
+		refsvfs2.LogDecRef(r, v+1)
+	}
 	switch {
-	case v < -1:
+	case v < 0:
 		panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
 
-	case v == -1:
+	case v == 0:
 		refsvfs2.Unregister(r)
 
 		if destroy != nil {
diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go
index adad485a9..b32bb7ba8 100644
--- a/pkg/sentry/socket/unix/unix.go
+++ b/pkg/sentry/socket/unix/unix.go
@@ -80,7 +80,7 @@ func NewWithDirent(ctx context.Context, d *fs.Dirent, ep transport.Endpoint, sty
 			stype: stype,
 		},
 	}
-	s.EnableLeakCheck()
+	s.InitRefs()
 
 	return fs.NewFile(ctx, d, flags, &s)
 }
diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go
index 7a78444dc..eaf0b0d26 100644
--- a/pkg/sentry/socket/unix/unix_vfs2.go
+++ b/pkg/sentry/socket/unix/unix_vfs2.go
@@ -80,7 +80,7 @@ func NewFileDescription(ep transport.Endpoint, stype linux.SockType, flags uint3
 			stype: stype,
 		},
 	}
-	sock.EnableLeakCheck()
+	sock.InitRefs()
 	sock.LockFD.Init(locks)
 	vfsfd := &sock.vfsfd
 	if err := vfsfd.Init(sock, flags, mnt, d, &vfs.FileDescriptionOptions{
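The diff above replaces construction-time EnableLeakCheck() calls with InitRefs() and changes the generated *_refs.go types so that refCount holds the real reference count, starting at 1, instead of a zero-biased count. The standalone Go sketch below mirrors that scheme for illustration only: the type and method names (simpleRefs, destroy callback) are assumptions, not the gVisor refsvfs2 API, and leak-check registration and logging are omitted.

package main

import (
	"fmt"
	"sync/atomic"
)

// simpleRefs is a minimal sketch of a counter that starts at 1 via an
// explicit InitRefs call rather than a zero value meaning "one reference".
type simpleRefs struct {
	refCount int64
}

// InitRefs sets the count to exactly one reference, analogous to the
// InitRefs calls introduced in the diff.
func (r *simpleRefs) InitRefs() {
	atomic.StoreInt64(&r.refCount, 1)
}

// IncRef adds a reference. Because counts start at 1, any result <= 1 means
// the caller did not already hold a reference, which is a bug.
func (r *simpleRefs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 1 {
		panic(fmt.Sprintf("incrementing non-positive count: %d", v-1))
	}
}

// TryIncRef attempts to take a reference without racing a concurrent drop to
// zero: a speculative count is parked in the upper 32 bits, and if the real
// (lower 32-bit) count is already zero the attempt is rolled back.
func (r *simpleRefs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// DecRef drops a reference and runs destroy when the count reaches zero.
func (r *simpleRefs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < 0:
		panic("decrementing non-positive ref count")
	case v == 0:
		if destroy != nil {
			destroy()
		}
	}
}

func main() {
	var q simpleRefs
	q.InitRefs() // count = 1; takes over the initialization role of EnableLeakCheck()
	q.IncRef()   // count = 2
	if q.TryIncRef() {
		q.DecRef(nil)
	}
	q.DecRef(nil)
	q.DecRef(func() { fmt.Println("destroyed") }) // count hits 0, destructor runs
}

With the count stored directly, the panic thresholds in IncRef/DecRef shift from the biased values (v <= 0, v < -1, v == -1) to the natural ones (v <= 1, v < 0, v == 0), which is exactly the pattern visible in the generated files above.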