Diffstat (limited to 'pkg/sentry')
30 files changed, 432 insertions, 567 deletions
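Most of the hunks below are regenerated marshalling code. The generated CopyIn/CopyOut/WriteTo bodies all build a []byte view over the struct's own memory via reflect.SliceHeader and then pin the struct with runtime.KeepAlive, because that trick hides the pointer from the garbage collector and from escape analysis. A minimal, self-contained sketch of the same idea, with illustrative names only (the real generated code additionally routes through gohacks.Noescape and safecopy):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

// payload is a stand-in for a generated Marshallable struct.
type payload struct {
	A uint32
	B uint32
}

// bytesOf returns a []byte aliasing p's memory without copying. The slice
// header is filled in by hand, so the resulting slice does not keep p alive
// as far as the garbage collector is concerned.
func bytesOf(p *payload) []byte {
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(p))
	hdr.Len = int(unsafe.Sizeof(*p))
	hdr.Cap = int(unsafe.Sizeof(*p))
	return buf
}

func main() {
	p := &payload{A: 1, B: 2}
	buf := bytesOf(p)
	fmt.Println(buf)
	// p must stay live until the last use of buf, exactly like the
	// runtime.KeepAlive(s) calls in the generated methods below.
	runtime.KeepAlive(p)
}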
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go index 1bd458b68..fe68f921d 100644 --- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go @@ -23,116 +23,6 @@ var _ marshal.Marshallable = (*SignalStack)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalInfo) SizeBytes() int { - return 16 + - 1*(128-16) -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalInfo) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) - dst = dst[4:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - for idx := 0; idx < (128-16); idx++ { - dst[0] = byte(s.Fields[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalInfo) UnmarshalBytes(src []byte) { - s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - for idx := 0; idx < (128-16); idx++ { - s.Fields[idx] = src[0] - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SignalInfo) Packed() bool { - return true -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalInfo) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(s)) -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalInfo) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(s), src) -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. 
-func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - -// SizeBytes implements marshal.Marshallable.SizeBytes. func (s *SignalAct) SizeBytes() int { return 24 + (*linux.SignalSet)(nil).SizeBytes() @@ -370,3 +260,113 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } +// SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SignalInfo) SizeBytes() int { + return 16 + + 1*(128-16) +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SignalInfo) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno)) + dst = dst[4:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + for idx := 0; idx < (128-16); idx++ { + dst[0] = byte(s.Fields[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SignalInfo) UnmarshalBytes(src []byte) { + s.Signo = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Errno = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + s.Code = int32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + for idx := 0; idx < (128-16); idx++ { + s.Fields[idx] = src[0] + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SignalInfo) Packed() bool { + return true +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. +func (s *SignalInfo) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(s)) +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SignalInfo) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(s), src) +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) { + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go index b8667cdb9..aac25375e 100644 --- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go +++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go @@ -27,6 +27,174 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil) var _ marshal.Marshallable = (*linux.SignalSet)(nil) // SizeBytes implements marshal.Marshallable.SizeBytes. +func (s *SignalContext64) SizeBytes() int { + return 32 + + 8*31 + + 1*8 + + (*FpsimdContext)(nil).SizeBytes() + + 1*3568 +} + +// MarshalBytes implements marshal.Marshallable.MarshalBytes. +func (s *SignalContext64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr)) + dst = dst[8:] + for idx := 0; idx < 31; idx++ { + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx])) + dst = dst[8:] + } + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate)) + dst = dst[8:] + for idx := 0; idx < 8; idx++ { + dst[0] = byte(s._pad[idx]) + dst = dst[1:] + } + s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()]) + dst = dst[s.Fpsimd64.SizeBytes():] + for idx := 0; idx < 3568; idx++ { + dst[0] = byte(s.Reserved[idx]) + dst = dst[1:] + } +} + +// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. +func (s *SignalContext64) UnmarshalBytes(src []byte) { + s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + for idx := 0; idx < 31; idx++ { + s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + } + s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + for idx := 0; idx < 8; idx++ { + s._pad[idx] = src[0] + src = src[1:] + } + s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()]) + src = src[s.Fpsimd64.SizeBytes():] + for idx := 0; idx < 3568; idx++ { + s.Reserved[idx] = uint8(src[0]) + src = src[1:] + } +} + +// Packed implements marshal.Marshallable.Packed. +//go:nosplit +func (s *SignalContext64) Packed() bool { + return s.Fpsimd64.Packed() +} + +// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. 
+func (s *SignalContext64) MarshalUnsafe(dst []byte) { + if s.Fpsimd64.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(s)) + } else { + // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. + s.MarshalBytes(dst) + } +} + +// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. +func (s *SignalContext64) UnmarshalUnsafe(src []byte) { + if s.Fpsimd64.Packed() { + safecopy.CopyOut(unsafe.Pointer(s), src) + } else { + // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + s.UnmarshalBytes(src) + } +} + +// CopyOutN implements marshal.Marshallable.CopyOutN. +//go:nosplit +func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + s.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// CopyOut implements marshal.Marshallable.CopyOut. +//go:nosplit +func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + return s.CopyOutN(cc, addr, s.SizeBytes()) +} + +// CopyIn implements marshal.Marshallable.CopyIn. +//go:nosplit +func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. + buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + s.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + + // Construct a slice backed by dst's underlying memory. + var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return length, err +} + +// WriteTo implements io.WriterTo.WriteTo. +func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { + if !s.Fpsimd64.Packed() { + // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, s.SizeBytes()) + s.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + + // Construct a slice backed by dst's underlying memory. 
+ var buf []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) + hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) + hdr.Len = s.SizeBytes() + hdr.Cap = s.SizeBytes() + + length, err := writer.Write(buf) + // Since we bypassed the compiler's escape analysis, indicate that s + // must live until the use above. + runtime.KeepAlive(s) // escapes: replaced by intrinsic. + return int64(length), err +} + +// SizeBytes implements marshal.Marshallable.SizeBytes. func (a *aarch64Ctx) SizeBytes() int { return 8 } @@ -422,171 +590,3 @@ func (u *UContext64) WriteTo(writer io.Writer) (int64, error) { return int64(length), err } -// SizeBytes implements marshal.Marshallable.SizeBytes. -func (s *SignalContext64) SizeBytes() int { - return 32 + - 8*31 + - 1*8 + - (*FpsimdContext)(nil).SizeBytes() + - 1*3568 -} - -// MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (s *SignalContext64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.FaultAddr)) - dst = dst[8:] - for idx := 0; idx < 31; idx++ { - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Regs[idx])) - dst = dst[8:] - } - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Sp)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pc)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(s.Pstate)) - dst = dst[8:] - for idx := 0; idx < 8; idx++ { - dst[0] = byte(s._pad[idx]) - dst = dst[1:] - } - s.Fpsimd64.MarshalBytes(dst[:s.Fpsimd64.SizeBytes()]) - dst = dst[s.Fpsimd64.SizeBytes():] - for idx := 0; idx < 3568; idx++ { - dst[0] = byte(s.Reserved[idx]) - dst = dst[1:] - } -} - -// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (s *SignalContext64) UnmarshalBytes(src []byte) { - s.FaultAddr = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - for idx := 0; idx < 31; idx++ { - s.Regs[idx] = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - } - s.Sp = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Pc = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - s.Pstate = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - for idx := 0; idx < 8; idx++ { - s._pad[idx] = src[0] - src = src[1:] - } - s.Fpsimd64.UnmarshalBytes(src[:s.Fpsimd64.SizeBytes()]) - src = src[s.Fpsimd64.SizeBytes():] - for idx := 0; idx < 3568; idx++ { - s.Reserved[idx] = uint8(src[0]) - src = src[1:] - } -} - -// Packed implements marshal.Marshallable.Packed. -//go:nosplit -func (s *SignalContext64) Packed() bool { - return s.Fpsimd64.Packed() -} - -// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (s *SignalContext64) MarshalUnsafe(dst []byte) { - if s.Fpsimd64.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(s)) - } else { - // Type SignalContext64 doesn't have a packed layout in memory, fallback to MarshalBytes. - s.MarshalBytes(dst) - } -} - -// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (s *SignalContext64) UnmarshalUnsafe(src []byte) { - if s.Fpsimd64.Packed() { - safecopy.CopyOut(unsafe.Pointer(s), src) - } else { - // Type SignalContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - s.UnmarshalBytes(src) - } -} - -// CopyOutN implements marshal.Marshallable.CopyOutN. -//go:nosplit -func (s *SignalContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. 
- s.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// CopyOut implements marshal.Marshallable.CopyOut. -//go:nosplit -func (s *SignalContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - return s.CopyOutN(cc, addr, s.SizeBytes()) -} - -// CopyIn implements marshal.Marshallable.CopyIn. -//go:nosplit -func (s *SignalContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - s.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return length, err -} - -// WriteTo implements io.WriterTo.WriteTo. -func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) { - if !s.Fpsimd64.Packed() { - // Type SignalContext64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, s.SizeBytes()) - s.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - - // Construct a slice backed by dst's underlying memory. - var buf []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s))) - hdr.Len = s.SizeBytes() - hdr.Cap = s.SizeBytes() - - length, err := writer.Write(buf) - // Since we bypassed the compiler's escape analysis, indicate that s - // must live until the use above. - runtime.KeepAlive(s) // escapes: replaced by intrinsic. - return int64(length), err -} - diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go index 9246cf66e..155e7abcb 100644 --- a/pkg/sentry/fsimpl/devpts/root_inode_refs.go +++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go @@ -54,11 +54,6 @@ func (r *rootInodeRefs) LogRefs() bool { return rootInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *rootInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *rootInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *rootInodeRefs) TryIncRef() bool { func (r *rootInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if rootInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *rootInodeRefs) DecRef(destroy func()) { func (r *rootInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go index 37a39e976..e221f3b41 100644 --- a/pkg/sentry/fsimpl/fuse/inode_refs.go +++ b/pkg/sentry/fsimpl/fuse/inode_refs.go @@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool { return inodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *inodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *inodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool { func (r *inodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if inodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) { func (r *inodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go index 3f5f4ebc3..d784166ae 100644 --- a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go +++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go @@ -54,11 +54,6 @@ func (r *ConnectedEndpointRefs) LogRefs() bool { return ConnectedEndpointenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *ConnectedEndpointRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *ConnectedEndpointRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *ConnectedEndpointRefs) TryIncRef() bool { func (r *ConnectedEndpointRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if ConnectedEndpointenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *ConnectedEndpointRefs) DecRef(destroy func()) { func (r *ConnectedEndpointRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go index 4c850a7ac..8862c4a5e 100644 --- a/pkg/sentry/fsimpl/host/inode_refs.go +++ b/pkg/sentry/fsimpl/host/inode_refs.go @@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool { return inodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *inodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
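The root_inode_refs.go hunk above, like every *_refs.go hunk in this diff, comes from the same reference-counting template: EnableLeakCheck disappears (afterLoad calls refsvfs2.Register directly) and DecRef now logs the post-decrement count v instead of v+1. A simplified, hand-written stand-in for that DecRef shape, assuming a count that starts at one reference (hypothetical type, not the actual refsvfs2 template):

package main

import (
	"fmt"
	"sync/atomic"
)

// exampleRefs is a toy analogue of the generated *Refs types.
type exampleRefs struct {
	refCount int64
}

func newExampleRefs() *exampleRefs {
	// The creator starts out holding a single reference.
	return &exampleRefs{refCount: 1}
}

func (r *exampleRefs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 1 {
		panic("IncRef on released reference")
	}
}

func (r *exampleRefs) DecRef(destroy func()) {
	v := atomic.AddInt64(&r.refCount, -1)
	// Log the post-decrement count, matching the refsvfs2.LogDecRef(r, v)
	// change in this diff.
	fmt.Printf("DecRef: count is now %d\n", v)
	switch {
	case v < 0:
		panic("DecRef on released reference")
	case v == 0:
		destroy()
	}
}

func main() {
	r := newExampleRefs()
	r.IncRef()
	r.DecRef(func() {})
	r.DecRef(func() { fmt.Println("destroyed") })
}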
func (r *inodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool { func (r *inodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if inodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) { func (r *inodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go index cdf6374a3..88cba9456 100644 --- a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go +++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go @@ -54,11 +54,6 @@ func (r *StaticDirectoryRefs) LogRefs() bool { return StaticDirectoryenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *StaticDirectoryRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *StaticDirectoryRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *StaticDirectoryRefs) TryIncRef() bool { func (r *StaticDirectoryRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if StaticDirectoryenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *StaticDirectoryRefs) DecRef(destroy func()) { func (r *StaticDirectoryRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go index 69b41668a..4409edd35 100644 --- a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go +++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go @@ -54,11 +54,6 @@ func (r *syntheticDirectoryRefs) LogRefs() bool { return syntheticDirectoryenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *syntheticDirectoryRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *syntheticDirectoryRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *syntheticDirectoryRefs) TryIncRef() bool { func (r *syntheticDirectoryRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if syntheticDirectoryenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *syntheticDirectoryRefs) DecRef(destroy func()) { func (r *syntheticDirectoryRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go index 4644809bd..390927c63 100644 --- a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go +++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go @@ -54,11 +54,6 @@ func (r *fdDirInodeRefs) LogRefs() bool { return fdDirInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *fdDirInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *fdDirInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *fdDirInodeRefs) TryIncRef() bool { func (r *fdDirInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if fdDirInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *fdDirInodeRefs) DecRef(destroy func()) { func (r *fdDirInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go index dbc7e3f5a..5c44e617f 100644 --- a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go +++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go @@ -54,11 +54,6 @@ func (r *fdInfoDirInodeRefs) LogRefs() bool { return fdInfoDirInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *fdInfoDirInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *fdInfoDirInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *fdInfoDirInodeRefs) TryIncRef() bool { func (r *fdInfoDirInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if fdInfoDirInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *fdInfoDirInodeRefs) DecRef(destroy func()) { func (r *fdInfoDirInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go index 993251646..4b655a383 100644 --- a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go +++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go @@ -54,11 +54,6 @@ func (r *subtasksInodeRefs) LogRefs() bool { return subtasksInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *subtasksInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *subtasksInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *subtasksInodeRefs) TryIncRef() bool { func (r *subtasksInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if subtasksInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *subtasksInodeRefs) DecRef(destroy func()) { func (r *subtasksInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go index 632251e75..1bc355b12 100644 --- a/pkg/sentry/fsimpl/proc/task_inode_refs.go +++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go @@ -54,11 +54,6 @@ func (r *taskInodeRefs) LogRefs() bool { return taskInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *taskInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *taskInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *taskInodeRefs) TryIncRef() bool { func (r *taskInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if taskInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *taskInodeRefs) DecRef(destroy func()) { func (r *taskInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go index 0b2af4269..af0c4f025 100644 --- a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go +++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go @@ -54,11 +54,6 @@ func (r *tasksInodeRefs) LogRefs() bool { return tasksInodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *tasksInodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *tasksInodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *tasksInodeRefs) TryIncRef() bool { func (r *tasksInodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if tasksInodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *tasksInodeRefs) DecRef(destroy func()) { func (r *tasksInodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go index a45aa7f78..0f8a6e0f2 100644 --- a/pkg/sentry/fsimpl/sys/dir_refs.go +++ b/pkg/sentry/fsimpl/sys/dir_refs.go @@ -54,11 +54,6 @@ func (r *dirRefs) LogRefs() bool { return direnableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *dirRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *dirRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *dirRefs) TryIncRef() bool { func (r *dirRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if direnableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *dirRefs) DecRef(destroy func()) { func (r *dirRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go index 51ee15409..a30d79603 100644 --- a/pkg/sentry/fsimpl/tmpfs/inode_refs.go +++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go @@ -54,11 +54,6 @@ func (r *inodeRefs) LogRefs() bool { return inodeenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *inodeRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *inodeRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *inodeRefs) TryIncRef() bool { func (r *inodeRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if inodeenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *inodeRefs) DecRef(destroy func()) { func (r *inodeRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/fd_table_refs.go b/pkg/sentry/kernel/fd_table_refs.go index f540ba371..ddbe50261 100644 --- a/pkg/sentry/kernel/fd_table_refs.go +++ b/pkg/sentry/kernel/fd_table_refs.go @@ -54,11 +54,6 @@ func (r *FDTableRefs) LogRefs() bool { return FDTableenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *FDTableRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *FDTableRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *FDTableRefs) TryIncRef() bool { func (r *FDTableRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if FDTableenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *FDTableRefs) DecRef(destroy func()) { func (r *FDTableRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/fs_context_refs.go b/pkg/sentry/kernel/fs_context_refs.go index 6510157c7..d97c82fc9 100644 --- a/pkg/sentry/kernel/fs_context_refs.go +++ b/pkg/sentry/kernel/fs_context_refs.go @@ -54,11 +54,6 @@ func (r *FSContextRefs) LogRefs() bool { return FSContextenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *FSContextRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *FSContextRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *FSContextRefs) TryIncRef() bool { func (r *FSContextRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if FSContextenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *FSContextRefs) DecRef(destroy func()) { func (r *FSContextRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/ipc_namespace_refs.go b/pkg/sentry/kernel/ipc_namespace_refs.go index c0acf2f50..a2a2d3458 100644 --- a/pkg/sentry/kernel/ipc_namespace_refs.go +++ b/pkg/sentry/kernel/ipc_namespace_refs.go @@ -54,11 +54,6 @@ func (r *IPCNamespaceRefs) LogRefs() bool { return IPCNamespaceenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *IPCNamespaceRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *IPCNamespaceRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *IPCNamespaceRefs) TryIncRef() bool { func (r *IPCNamespaceRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if IPCNamespaceenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *IPCNamespaceRefs) DecRef(destroy func()) { func (r *IPCNamespaceRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/process_group_refs.go b/pkg/sentry/kernel/process_group_refs.go index a9cc69b35..51b0fcd34 100644 --- a/pkg/sentry/kernel/process_group_refs.go +++ b/pkg/sentry/kernel/process_group_refs.go @@ -54,11 +54,6 @@ func (r *ProcessGroupRefs) LogRefs() bool { return ProcessGroupenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *ProcessGroupRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *ProcessGroupRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *ProcessGroupRefs) TryIncRef() bool { func (r *ProcessGroupRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if ProcessGroupenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *ProcessGroupRefs) DecRef(destroy func()) { func (r *ProcessGroupRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/session_refs.go b/pkg/sentry/kernel/session_refs.go index 0856ff261..84919943d 100644 --- a/pkg/sentry/kernel/session_refs.go +++ b/pkg/sentry/kernel/session_refs.go @@ -54,11 +54,6 @@ func (r *SessionRefs) LogRefs() bool { return SessionenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *SessionRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *SessionRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *SessionRefs) TryIncRef() bool { func (r *SessionRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if SessionenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *SessionRefs) DecRef(destroy func()) { func (r *SessionRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/kernel/shm/shm_refs.go b/pkg/sentry/kernel/shm/shm_refs.go index 82ca1ed06..f550c7ca1 100644 --- a/pkg/sentry/kernel/shm/shm_refs.go +++ b/pkg/sentry/kernel/shm/shm_refs.go @@ -54,11 +54,6 @@ func (r *ShmRefs) LogRefs() bool { return ShmenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *ShmRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *ShmRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *ShmRefs) TryIncRef() bool { func (r *ShmRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if ShmenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *ShmRefs) DecRef(destroy func()) { func (r *ShmRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/mm/aio_mappable_refs.go b/pkg/sentry/mm/aio_mappable_refs.go index 500477c1f..4b87070bf 100644 --- a/pkg/sentry/mm/aio_mappable_refs.go +++ b/pkg/sentry/mm/aio_mappable_refs.go @@ -54,11 +54,6 @@ func (r *aioMappableRefs) LogRefs() bool { return aioMappableenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *aioMappableRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *aioMappableRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *aioMappableRefs) TryIncRef() bool { func (r *aioMappableRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if aioMappableenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *aioMappableRefs) DecRef(destroy func()) { func (r *aioMappableRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/mm/special_mappable_refs.go b/pkg/sentry/mm/special_mappable_refs.go index 60b4b7e92..6c89e0f7e 100644 --- a/pkg/sentry/mm/special_mappable_refs.go +++ b/pkg/sentry/mm/special_mappable_refs.go @@ -54,11 +54,6 @@ func (r *SpecialMappableRefs) LogRefs() bool { return SpecialMappableenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *SpecialMappableRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *SpecialMappableRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *SpecialMappableRefs) TryIncRef() bool { func (r *SpecialMappableRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if SpecialMappableenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *SpecialMappableRefs) DecRef(destroy func()) { func (r *SpecialMappableRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/socket/unix/socket_refs.go b/pkg/sentry/socket/unix/socket_refs.go index e69a17ca8..2a7fcb253 100644 --- a/pkg/sentry/socket/unix/socket_refs.go +++ b/pkg/sentry/socket/unix/socket_refs.go @@ -54,11 +54,6 @@ func (r *socketOperationsRefs) LogRefs() bool { return socketOperationsenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *socketOperationsRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *socketOperationsRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *socketOperationsRefs) TryIncRef() bool { func (r *socketOperationsRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if socketOperationsenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *socketOperationsRefs) DecRef(destroy func()) { func (r *socketOperationsRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/socket/unix/socket_vfs2_refs.go b/pkg/sentry/socket/unix/socket_vfs2_refs.go index d9bdba0b3..f10033260 100644 --- a/pkg/sentry/socket/unix/socket_vfs2_refs.go +++ b/pkg/sentry/socket/unix/socket_vfs2_refs.go @@ -54,11 +54,6 @@ func (r *socketVFS2Refs) LogRefs() bool { return socketVFS2enableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *socketVFS2Refs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *socketVFS2Refs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *socketVFS2Refs) TryIncRef() bool { func (r *socketVFS2Refs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if socketVFS2enableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *socketVFS2Refs) DecRef(destroy func()) { func (r *socketVFS2Refs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/socket/unix/transport/queue_refs.go b/pkg/sentry/socket/unix/transport/queue_refs.go index 679cb40e4..42c5b7ce0 100644 --- a/pkg/sentry/socket/unix/transport/queue_refs.go +++ b/pkg/sentry/socket/unix/transport/queue_refs.go @@ -54,11 +54,6 @@ func (r *queueRefs) LogRefs() bool { return queueenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *queueRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *queueRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *queueRefs) TryIncRef() bool { func (r *queueRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if queueenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *queueRefs) DecRef(destroy func()) { func (r *queueRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go index 6886c63d1..956643160 100644 --- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go +++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go @@ -458,26 +458,49 @@ func (u *userSockFprog) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (m *multipleMessageHeader64) SizeBytes() int { - return 8 + - (*MessageHeader64)(nil).SizeBytes() +func (m *MessageHeader64) SizeBytes() int { + return 56 } // MarshalBytes implements marshal.Marshallable.MarshalBytes. 
-func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { - m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) - dst = dst[m.msgHdr.SizeBytes():] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) +func (m *MessageHeader64) MarshalBytes(dst []byte) { + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) + dst = dst[4:] + // Padding: dst[:sizeof(uint32)] ~= uint32(0) + dst = dst[4:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) + dst = dst[8:] + usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) + dst = dst[8:] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. -func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { - m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) - src = src[m.msgHdr.SizeBytes():] - m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) +func (m *MessageHeader64) UnmarshalBytes(src []byte) { + m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) + src = src[4:] + // Padding: var _ uint32 ~= src[:sizeof(uint32)] + src = src[4:] + m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) + src = src[8:] + m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -485,40 +508,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *multipleMessageHeader64) Packed() bool { - return m.msgHdr.Packed() +func (m *MessageHeader64) Packed() bool { + return true } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { - if m.msgHdr.Packed() { - safecopy.CopyIn(dst, unsafe.Pointer(m)) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. - m.MarshalBytes(dst) - } +func (m *MessageHeader64) MarshalUnsafe(dst []byte) { + safecopy.CopyIn(dst, unsafe.Pointer(m)) } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { - if m.msgHdr.Packed() { - safecopy.CopyOut(unsafe.Pointer(m), src) - } else { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. - m.UnmarshalBytes(src) - } +func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { + safecopy.CopyOut(unsafe.Pointer(m), src) } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - m.MarshalBytes(buf) // escapes: fallback. - return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. 
- } - +func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -535,23 +541,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem. // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. - buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. - length, err := cc.CopyInBytes(addr, buf) // escapes: okay. - // Unmarshal unconditionally. If we had a short copy-in, this results in a - // partially unmarshalled struct. - m.UnmarshalBytes(buf) // escapes: fallback. - return length, err - } - +func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -567,15 +563,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad } // WriteTo implements io.WriterTo.WriteTo. -func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { - if !m.msgHdr.Packed() { - // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. - buf := make([]byte, m.SizeBytes()) - m.MarshalBytes(buf) - length, err := writer.Write(buf) - return int64(length), err - } - +func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -591,49 +579,26 @@ func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { } // SizeBytes implements marshal.Marshallable.SizeBytes. -func (m *MessageHeader64) SizeBytes() int { - return 56 +func (m *multipleMessageHeader64) SizeBytes() int { + return 8 + + (*MessageHeader64)(nil).SizeBytes() } // MarshalBytes implements marshal.Marshallable.MarshalBytes. -func (m *MessageHeader64) MarshalBytes(dst []byte) { - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen)) - dst = dst[4:] - // Padding: dst[:sizeof(uint32)] ~= uint32(0) - dst = dst[4:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control)) - dst = dst[8:] - usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen)) - dst = dst[8:] - usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags)) +func (m *multipleMessageHeader64) MarshalBytes(dst []byte) { + m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()]) + dst = dst[m.msgHdr.SizeBytes():] + usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen)) dst = dst[4:] // Padding: dst[:sizeof(int32)] ~= int32(0) dst = dst[4:] } // UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes. 
-func (m *MessageHeader64) UnmarshalBytes(src []byte) { - m.Name = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4])) - src = src[4:] - // Padding: var _ uint32 ~= src[:sizeof(uint32)] - src = src[4:] - m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Control = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8])) - src = src[8:] - m.Flags = int32(usermem.ByteOrder.Uint32(src[:4])) +func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) { + m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()]) + src = src[m.msgHdr.SizeBytes():] + m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4])) src = src[4:] // Padding: var _ int32 ~= src[:sizeof(int32)] src = src[4:] @@ -641,23 +606,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) { // Packed implements marshal.Marshallable.Packed. //go:nosplit -func (m *MessageHeader64) Packed() bool { - return true +func (m *multipleMessageHeader64) Packed() bool { + return m.msgHdr.Packed() } // MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe. -func (m *MessageHeader64) MarshalUnsafe(dst []byte) { - safecopy.CopyIn(dst, unsafe.Pointer(m)) +func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) { + if m.msgHdr.Packed() { + safecopy.CopyIn(dst, unsafe.Pointer(m)) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes. + m.MarshalBytes(dst) + } } // UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe. -func (m *MessageHeader64) UnmarshalUnsafe(src []byte) { - safecopy.CopyOut(unsafe.Pointer(m), src) +func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) { + if m.msgHdr.Packed() { + safecopy.CopyOut(unsafe.Pointer(m), src) + } else { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes. + m.UnmarshalBytes(src) + } } // CopyOutN implements marshal.Marshallable.CopyOutN. //go:nosplit -func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { +func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + m.MarshalBytes(buf) // escapes: fallback. + return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay. + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -674,13 +656,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li // CopyOut implements marshal.Marshallable.CopyOut. //go:nosplit -func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) { return m.CopyOutN(cc, addr, m.SizeBytes()) } // CopyIn implements marshal.Marshallable.CopyIn. //go:nosplit -func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { +func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes. 
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay. + length, err := cc.CopyInBytes(addr, buf) // escapes: okay. + // Unmarshal unconditionally. If we had a short copy-in, this results in a + // partially unmarshalled struct. + m.UnmarshalBytes(buf) // escapes: fallback. + return length, err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) @@ -696,7 +688,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int } // WriteTo implements io.WriterTo.WriteTo. -func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) { +func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) { + if !m.msgHdr.Packed() { + // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes. + buf := make([]byte, m.SizeBytes()) + m.MarshalBytes(buf) + length, err := writer.Write(buf) + return int64(length), err + } + // Construct a slice backed by dst's underlying memory. var buf []byte hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go index 1e6d3f5af..5d4634ba7 100644 --- a/pkg/sentry/vfs/file_description_refs.go +++ b/pkg/sentry/vfs/file_description_refs.go @@ -54,11 +54,6 @@ func (r *FileDescriptionRefs) LogRefs() bool { return FileDescriptionenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *FileDescriptionRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *FileDescriptionRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *FileDescriptionRefs) TryIncRef() bool { func (r *FileDescriptionRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if FileDescriptionenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *FileDescriptionRefs) DecRef(destroy func()) { func (r *FileDescriptionRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go index 75da47bef..2a98a8ead 100644 --- a/pkg/sentry/vfs/filesystem_refs.go +++ b/pkg/sentry/vfs/filesystem_refs.go @@ -54,11 +54,6 @@ func (r *FilesystemRefs) LogRefs() bool { return FilesystemenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *FilesystemRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. 
func (r *FilesystemRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *FilesystemRefs) TryIncRef() bool { func (r *FilesystemRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if FilesystemenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *FilesystemRefs) DecRef(destroy func()) { func (r *FilesystemRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } } diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go index bd79fb8a7..d5d48a7cd 100644 --- a/pkg/sentry/vfs/mount_namespace_refs.go +++ b/pkg/sentry/vfs/mount_namespace_refs.go @@ -54,11 +54,6 @@ func (r *MountNamespaceRefs) LogRefs() bool { return MountNamespaceenableLogging } -// EnableLeakCheck enables reference leak checking on r. -func (r *MountNamespaceRefs) EnableLeakCheck() { - refsvfs2.Register(r) -} - // ReadRefs returns the current number of references. The returned count is // inherently racy and is unsafe to use without external synchronization. func (r *MountNamespaceRefs) ReadRefs() int64 { @@ -115,7 +110,7 @@ func (r *MountNamespaceRefs) TryIncRef() bool { func (r *MountNamespaceRefs) DecRef(destroy func()) { v := atomic.AddInt64(&r.refCount, -1) if MountNamespaceenableLogging { - refsvfs2.LogDecRef(r, v+1) + refsvfs2.LogDecRef(r, v) } switch { case v < 0: @@ -132,6 +127,6 @@ func (r *MountNamespaceRefs) DecRef(destroy func()) { func (r *MountNamespaceRefs) afterLoad() { if r.ReadRefs() > 0 { - r.EnableLeakCheck() + refsvfs2.Register(r) } }
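For types whose Go memory layout can differ from their wire layout, the generated code above (SignalContext64, multipleMessageHeader64) dispatches on Packed(): a bulk memory copy when the layouts match, otherwise the field-by-field MarshalBytes path. A small, self-contained sketch of that dispatch under assumed names (binary.LittleEndian stands in for usermem.ByteOrder; the real fast path uses safecopy):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// inner mimics a struct whose wire format is A (4 bytes) immediately
// followed by B (8 bytes), while the compiler inserts 4 bytes of padding
// before B in memory, so the two layouts differ.
type inner struct {
	A uint32
	B uint64
}

// sizeBytes is the wire size with no padding, like the generated SizeBytes.
func (i *inner) sizeBytes() int { return 12 }

// packed reports whether the in-memory layout matches the wire layout.
func (i *inner) packed() bool {
	return int(unsafe.Sizeof(*i)) == i.sizeBytes()
}

// marshalBytes serializes field by field, like the generated MarshalBytes.
func (i *inner) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], i.A)
	binary.LittleEndian.PutUint64(dst[4:12], i.B)
}

// marshal mirrors the generated MarshalUnsafe: bulk-copy when packed,
// otherwise fall back to the field-by-field path.
func marshal(i *inner, dst []byte) {
	if i.packed() {
		src := unsafe.Slice((*byte)(unsafe.Pointer(i)), unsafe.Sizeof(*i))
		copy(dst, src)
		return
	}
	i.marshalBytes(dst)
}

func main() {
	buf := make([]byte, 12)
	marshal(&inner{A: 1, B: 2}, buf)
	fmt.Println(buf)
}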