-rw-r--r--  pkg/abi/linux/linux_abi_autogen_unsafe.go     20
-rw-r--r--  pkg/refs/refcounter.go                        10
-rw-r--r--  pkg/sentry/platform/ring0/defs_impl_arm64.go   3
-rw-r--r--  runsc/boot/loader.go                           3
4 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/pkg/abi/linux/linux_abi_autogen_unsafe.go b/pkg/abi/linux/linux_abi_autogen_unsafe.go
index 751e103f9..798e0418b 100644
--- a/pkg/abi/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/abi/linux/linux_abi_autogen_unsafe.go
@@ -160,7 +160,7 @@ func (s *Statx) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Statx) UnmarshalUnsafe(src []byte) {
- if s.Ctime.Packed() && s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() {
+ if s.Mtime.Packed() && s.Atime.Packed() && s.Btime.Packed() && s.Ctime.Packed() {
safecopy.CopyOut(unsafe.Pointer(s), src)
} else {
// Type Statx doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -620,7 +620,7 @@ func (f *FUSEHeaderIn) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (f *FUSEHeaderIn) Packed() bool {
- return f.Opcode.Packed() && f.Unique.Packed()
+ return f.Unique.Packed() && f.Opcode.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
@@ -646,7 +646,7 @@ func (f *FUSEHeaderIn) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (f *FUSEHeaderIn) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !f.Unique.Packed() && f.Opcode.Packed() {
+ if !f.Opcode.Packed() && f.Unique.Packed() {
// Type FUSEHeaderIn doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(f.SizeBytes()) // escapes: okay.
f.MarshalBytes(buf) // escapes: fallback.
@@ -1630,12 +1630,12 @@ func (i *IPTEntry) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IPTEntry) Packed() bool {
- return i.IP.Packed() && i.Counters.Packed()
+ return i.Counters.Packed() && i.IP.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IPTEntry) MarshalUnsafe(dst []byte) {
- if i.Counters.Packed() && i.IP.Packed() {
+ if i.IP.Packed() && i.Counters.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IPTEntry doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -1818,12 +1818,12 @@ func (i *IPTIP) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (i *IPTIP) Packed() bool {
- return i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed()
+ return i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (i *IPTIP) MarshalUnsafe(dst []byte) {
- if i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
+ if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
safecopy.CopyIn(dst, unsafe.Pointer(i))
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to MarshalBytes.
@@ -1833,7 +1833,7 @@ func (i *IPTIP) MarshalUnsafe(dst []byte) {
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (i *IPTIP) UnmarshalUnsafe(src []byte) {
- if i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
+ if i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
safecopy.CopyOut(unsafe.Pointer(i), src)
} else {
// Type IPTIP doesn't have a packed layout in memory, fallback to UnmarshalBytes.
@@ -1844,7 +1844,7 @@ func (i *IPTIP) UnmarshalUnsafe(src []byte) {
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
func (i *IPTIP) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.DstMask.Packed() && i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to MarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
i.MarshalBytes(buf) // escapes: fallback.
@@ -1874,7 +1874,7 @@ func (i *IPTIP) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
func (i *IPTIP) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
- if !i.Src.Packed() && i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() {
+ if !i.Dst.Packed() && i.SrcMask.Packed() && i.DstMask.Packed() && i.Src.Packed() {
// Type IPTIP doesn't have a packed layout in memory, fall back to UnmarshalBytes.
buf := task.CopyScratchBuffer(i.SizeBytes()) // escapes: okay.
length, err := task.CopyInBytes(addr, buf) // escapes: okay.
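
The churn above looks like a regeneration artifact: go_marshal emits the Packed() conjuncts of a composite type in varying order, and since each Packed() call is a pure, side-effect-free predicate, swapping the operands of && leaves behavior unchanged. The pattern being generated is a fast path: when every field reports a packed (padding-free) layout, marshalling collapses to one raw copy of the struct's bytes via safecopy; otherwise it falls back to the field-by-field MarshalBytes/UnmarshalBytes. A minimal sketch of that fast path, assuming a little-endian host and using hypothetical names rather than the generated API:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// pair stands in for a generated Marshallable type; the names here are
// illustrative, not the real go_marshal interface.
type pair struct {
	A uint32
	B uint32
}

// Packed reports whether the in-memory layout has no padding, so the
// struct's bytes can be copied as-is.
func (p *pair) Packed() bool {
	return unsafe.Sizeof(*p) == 8
}

// MarshalUnsafe mirrors the generated fast path: one raw copy when the
// layout is packed, a field-by-field encode otherwise.
func (p *pair) MarshalUnsafe(dst []byte) {
	if p.Packed() {
		copy(dst, (*[8]byte)(unsafe.Pointer(p))[:])
		return
	}
	binary.LittleEndian.PutUint32(dst[0:4], p.A) // fallback: MarshalBytes
	binary.LittleEndian.PutUint32(dst[4:8], p.B)
}

func main() {
	buf := make([]byte, 8)
	(&pair{A: 1, B: 2}).MarshalUnsafe(buf)
	fmt.Println(buf) // [1 0 0 0 2 0 0 0] on little-endian hosts
}
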
diff --git a/pkg/refs/refcounter.go b/pkg/refs/refcounter.go
index 3f39edb66..d9d5e6bcb 100644
--- a/pkg/refs/refcounter.go
+++ b/pkg/refs/refcounter.go
@@ -475,3 +475,13 @@ func (r *AtomicRefCount) DecRefWithDestructor(ctx context.Context, destroy func(
func (r *AtomicRefCount) DecRef(ctx context.Context) {
r.DecRefWithDestructor(ctx, nil)
}
+
+// OnExit is called on sandbox exit. It runs GC to enqueue refcount finalizers,
+// which check for reference leaks. There is no way to guarantee that every
+// finalizer will run before exiting, but this at least ensures that they will
+// be discovered/enqueued by GC.
+func OnExit() {
+ if LeakMode(atomic.LoadUint32(&leakMode)) != NoLeakChecking {
+ runtime.GC()
+ }
+}
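
OnExit is effective because the leak checker attaches a runtime finalizer to each reference-counted object: once such an object becomes garbage, GC enqueues its finalizer, which can flag a count that never reached zero. A minimal sketch of that interplay, with hypothetical names standing in for the real pkg/refs logic:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

// counted stands in for an object guarded by AtomicRefCount.
type counted struct {
	refs int64
}

func newCounted() *counted {
	c := &counted{refs: 1}
	// The finalizer runs once the object is garbage. A nonzero count at
	// that point means DecRef was never called: a leak.
	runtime.SetFinalizer(c, func(c *counted) {
		if n := atomic.LoadInt64(&c.refs); n != 0 {
			fmt.Printf("leak: finalized with %d reference(s)\n", n)
		}
	})
	return c
}

func (c *counted) DecRef() { atomic.AddInt64(&c.refs, -1) }

func main() {
	c := newCounted()
	_ = c // last reference dropped without DecRef: a leak
	c = nil

	runtime.GC() // what OnExit does: make the leaked object collectable
	// Finalizers run on a background goroutine, so even after GC they
	// may not finish before exit -- the same caveat OnExit documents.
	time.Sleep(100 * time.Millisecond)
}
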
diff --git a/pkg/sentry/platform/ring0/defs_impl_arm64.go b/pkg/sentry/platform/ring0/defs_impl_arm64.go
index eda1e1484..9fb351879 100644
--- a/pkg/sentry/platform/ring0/defs_impl_arm64.go
+++ b/pkg/sentry/platform/ring0/defs_impl_arm64.go
@@ -4,10 +4,9 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/platform/ring0/pagetables"
+ "gvisor.dev/gvisor/pkg/usermem"
"io"
"reflect"
-
- "gvisor.dev/gvisor/pkg/usermem"
)
// Useful bits.
diff --git a/runsc/boot/loader.go b/runsc/boot/loader.go
index 533b9c5e7..40c6f99fd 100644
--- a/runsc/boot/loader.go
+++ b/runsc/boot/loader.go
@@ -32,6 +32,7 @@ import (
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/memutil"
"gvisor.dev/gvisor/pkg/rand"
+ "gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/fdimport"
@@ -1011,6 +1012,8 @@ func (l *Loader) WaitExit() kernel.ExitStatus {
// Cleanup
l.ctrl.stop()
+ refs.OnExit()
+
return l.k.GlobalInit().ExitStatus()
}
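
Placing the hook in WaitExit matters: the Go runtime performs no final GC and runs no finalizers on the way out of a process, so without an explicit refs.OnExit the leak reports would silently vanish. A standalone illustration of that behavior, assuming nothing beyond the standard runtime:

package main

import (
	"fmt"
	"os"
	"runtime"
	"time"
)

func main() {
	obj := new(int)
	runtime.SetFinalizer(obj, func(*int) { fmt.Println("finalizer ran") })
	obj = nil

	if len(os.Args) > 1 && os.Args[1] == "check" {
		// The equivalent of refs.OnExit: force a GC so the finalizer is
		// at least enqueued before the process goes away.
		runtime.GC()
		time.Sleep(10 * time.Millisecond) // let the finalizer goroutine run
	}
	os.Exit(0) // without "check", the finalizer never runs
}

Run it with the check argument and the finalizer fires; run it bare and it never does.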