Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/arch/arch_abi_autogen_unsafe.go             | 220
-rw-r--r--  pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go       | 286
-rw-r--r--  pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go       | 322
-rw-r--r--  pkg/sentry/control/pprof.go                            | 269
-rw-r--r--  pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go  | 200
5 files changed, 683 insertions(+), 614 deletions(-)
diff --git a/pkg/sentry/arch/arch_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
index fe68f921d..1bd458b68 100644
--- a/pkg/sentry/arch/arch_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_abi_autogen_unsafe.go
@@ -23,6 +23,116 @@ var _ marshal.Marshallable = (*SignalStack)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (s *SignalInfo) SizeBytes() int {
+ return 16 +
+ 1*(128-16)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (s *SignalInfo) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ dst[0] = byte(s.Fields[idx])
+ dst = dst[1:]
+ }
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (s *SignalInfo) UnmarshalBytes(src []byte) {
+ s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ for idx := 0; idx < (128-16); idx++ {
+ s.Fields[idx] = src[0]
+ src = src[1:]
+ }
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (s *SignalInfo) Packed() bool {
+ return true
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (s *SignalInfo) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(s))
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(s), src)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return s.CopyOutN(cc, addr, s.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
+ hdr.Len = s.SizeBytes()
+ hdr.Cap = s.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that s
+ // must live until the use above.
+ runtime.KeepAlive(s) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalAct) SizeBytes() int {
return 24 +
(*linux.SignalSet)(nil).SizeBytes()
@@ -260,113 +370,3 @@ func (s *SignalStack) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (s *SignalInfo) SizeBytes() int {
- return 16 +
- 1*(128-16)
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (s *SignalInfo) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Signo))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Errno))
- dst = dst[4:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(s.Code))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- for idx := 0; idx < (128-16); idx++ {
- dst[0] = byte(s.Fields[idx])
- dst = dst[1:]
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (s *SignalInfo) UnmarshalBytes(src []byte) {
- s.Signo = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Errno = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- s.Code = int32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- for idx := 0; idx < (128-16); idx++ {
- s.Fields[idx] = src[0]
- src = src[1:]
- }
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (s *SignalInfo) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (s *SignalInfo) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(s))
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (s *SignalInfo) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(s), src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (s *SignalInfo) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (s *SignalInfo) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return s.CopyOutN(cc, addr, s.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (s *SignalInfo) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (s *SignalInfo) WriteTo(writer io.Writer) (int64, error) {
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
- hdr.Len = s.SizeBytes()
- hdr.Cap = s.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that s
- // must live until the use above.
- runtime.KeepAlive(s) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
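For reference, the generated SignalInfo methods above define a simple wire format: a 16-byte header (Signo, Errno, Code, then four bytes of alignment padding) followed by the 112-byte Fields payload. A minimal standalone sketch of the same round trip, assuming a little-endian host in place of usermem.ByteOrder and using only the standard library (the type and method names here are illustrative, not gvisor's):

package main

import (
	"encoding/binary"
	"fmt"
)

// signalInfo mirrors the layout marshalled above: 16 header bytes
// (three int32 fields plus 4 bytes of padding), then 128-16 payload
// bytes.
type signalInfo struct {
	Signo  int32
	Errno  int32
	Code   int32
	Fields [128 - 16]byte
}

func (s *signalInfo) sizeBytes() int { return 16 + (128 - 16) }

func (s *signalInfo) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint32(dst[0:4], uint32(s.Signo))
	binary.LittleEndian.PutUint32(dst[4:8], uint32(s.Errno))
	binary.LittleEndian.PutUint32(dst[8:12], uint32(s.Code))
	// dst[12:16] is padding; it stays zero.
	copy(dst[16:], s.Fields[:])
}

func (s *signalInfo) unmarshalBytes(src []byte) {
	s.Signo = int32(binary.LittleEndian.Uint32(src[0:4]))
	s.Errno = int32(binary.LittleEndian.Uint32(src[4:8]))
	s.Code = int32(binary.LittleEndian.Uint32(src[8:12]))
	// src[12:16] is padding; it is skipped.
	copy(s.Fields[:], src[16:])
}

func main() {
	in := signalInfo{Signo: 11, Code: 1}
	buf := make([]byte, in.sizeBytes())
	in.marshalBytes(buf)

	var out signalInfo
	out.unmarshalBytes(buf)
	fmt.Println(out.Signo, out.Code) // 11 1
}

Because the Go struct's in-memory padding coincides with the wire format's padding, the byte-by-byte path and the raw-memory MarshalUnsafe path produce identical output, which is what lets the generated Packed() return true for this type.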
diff --git a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
index 466bf889a..cdbc88377 100644
--- a/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_amd64_abi_autogen_unsafe.go
@@ -25,6 +25,149 @@ var _ marshal.Marshallable = (*UContext64)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*SignalContext64)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalContext64) SizeBytes() int {
return 184 +
(*linux.SignalSet)(nil).SizeBytes() +
@@ -262,146 +405,3 @@ func (s *SignalContext64) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *UContext64) SizeBytes() int {
- return 16 +
- (*SignalStack)(nil).SizeBytes() +
- (*SignalContext64)(nil).SizeBytes() +
- (*linux.SignalSet)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
- dst = dst[8:]
- u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
- dst = dst[u.Stack.SizeBytes():]
- u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
- dst = dst[u.MContext.SizeBytes():]
- u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
- dst = dst[u.Sigset.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
- src = src[u.Stack.SizeBytes():]
- u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
- src = src[u.MContext.SizeBytes():]
- u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
- src = src[u.Sigset.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *UContext64) Packed() bool {
- return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *UContext64) MarshalUnsafe(dst []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- u.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *UContext64) UnmarshalUnsafe(src []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyOut(unsafe.Pointer(u), src)
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- u.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- u.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- u.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, u.SizeBytes())
- u.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
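The CopyOutN, CopyIn, and WriteTo bodies above all rest on the same zero-copy trick: point a []byte at the struct's own memory through a reflect.SliceHeader, hand that slice to the copier, and pin the struct with runtime.KeepAlive because escape analysis was bypassed. A minimal standalone sketch of the pattern, under the assumption that the struct is packed; gohacks.Noescape and safecopy are gvisor-internal and omitted here:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

type header struct {
	Flags uint64
	Link  uint64
}

func main() {
	h := &header{Flags: 1, Link: 2}

	// Construct a slice backed by h's underlying memory, exactly as
	// the generated code does.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(unsafe.Pointer(h))
	hdr.Len = int(unsafe.Sizeof(*h))
	hdr.Cap = int(unsafe.Sizeof(*h))

	fmt.Println(len(buf), buf[0]) // 16 1 on a little-endian machine.

	// The garbage collector cannot see the uintptr alias, so h must
	// be explicitly kept alive until the last use of buf.
	runtime.KeepAlive(h)
}

Aliasing the struct this way is only sound when the type is packed; the non-packed branches above instead marshal into a scratch buffer (cc.CopyScratchBuffer or make([]byte, ...)) before copying.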
diff --git a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
index 95a73e115..aac25375e 100644
--- a/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
+++ b/pkg/sentry/arch/arch_arm64_abi_autogen_unsafe.go
@@ -27,167 +27,6 @@ var _ marshal.Marshallable = (*aarch64Ctx)(nil)
var _ marshal.Marshallable = (*linux.SignalSet)(nil)
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (u *UContext64) SizeBytes() int {
- return 16 +
- (*SignalStack)(nil).SizeBytes() +
- (*linux.SignalSet)(nil).SizeBytes() +
- 1*120 +
- 1*8 +
- (*SignalContext64)(nil).SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (u *UContext64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
- dst = dst[8:]
- u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
- dst = dst[u.Stack.SizeBytes():]
- u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
- dst = dst[u.Sigset.SizeBytes():]
- for idx := 0; idx < 120; idx++ {
- dst[0] = byte(u._pad[idx])
- dst = dst[1:]
- }
- for idx := 0; idx < 8; idx++ {
- dst[0] = byte(u._pad2[idx])
- dst = dst[1:]
- }
- u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
- dst = dst[u.MContext.SizeBytes():]
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (u *UContext64) UnmarshalBytes(src []byte) {
- u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
- src = src[u.Stack.SizeBytes():]
- u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
- src = src[u.Sigset.SizeBytes():]
- for idx := 0; idx < 120; idx++ {
- u._pad[idx] = src[0]
- src = src[1:]
- }
- for idx := 0; idx < 8; idx++ {
- u._pad2[idx] = src[0]
- src = src[1:]
- }
- u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
- src = src[u.MContext.SizeBytes():]
-}
-
-// Packed implements marshal.Marshallable.Packed.
-//go:nosplit
-func (u *UContext64) Packed() bool {
- return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (u *UContext64) MarshalUnsafe(dst []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(u))
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- u.MarshalBytes(dst)
- }
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (u *UContext64) UnmarshalUnsafe(src []byte) {
- if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- safecopy.CopyOut(unsafe.Pointer(u), src)
- } else {
- // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- u.UnmarshalBytes(src)
- }
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-//go:nosplit
-func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- u.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-//go:nosplit
-func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- return u.CopyOutN(cc, addr, u.SizeBytes())
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-//go:nosplit
-func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- u.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return length, err
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
- if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
- // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, u.SizeBytes())
- u.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
- // Construct a slice backed by dst's underlying memory.
- var buf []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
- hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
- hdr.Len = u.SizeBytes()
- hdr.Cap = u.SizeBytes()
-
- length, err := writer.Write(buf)
- // Since we bypassed the compiler's escape analysis, indicate that u
- // must live until the use above.
- runtime.KeepAlive(u) // escapes: replaced by intrinsic.
- return int64(length), err
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SignalContext64) SizeBytes() int {
return 32 +
8*31 +
@@ -590,3 +429,164 @@ func (f *FpsimdContext) WriteTo(writer io.Writer) (int64, error) {
return int64(length), err
}
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (u *UContext64) SizeBytes() int {
+ return 16 +
+ (*SignalStack)(nil).SizeBytes() +
+ (*linux.SignalSet)(nil).SizeBytes() +
+ 1*120 +
+ 1*8 +
+ (*SignalContext64)(nil).SizeBytes()
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (u *UContext64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Flags))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(u.Link))
+ dst = dst[8:]
+ u.Stack.MarshalBytes(dst[:u.Stack.SizeBytes()])
+ dst = dst[u.Stack.SizeBytes():]
+ u.Sigset.MarshalBytes(dst[:u.Sigset.SizeBytes()])
+ dst = dst[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ dst[0] = byte(u._pad[idx])
+ dst = dst[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ dst[0] = byte(u._pad2[idx])
+ dst = dst[1:]
+ }
+ u.MContext.MarshalBytes(dst[:u.MContext.SizeBytes()])
+ dst = dst[u.MContext.SizeBytes():]
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (u *UContext64) UnmarshalBytes(src []byte) {
+ u.Flags = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Link = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ u.Stack.UnmarshalBytes(src[:u.Stack.SizeBytes()])
+ src = src[u.Stack.SizeBytes():]
+ u.Sigset.UnmarshalBytes(src[:u.Sigset.SizeBytes()])
+ src = src[u.Sigset.SizeBytes():]
+ for idx := 0; idx < 120; idx++ {
+ u._pad[idx] = src[0]
+ src = src[1:]
+ }
+ for idx := 0; idx < 8; idx++ {
+ u._pad2[idx] = src[0]
+ src = src[1:]
+ }
+ u.MContext.UnmarshalBytes(src[:u.MContext.SizeBytes()])
+ src = src[u.MContext.SizeBytes():]
+}
+
+// Packed implements marshal.Marshallable.Packed.
+//go:nosplit
+func (u *UContext64) Packed() bool {
+ return u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed()
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (u *UContext64) MarshalUnsafe(dst []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(u))
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ u.MarshalBytes(dst)
+ }
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (u *UContext64) UnmarshalUnsafe(src []byte) {
+ if u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(u), src)
+ } else {
+ // Type UContext64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ u.UnmarshalBytes(src)
+ }
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+//go:nosplit
+func (u *UContext64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ u.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+//go:nosplit
+func (u *UContext64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ return u.CopyOutN(cc, addr, u.SizeBytes())
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+//go:nosplit
+func (u *UContext64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(u.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ u.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return length, err
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (u *UContext64) WriteTo(writer io.Writer) (int64, error) {
+ if !u.MContext.Packed() && u.Sigset.Packed() && u.Stack.Packed() {
+ // Type UContext64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, u.SizeBytes())
+ u.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
+ // Construct a slice backed by dst's underlying memory.
+ var buf []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(u)))
+ hdr.Len = u.SizeBytes()
+ hdr.Cap = u.SizeBytes()
+
+ length, err := writer.Write(buf)
+ // Since we bypassed the compiler's escape analysis, indicate that u
+ // must live until the use above.
+ runtime.KeepAlive(u) // escapes: replaced by intrinsic.
+ return int64(length), err
+}
+
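UContext64 is a type whose fast path is conditional: a raw memory copy is valid only if every embedded field reports Packed(), and otherwise the code falls back to field-by-field marshalling. A standalone sketch of that dispatch, using unsafe.Slice in place of the safecopy machinery above (the types here are illustrative, not gvisor's):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

type sigset struct{ Val uint64 }

func (s *sigset) packed() bool { return true }

type ucontext struct {
	Flags  uint64
	Sigset sigset
}

// packed answers the same question as the generated Packed(): does a
// raw copy of the struct's memory match the wire format?
func (u *ucontext) packed() bool { return u.Sigset.packed() }

func (u *ucontext) marshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], u.Flags)
	binary.LittleEndian.PutUint64(dst[8:16], u.Sigset.Val)
}

func (u *ucontext) marshalUnsafe(dst []byte) {
	if u.packed() {
		// Fast path: a single raw memory copy (safecopy.CopyIn in the
		// generated code).
		copy(dst, unsafe.Slice((*byte)(unsafe.Pointer(u)), unsafe.Sizeof(*u)))
		return
	}
	u.marshalBytes(dst) // Fallback: field by field.
}

func main() {
	u := ucontext{Flags: 7, Sigset: sigset{Val: 42}}
	buf := make([]byte, 16)
	u.marshalUnsafe(buf)
	fmt.Println(buf[0], buf[8]) // 7 42 on little-endian.
}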
diff --git a/pkg/sentry/control/pprof.go b/pkg/sentry/control/pprof.go
index 91b8fb44f..b78e29416 100644
--- a/pkg/sentry/control/pprof.go
+++ b/pkg/sentry/control/pprof.go
@@ -15,10 +15,10 @@
package control
import (
- "errors"
"runtime"
"runtime/pprof"
"runtime/trace"
+ "time"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -26,184 +26,253 @@ import (
"gvisor.dev/gvisor/pkg/urpc"
)
-var errNoOutput = errors.New("no output writer provided")
+// Profile includes profile-related RPC stubs. It provides a way to
+// control the built-in runtime profiling facilities.
+//
+// The profile object must be instantiated via NewProfile.
+type Profile struct {
+ // kernel is the kernel under profile. It's immutable.
+ kernel *kernel.Kernel
-// ProfileOpts contains options for the StartCPUProfile/Goroutine RPC call.
-type ProfileOpts struct {
- // File is the filesystem path for the profile.
- File string `json:"path"`
+ // cpuMu protects CPU profiling.
+ cpuMu sync.Mutex
- // FilePayload is the destination for the profiling output.
- urpc.FilePayload
+ // blockMu protects block profiling.
+ blockMu sync.Mutex
+
+ // mutexMu protects mutex profiling.
+ mutexMu sync.Mutex
+
+ // traceMu protects trace profiling.
+ traceMu sync.Mutex
+
+ // done is closed by the stop callback returned by NewProfile; it
+ // cuts any in-flight profile short.
+ done chan struct{}
}
-// Profile includes profile-related RPC stubs. It provides a way to
-// control the built-in pprof facility in sentry via sentryctl.
-//
-// The following options to sentryctl are added:
+// NewProfile returns a new Profile object and a stop callback.
//
-// - collect CPU profile on-demand.
-// sentryctl -pid <pid> pprof-cpu-start
-// sentryctl -pid <pid> pprof-cpu-stop
-//
-// - dump out the stack trace of current go routines.
-// sentryctl -pid <pid> pprof-goroutine
-type Profile struct {
- // Kernel is the kernel under profile. It's immutable.
- Kernel *kernel.Kernel
+// The stop callback should be used at most once.
+func NewProfile(k *kernel.Kernel) (*Profile, func()) {
+ p := &Profile{
+ kernel: k,
+ done: make(chan struct{}),
+ }
+ return p, func() {
+ close(p.done)
+ }
+}
- // mu protects the fields below.
- mu sync.Mutex
+// CPUProfileOpts contains options specifically for CPU profiles.
+type CPUProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
- // cpuFile is the current CPU profile output file.
- cpuFile *fd.FD
+ // Duration is the duration of the profile.
+ Duration time.Duration `json:"duration"`
- // traceFile is the current execution trace output file.
- traceFile *fd.FD
+ // Hz is the CPU profiling rate in samples per second; zero leaves
+ // the runtime default in place.
+ Hz int `json:"hz"`
}
-// StartCPUProfile is an RPC stub which starts recording the CPU profile in a
-// file.
-func (p *Profile) StartCPUProfile(o *ProfileOpts, _ *struct{}) error {
+// CPU is an RPC stub which collects a CPU profile.
+func (p *Profile) CPU(o *CPUProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
output, err := fd.NewFromFile(o.FilePayload.Files[0])
if err != nil {
return err
}
+ defer output.Close()
- p.mu.Lock()
- defer p.mu.Unlock()
+ p.cpuMu.Lock()
+ defer p.cpuMu.Unlock()
// Returns an error if profiling is already started.
+ if o.Hz != 0 {
+ runtime.SetCPUProfileRate(o.Hz)
+ }
if err := pprof.StartCPUProfile(output); err != nil {
- output.Close()
return err
}
+ defer pprof.StopCPUProfile()
- p.cpuFile = output
- return nil
-}
-
-// StopCPUProfile is an RPC stub which stops the CPU profiling and flush out the
-// profile data. It takes no argument.
-func (p *Profile) StopCPUProfile(_, _ *struct{}) error {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.cpuFile == nil {
- return errors.New("CPU profiling not started")
+ // Collect the profile.
+ select {
+ case <-time.After(o.Duration):
+ case <-p.done:
}
- pprof.StopCPUProfile()
- p.cpuFile.Close()
- p.cpuFile = nil
return nil
}
-// HeapProfile generates a heap profile for the sentry.
-func (p *Profile) HeapProfile(o *ProfileOpts, _ *struct{}) error {
+// HeapProfileOpts contains options specifically for heap profiles.
+type HeapProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
+}
+
+// Heap generates a heap profile.
+func (p *Profile) Heap(o *HeapProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
+
output := o.FilePayload.Files[0]
defer output.Close()
+
runtime.GC() // Get up-to-date statistics.
- if err := pprof.WriteHeapProfile(output); err != nil {
- return err
- }
- return nil
+ return pprof.WriteHeapProfile(output)
+}
+
+// GoroutineProfileOpts contains options specifically for goroutine profiles.
+type GoroutineProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
}
-// GoroutineProfile is an RPC stub which dumps out the stack trace for all
-// running goroutines.
-func (p *Profile) GoroutineProfile(o *ProfileOpts, _ *struct{}) error {
+// Goroutine dumps out the stack trace for all running goroutines.
+func (p *Profile) Goroutine(o *GoroutineProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
+
output := o.FilePayload.Files[0]
defer output.Close()
- if err := pprof.Lookup("goroutine").WriteTo(output, 2); err != nil {
- return err
- }
- return nil
+
+ return pprof.Lookup("goroutine").WriteTo(output, 2)
+}
+
+// BlockProfileOpts contains options specifically for block profiles.
+type BlockProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
+
+ // Duration is the duration of the profile.
+ Duration time.Duration `json:"duration"`
+
+ // Rate is the block profile rate.
+ Rate int `json:"rate"`
}
-// BlockProfile is an RPC stub which dumps out the stack trace that led to
-// blocking on synchronization primitives.
-func (p *Profile) BlockProfile(o *ProfileOpts, _ *struct{}) error {
+// Block dumps a blocking profile.
+func (p *Profile) Block(o *BlockProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
+
output := o.FilePayload.Files[0]
defer output.Close()
- if err := pprof.Lookup("block").WriteTo(output, 0); err != nil {
- return err
+
+ p.blockMu.Lock()
+ defer p.blockMu.Unlock()
+
+ // Always set the rate. We then wait to collect a profile at this rate,
+ // and disable when we're done.
+ rate := 1
+ if o.Rate != 0 {
+ rate = o.Rate
}
- return nil
+ runtime.SetBlockProfileRate(rate)
+ defer runtime.SetBlockProfileRate(0)
+
+ // Collect the profile.
+ select {
+ case <-time.After(o.Duration):
+ case <-p.done:
+ }
+
+ return pprof.Lookup("block").WriteTo(output, 0)
+}
+
+// MutexProfileOpts contains options specifically for mutex profiles.
+type MutexProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
+
+ // Duration is the duration of the profile.
+ Duration time.Duration `json:"duration"`
+
+ // Fraction is the mutex profile fraction.
+ Fraction int `json:"fraction"`
}
-// MutexProfile is an RPC stub which dumps out the stack trace of holders of
-// contended mutexes.
-func (p *Profile) MutexProfile(o *ProfileOpts, _ *struct{}) error {
+// Mutex dumps a mutex profile.
+func (p *Profile) Mutex(o *MutexProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
+
output := o.FilePayload.Files[0]
defer output.Close()
- if err := pprof.Lookup("mutex").WriteTo(output, 0); err != nil {
- return err
+
+ p.mutexMu.Lock()
+ defer p.mutexMu.Unlock()
+
+ // Always set the fraction.
+ fraction := 1
+ if o.Fraction != 0 {
+ fraction = o.Fraction
}
- return nil
+ runtime.SetMutexProfileFraction(fraction)
+ defer runtime.SetMutexProfileFraction(0)
+
+ // Collect the profile.
+ select {
+ case <-time.After(o.Duration):
+ case <-p.done:
+ }
+
+ return pprof.Lookup("mutex").WriteTo(output, 0)
}
-// StartTrace is an RPC stub which starts collection of an execution trace.
-func (p *Profile) StartTrace(o *ProfileOpts, _ *struct{}) error {
+// TraceProfileOpts contains options specifically for traces.
+type TraceProfileOpts struct {
+ // FilePayload is the destination for the profiling output.
+ urpc.FilePayload
+
+ // Duration is the duration of the profile.
+ Duration time.Duration `json:"duration"`
+}
+
+// Trace is an RPC stub which starts collection of an execution trace.
+func (p *Profile) Trace(o *TraceProfileOpts, _ *struct{}) error {
if len(o.FilePayload.Files) < 1 {
- return errNoOutput
+ return nil // Allowed.
}
output, err := fd.NewFromFile(o.FilePayload.Files[0])
if err != nil {
return err
}
+ defer output.Close()
- p.mu.Lock()
- defer p.mu.Unlock()
+ p.traceMu.Lock()
+ defer p.traceMu.Unlock()
// Returns an error if profiling is already started.
if err := trace.Start(output); err != nil {
output.Close()
return err
}
+ defer trace.Stop()
// Ensure all trace contexts are registered.
- p.Kernel.RebuildTraceContexts()
-
- p.traceFile = output
- return nil
-}
-
-// StopTrace is an RPC stub which stops collection of an ongoing execution
-// trace and flushes the trace data. It takes no argument.
-func (p *Profile) StopTrace(_, _ *struct{}) error {
- p.mu.Lock()
- defer p.mu.Unlock()
+ p.kernel.RebuildTraceContexts()
- if p.traceFile == nil {
- return errors.New("execution tracing not started")
+ // Wait for the trace.
+ select {
+ case <-time.After(o.Duration):
+ case <-p.done:
}
// Similarly to the case above, if tasks have not ended traces, we will
// lose information. Thus we need to rebuild the tasks in order to have
// complete information. This will not lose information if multiple
// traces are overlapping.
- p.Kernel.RebuildTraceContexts()
+ p.kernel.RebuildTraceContexts()
- trace.Stop()
- p.traceFile.Close()
- p.traceFile = nil
return nil
}
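Every rewritten RPC above now shares one shape: enable the profile, block until either the requested duration elapses or the server-wide done channel closes, then write the result and disable on the way out. A standalone sketch of that shape for the block profile (the helper name, file name, and durations are illustrative):

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

// collectBlock mirrors Profile.Block above: set the rate, wait for
// the duration or cancellation, dump the profile, and reset the rate
// in a deferred call.
func collectBlock(out *os.File, duration time.Duration, done <-chan struct{}) error {
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)

	select {
	case <-time.After(duration):
	case <-done:
	}
	return pprof.Lookup("block").WriteTo(out, 0)
}

func main() {
	done := make(chan struct{}) // Closed by the server on shutdown.
	f, err := os.Create("block.pprof")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := collectBlock(f, 100*time.Millisecond, done); err != nil {
		panic(err)
	}
}

Because the rate is reset in a defer and callers are serialized by the per-profile mutexes (blockMu, mutexMu, and so on), concurrent requests cannot trample each other's runtime settings.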
diff --git a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
index 6886c63d1..956643160 100644
--- a/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
+++ b/pkg/sentry/syscalls/linux/linux_abi_autogen_unsafe.go
@@ -458,26 +458,49 @@ func (u *userSockFprog) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *multipleMessageHeader64) SizeBytes() int {
- return 8 +
- (*MessageHeader64)(nil).SizeBytes()
+func (m *MessageHeader64) SizeBytes() int {
+ return 56
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
- m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
- dst = dst[m.msgHdr.SizeBytes():]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
+func (m *MessageHeader64) MarshalBytes(dst []byte) {
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
+ dst = dst[4:]
+ // Padding: dst[:sizeof(uint32)] ~= uint32(0)
+ dst = dst[4:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
+ dst = dst[8:]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
- m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
- src = src[m.msgHdr.SizeBytes():]
- m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *MessageHeader64) UnmarshalBytes(src []byte) {
+ m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
+ src = src[4:]
+ // Padding: var _ uint32 ~= src[:sizeof(uint32)]
+ src = src[4:]
+ m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
+ src = src[8:]
+ m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -485,40 +508,23 @@ func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *multipleMessageHeader64) Packed() bool {
- return m.msgHdr.Packed()
+func (m *MessageHeader64) Packed() bool {
+ return true
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
- m.MarshalBytes(dst)
- }
+func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
- if m.msgHdr.Packed() {
- safecopy.CopyOut(unsafe.Pointer(m), src)
- } else {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
- m.UnmarshalBytes(src)
- }
+func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- m.MarshalBytes(buf) // escapes: fallback.
- return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
- }
-
+func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -535,23 +541,13 @@ func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
- buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
- length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
- // Unmarshal unconditionally. If we had a short copy-in, this results in a
- // partially unmarshalled struct.
- m.UnmarshalBytes(buf) // escapes: fallback.
- return length, err
- }
-
+func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -567,15 +563,7 @@ func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Ad
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
- if !m.msgHdr.Packed() {
- // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
- buf := make([]byte, m.SizeBytes())
- m.MarshalBytes(buf)
- length, err := writer.Write(buf)
- return int64(length), err
- }
-
+func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -591,49 +579,26 @@ func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
}
// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (m *MessageHeader64) SizeBytes() int {
- return 56
+func (m *multipleMessageHeader64) SizeBytes() int {
+ return 8 +
+ (*MessageHeader64)(nil).SizeBytes()
}
// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (m *MessageHeader64) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Name))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.NameLen))
- dst = dst[4:]
- // Padding: dst[:sizeof(uint32)] ~= uint32(0)
- dst = dst[4:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Iov))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.IovLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.Control))
- dst = dst[8:]
- usermem.ByteOrder.PutUint64(dst[:8], uint64(m.ControlLen))
- dst = dst[8:]
- usermem.ByteOrder.PutUint32(dst[:4], uint32(m.Flags))
+func (m *multipleMessageHeader64) MarshalBytes(dst []byte) {
+ m.msgHdr.MarshalBytes(dst[:m.msgHdr.SizeBytes()])
+ dst = dst[m.msgHdr.SizeBytes():]
+ usermem.ByteOrder.PutUint32(dst[:4], uint32(m.msgLen))
dst = dst[4:]
// Padding: dst[:sizeof(int32)] ~= int32(0)
dst = dst[4:]
}
// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (m *MessageHeader64) UnmarshalBytes(src []byte) {
- m.Name = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.NameLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
- src = src[4:]
- // Padding: var _ uint32 ~= src[:sizeof(uint32)]
- src = src[4:]
- m.Iov = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.IovLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Control = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.ControlLen = uint64(usermem.ByteOrder.Uint64(src[:8]))
- src = src[8:]
- m.Flags = int32(usermem.ByteOrder.Uint32(src[:4]))
+func (m *multipleMessageHeader64) UnmarshalBytes(src []byte) {
+ m.msgHdr.UnmarshalBytes(src[:m.msgHdr.SizeBytes()])
+ src = src[m.msgHdr.SizeBytes():]
+ m.msgLen = uint32(usermem.ByteOrder.Uint32(src[:4]))
src = src[4:]
// Padding: var _ int32 ~= src[:sizeof(int32)]
src = src[4:]
@@ -641,23 +606,40 @@ func (m *MessageHeader64) UnmarshalBytes(src []byte) {
// Packed implements marshal.Marshallable.Packed.
//go:nosplit
-func (m *MessageHeader64) Packed() bool {
- return true
+func (m *multipleMessageHeader64) Packed() bool {
+ return m.msgHdr.Packed()
}
// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (m *MessageHeader64) MarshalUnsafe(dst []byte) {
- safecopy.CopyIn(dst, unsafe.Pointer(m))
+func (m *multipleMessageHeader64) MarshalUnsafe(dst []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyIn(dst, unsafe.Pointer(m))
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to MarshalBytes.
+ m.MarshalBytes(dst)
+ }
}
// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (m *MessageHeader64) UnmarshalUnsafe(src []byte) {
- safecopy.CopyOut(unsafe.Pointer(m), src)
+func (m *multipleMessageHeader64) UnmarshalUnsafe(src []byte) {
+ if m.msgHdr.Packed() {
+ safecopy.CopyOut(unsafe.Pointer(m), src)
+ } else {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fallback to UnmarshalBytes.
+ m.UnmarshalBytes(src)
+ }
}
// CopyOutN implements marshal.Marshallable.CopyOutN.
//go:nosplit
-func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+func (m *multipleMessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ m.MarshalBytes(buf) // escapes: fallback.
+ return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -674,13 +656,23 @@ func (m *MessageHeader64) CopyOutN(cc marshal.CopyContext, addr usermem.Addr, li
// CopyOut implements marshal.Marshallable.CopyOut.
//go:nosplit
-func (m *MessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyOut(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
return m.CopyOutN(cc, addr, m.SizeBytes())
}
// CopyIn implements marshal.Marshallable.CopyIn.
//go:nosplit
-func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+func (m *multipleMessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to UnmarshalBytes.
+ buf := cc.CopyScratchBuffer(m.SizeBytes()) // escapes: okay.
+ length, err := cc.CopyInBytes(addr, buf) // escapes: okay.
+ // Unmarshal unconditionally. If we had a short copy-in, this results in a
+ // partially unmarshalled struct.
+ m.UnmarshalBytes(buf) // escapes: fallback.
+ return length, err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
@@ -696,7 +688,15 @@ func (m *MessageHeader64) CopyIn(cc marshal.CopyContext, addr usermem.Addr) (int
}
// WriteTo implements io.WriterTo.WriteTo.
-func (m *MessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+func (m *multipleMessageHeader64) WriteTo(writer io.Writer) (int64, error) {
+ if !m.msgHdr.Packed() {
+ // Type multipleMessageHeader64 doesn't have a packed layout in memory, fall back to MarshalBytes.
+ buf := make([]byte, m.SizeBytes())
+ m.MarshalBytes(buf)
+ length, err := writer.Write(buf)
+ return int64(length), err
+ }
+
// Construct a slice backed by dst's underlying memory.
var buf []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))