Diffstat (limited to 'pkg/sentry/platform/ptrace/subprocess.go')
-rw-r--r--  pkg/sentry/platform/ptrace/subprocess.go  |  50 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go
index d3b196924..15e84735e 100644
--- a/pkg/sentry/platform/ptrace/subprocess.go
+++ b/pkg/sentry/platform/ptrace/subprocess.go
@@ -21,10 +21,11 @@ import (
"sync"
"syscall"
- "gvisor.googlesource.com/gvisor/pkg/procid"
- "gvisor.googlesource.com/gvisor/pkg/sentry/arch"
- "gvisor.googlesource.com/gvisor/pkg/sentry/platform"
- "gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+ "gvisor.dev/gvisor/pkg/log"
+ "gvisor.dev/gvisor/pkg/procid"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
+ "gvisor.dev/gvisor/pkg/sentry/platform"
+ "gvisor.dev/gvisor/pkg/sentry/usermem"
)
// globalPool exists to solve two distinct problems:
@@ -122,7 +123,7 @@ type subprocess struct {
contexts map[*context]struct{}
}
-// newSubprocess returns a useable subprocess.
+// newSubprocess returns a usable subprocess.
//
// This will either be a newly created subprocess, or one from the global pool.
// The create function will be called in the latter case, which is guaranteed
@@ -154,6 +155,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {
errChan <- err
return
}
+ firstThread.grabInitRegs()
// Ready to handle requests.
errChan <- nil
@@ -178,6 +180,7 @@ func newSubprocess(create func() (*thread, error)) (*subprocess, error) {
// Detach the thread.
t.detach()
+ t.initRegs = firstThread.initRegs
// Return the thread.
r <- t
@@ -252,7 +255,7 @@ func (s *subprocess) newThread() *thread {
return t
}
-// attach attachs to the thread.
+// attach attaches to the thread.
func (t *thread) attach() {
if _, _, errno := syscall.RawSyscall(syscall.SYS_PTRACE, syscall.PTRACE_ATTACH, uintptr(t.tid), 0); errno != 0 {
panic(fmt.Sprintf("unable to attach: %v", errno))
@@ -268,7 +271,9 @@ func (t *thread) attach() {
// Initialize options.
t.init()
+}
+func (t *thread) grabInitRegs() {
// Grab registers.
//
// Note that we adjust the current register RIP value to be just before
@@ -280,9 +285,9 @@ func (t *thread) attach() {
t.initRegs.Rip -= initRegsRipAdjustment
}
-// detach detachs from the thread.
+// detach detaches from the thread.
//
-// Because the SIGSTOP is not supressed, the thread will enter group-stop.
+// Because the SIGSTOP is not suppressed, the thread will enter group-stop.
func (t *thread) detach() {
if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, syscall.PTRACE_DETACH, uintptr(t.tid), 0, uintptr(syscall.SIGSTOP), 0, 0); errno != 0 {
panic(fmt.Sprintf("can't detach new clone: %v", errno))
@@ -300,6 +305,18 @@ const (
killed
)
+func (t *thread) dumpAndPanic(message string) {
+ var regs syscall.PtraceRegs
+ message += "\n"
+ if err := t.getRegs(&regs); err == nil {
+ message += dumpRegs(&regs)
+ } else {
+ log.Warningf("unable to get registers: %v", err)
+ }
+ message += fmt.Sprintf("stubStart\t = %016x\n", stubStart)
+ panic(message)
+}
+
// wait waits for a stop event.
//
// Precondition: outcome is a valid waitOutcome.
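The new dumpAndPanic helper delegates register formatting to a dumpRegs function defined elsewhere in this package (it is arch-specific). A hypothetical sketch of such a formatter; the register subset shown is an assumption, chosen only to illustrate the output style:

// Sketch only: formats a few registers in the same "name\t = %016x" style
// as the stubStart line that dumpAndPanic appends.
func dumpRegsSketch(regs *syscall.PtraceRegs) string {
	var m string
	m += fmt.Sprintf("Rip\t = %016x\n", regs.Rip)
	m += fmt.Sprintf("Rsp\t = %016x\n", regs.Rsp)
	m += fmt.Sprintf("Rax\t = %016x\n", regs.Rax)
	m += fmt.Sprintf("Rdi\t = %016x\n", regs.Rdi)
	return m
}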
@@ -320,7 +337,7 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {
switch outcome {
case stopped:
if !status.Stopped() {
- panic(fmt.Sprintf("ptrace status unexpected: got %v, wanted stopped", status))
+ t.dumpAndPanic(fmt.Sprintf("ptrace status unexpected: got %v, wanted stopped", status))
}
stopSig := status.StopSignal()
if stopSig == 0 {
@@ -334,12 +351,12 @@ func (t *thread) wait(outcome waitOutcome) syscall.Signal {
return stopSig
case killed:
if !status.Exited() && !status.Signaled() {
- panic(fmt.Sprintf("ptrace status unexpected: got %v, wanted exited", status))
+ t.dumpAndPanic(fmt.Sprintf("ptrace status unexpected: got %v, wanted exited", status))
}
return syscall.Signal(status.ExitStatus())
default:
// Should not happen.
- panic(fmt.Sprintf("unknown outcome: %v", outcome))
+ t.dumpAndPanic(fmt.Sprintf("unknown outcome: %v", outcome))
}
}
}
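The switch shown in this hunk sits inside wait's retry loop, which is not part of the diff; presumably it is built around syscall.Wait4. A hedged reconstruction of the surrounding structure, with flags and error handling that are assumptions rather than a verbatim copy of this file:

for {
	var status syscall.WaitStatus
	r, err := syscall.Wait4(int(t.tid), &status, syscall.WALL|syscall.WUNTRACED, nil)
	if err == syscall.EINTR || err == syscall.EAGAIN {
		continue // Interrupted; retry the wait.
	} else if err != nil {
		panic(fmt.Sprintf("ptrace wait failed: %v", err))
	}
	if r != int(t.tid) {
		panic(fmt.Sprintf("ptrace wait returned %v, expected %v", r, t.tid))
	}
	switch outcome {
	// ... cases exactly as in the hunk above ...
	}
}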
@@ -357,13 +374,16 @@ func (t *thread) destroy() {
// init initializes trace options.
func (t *thread) init() {
- // Set our TRACESYSGOOD option to differeniate real SIGTRAP.
+ // Set the TRACESYSGOOD option to differentiate real SIGTRAP.
+ // Set PTRACE_O_EXITKILL to ensure that an unexpected exit of the
+ // sentry will immediately kill the associated stubs.
+ const PTRACE_O_EXITKILL = 0x100000
_, _, errno := syscall.RawSyscall6(
syscall.SYS_PTRACE,
syscall.PTRACE_SETOPTIONS,
uintptr(t.tid),
0,
- syscall.PTRACE_O_TRACESYSGOOD,
+ syscall.PTRACE_O_TRACESYSGOOD|syscall.PTRACE_O_TRACEEXIT|PTRACE_O_EXITKILL,
0, 0)
if errno != 0 {
panic(fmt.Sprintf("ptrace set options failed: %v", errno))
@@ -406,7 +426,7 @@ func (t *thread) syscall(regs *syscall.PtraceRegs) (uintptr, error) {
// between syscall-enter-stop and syscall-exit-stop; it happens *after*
// syscall-exit-stop.)" - ptrace(2), "Syscall-stops"
if sig := t.wait(stopped); sig != (syscallEvent | syscall.SIGTRAP) {
- panic(fmt.Sprintf("wait failed: expected SIGTRAP, got %v [%d]", sig, sig))
+ t.dumpAndPanic(fmt.Sprintf("wait failed: expected SIGTRAP, got %v [%d]", sig, sig))
}
// Grab registers.
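The syscallEvent bit tested above is presumably the 0x80 marker that PTRACE_O_TRACESYSGOOD (enabled in init above) ORs into the stop signal on syscall stops; that is what lets this check tell a syscall-exit-stop apart from a stray SIGTRAP:

// Presumed definition (declared elsewhere in this file); with
// TRACESYSGOOD set, syscall stops report SIGTRAP | 0x80.
const syscallEvent syscall.Signal = 0x80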
@@ -538,7 +558,7 @@ func (s *subprocess) switchToApp(c *context, ac arch.Context) bool {
if c.signalInfo.Code > 0 {
// The signal was generated by the kernel. We inspect
// the signal information, and may patch it in order to
- // faciliate vsyscall emulation. See patchSignalInfo.
+ // facilitate vsyscall emulation. See patchSignalInfo.
patchSignalInfo(regs, &c.signalInfo)
return false
} else if c.signalInfo.Code <= 0 && c.signalInfo.Pid() == int32(os.Getpid()) {