Diffstat (limited to 'pkg/sentry/kernel/ptrace.go')
 pkg/sentry/kernel/ptrace.go | 108
 1 file changed, 50 insertions(+), 58 deletions(-)
diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go
index 20b1c4cd4..f1c2c4bf0 100644
--- a/pkg/sentry/kernel/ptrace.go
+++ b/pkg/sentry/kernel/ptrace.go
@@ -16,7 +16,6 @@ package kernel
import (
"fmt"
- "syscall"
"gvisor.googlesource.com/gvisor/pkg/abi/linux"
"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
@@ -24,19 +23,6 @@ import (
"gvisor.googlesource.com/gvisor/pkg/syserror"
)
-// ptrace constants from Linux's include/uapi/linux/ptrace.h.
-const (
- _PTRACE_EVENT_SECCOMP = 7
- PTRACE_SEIZE = 0x4206
- PTRACE_INTERRUPT = 0x4207
- PTRACE_LISTEN = 0x4208
- PTRACE_PEEKSIGINFO = 0x4209
- PTRACE_GETSIGMASK = 0x420a
- PTRACE_SETSIGMASK = 0x420b
- _PTRACE_O_EXITKILL = 1 << 20
- _PTRACE_O_TRACESECCOMP = 1 << _PTRACE_EVENT_SECCOMP
-)
-
// ptraceOptions are the subset of options controlling a task's ptrace behavior
// that are set by ptrace(PTRACE_SETOPTIONS).
type ptraceOptions struct {
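The constants deleted above now come from the abi/linux package instead of being defined locally in the kernel package. A minimal sketch of what those definitions presumably look like in pkg/abi/linux, with values copied from the removed block (which in turn mirror Linux's include/uapi/linux/ptrace.h); the actual file may name or group them differently:

// Hypothetical sketch of the abi/linux definitions this change relies on;
// values mirror the constants deleted above.
package linux

const (
    PTRACE_EVENT_SECCOMP = 7

    PTRACE_SEIZE       = 0x4206
    PTRACE_INTERRUPT   = 0x4207
    PTRACE_LISTEN      = 0x4208
    PTRACE_PEEKSIGINFO = 0x4209
    PTRACE_GETSIGMASK  = 0x420a
    PTRACE_SETSIGMASK  = 0x420b

    PTRACE_O_EXITKILL     = 1 << 20
    PTRACE_O_TRACESECCOMP = 1 << PTRACE_EVENT_SECCOMP
)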
@@ -505,7 +491,7 @@ func (t *Task) ptraceSeccomp(data uint16) bool {
return false
}
t.Debugf("Entering PTRACE_EVENT_SECCOMP stop")
- t.ptraceEventLocked(_PTRACE_EVENT_SECCOMP, uint64(data))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_SECCOMP, uint64(data))
return true
}
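For context on the stop entered above, here is a hedged, tracer-side sketch (not part of this change) of how PTRACE_EVENT_SECCOMP is consumed: the tracer sets PTRACE_O_TRACESECCOMP, resumes the tracee, and on a seccomp stop reads the 16-bit SECCOMP_RET_DATA value, the same value passed as data to ptraceSeccomp, via PTRACE_GETEVENTMSG. It uses golang.org/x/sys/unix; the package name, function name, and the assumption that the tracee is already attached and stopped are illustrative.

package ptracedemo

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// traceSeccomp resumes an already-attached, stopped tracee and reports
// seccomp stops. Hypothetical example; not part of the sentry.
func traceSeccomp(pid int) error {
    // Ask for PTRACE_EVENT_SECCOMP stops.
    if err := unix.PtraceSetOptions(pid, unix.PTRACE_O_TRACESECCOMP); err != nil {
        return err
    }
    for {
        if err := unix.PtraceCont(pid, 0); err != nil {
            return err
        }
        var ws unix.WaitStatus
        if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
            return err
        }
        if ws.Exited() || ws.Signaled() {
            return nil
        }
        // The ptrace event number is encoded above the stop signal in the
        // wait status; TrapCause extracts it.
        if ws.Stopped() && ws.TrapCause() == unix.PTRACE_EVENT_SECCOMP {
            // The event message is the 16-bit SECCOMP_RET_DATA from the
            // filter's SECCOMP_RET_TRACE return value.
            msg, err := unix.PtraceGetEventMsg(pid)
            if err != nil {
                return err
            }
            fmt.Printf("seccomp stop, SECCOMP_RET_DATA=%#x\n", uint16(msg))
        }
    }
}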
@@ -587,19 +573,19 @@ func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions
case ptraceCloneKindClone:
if t.ptraceOpts.TraceClone {
t.Debugf("Entering PTRACE_EVENT_CLONE stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_CLONE, uint64(t.tg.pidns.tids[child]))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_CLONE, uint64(t.tg.pidns.tids[child]))
event = true
}
case ptraceCloneKindFork:
if t.ptraceOpts.TraceFork {
t.Debugf("Entering PTRACE_EVENT_FORK stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_FORK, uint64(t.tg.pidns.tids[child]))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_FORK, uint64(t.tg.pidns.tids[child]))
event = true
}
case ptraceCloneKindVfork:
if t.ptraceOpts.TraceVfork {
t.Debugf("Entering PTRACE_EVENT_VFORK stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK, uint64(t.tg.pidns.tids[child]))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_VFORK, uint64(t.tg.pidns.tids[child]))
event = true
}
default:
@@ -657,7 +643,7 @@ func (t *Task) ptraceVforkDone(child ThreadID) bool {
return false
}
t.Debugf("Entering PTRACE_EVENT_VFORK_DONE stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK_DONE, uint64(child))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_VFORK_DONE, uint64(child))
return true
}
@@ -680,7 +666,7 @@ func (t *Task) ptraceExec(oldTID ThreadID) {
}
if t.ptraceOpts.TraceExec {
t.Debugf("Entering PTRACE_EVENT_EXEC stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_EXEC, uint64(oldTID))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_EXEC, uint64(oldTID))
return
}
// "If the PTRACE_O_TRACEEXEC option is not in effect for the execing
@@ -714,7 +700,7 @@ func (t *Task) ptraceExit() {
status := t.exitStatus.Status()
t.tg.signalHandlers.mu.Unlock()
t.Debugf("Entering PTRACE_EVENT_EXIT stop")
- t.ptraceEventLocked(syscall.PTRACE_EVENT_EXIT, uint64(status))
+ t.ptraceEventLocked(linux.PTRACE_EVENT_EXIT, uint64(status))
}
// Preconditions: The TaskSet mutex must be locked.
@@ -762,7 +748,7 @@ func (t *Task) ptraceKill(target *Task) error {
// Ptrace implements the ptrace system call.
func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
// PTRACE_TRACEME ignores all other arguments.
- if req == syscall.PTRACE_TRACEME {
+ if req == linux.PTRACE_TRACEME {
return t.ptraceTraceme()
}
// All other ptrace requests operate on a current or future tracee
@@ -774,12 +760,12 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
// PTRACE_ATTACH (and PTRACE_SEIZE, which is unimplemented) do not require
// that target is not already a tracee.
- if req == syscall.PTRACE_ATTACH {
+ if req == linux.PTRACE_ATTACH {
return t.ptraceAttach(target)
}
// PTRACE_KILL (and PTRACE_INTERRUPT, which is unimplemented) require that
// the target is a tracee, but does not require that it is ptrace-stopped.
- if req == syscall.PTRACE_KILL {
+ if req == linux.PTRACE_KILL {
return t.ptraceKill(target)
}
// All other ptrace requests require that the target is a ptrace-stopped
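As a hedged illustration of the requirements spelled out above: a tracer typically attaches, waits for the resulting stop before issuing requests that need a ptrace-stopped tracee, and detaches when done. The helper name and the minimal error handling below are illustrative (golang.org/x/sys/unix, Linux only).

package ptracedemo

import "golang.org/x/sys/unix"

// attachBriefly attaches to pid, waits for the attach stop, and detaches.
// Hypothetical example only.
func attachBriefly(pid int) error {
    if err := unix.PtraceAttach(pid); err != nil {
        return err
    }
    // PTRACE_ATTACH sends SIGSTOP; wait for the resulting stop before
    // issuing any request that needs a ptrace-stopped tracee.
    var ws unix.WaitStatus
    if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
        return err
    }
    // ... requests that need a stopped tracee would go here ...
    return unix.PtraceDetach(pid)
}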
@@ -812,37 +798,37 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
// Resuming commands end the ptrace stop, but only if successful.
switch req {
- case syscall.PTRACE_DETACH:
+ case linux.PTRACE_DETACH:
if err := t.ptraceDetach(target, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
}
return nil
- case syscall.PTRACE_CONT:
+ case linux.PTRACE_CONT:
if err := target.ptraceUnstop(ptraceSyscallNone, false, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
}
return nil
- case syscall.PTRACE_SYSCALL:
+ case linux.PTRACE_SYSCALL:
if err := target.ptraceUnstop(ptraceSyscallIntercept, false, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
}
return nil
- case syscall.PTRACE_SINGLESTEP:
+ case linux.PTRACE_SINGLESTEP:
if err := target.ptraceUnstop(ptraceSyscallNone, true, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
}
return nil
- case syscall.PTRACE_SYSEMU:
+ case linux.PTRACE_SYSEMU:
if err := target.ptraceUnstop(ptraceSyscallEmu, false, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
}
return nil
- case syscall.PTRACE_SYSEMU_SINGLESTEP:
+ case linux.PTRACE_SYSEMU_SINGLESTEP:
if err := target.ptraceUnstop(ptraceSyscallEmu, true, linux.Signal(data)); err != nil {
target.ptraceUnfreeze()
return err
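The resuming requests above hand control back to the tracee in different ways; PTRACE_SYSCALL, for example, runs it until the next syscall entry or exit. A hedged sketch of the tracer-side loop follows (golang.org/x/sys/unix, linux/amd64; the function name and the assumption that PTRACE_O_TRACESYSGOOD was set are illustrative):

package ptracedemo

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// syscallLoop resumes a stopped tracee with PTRACE_SYSCALL until it exits.
// Hypothetical example only.
func syscallLoop(pid int) error {
    for {
        if err := unix.PtraceSyscall(pid, 0); err != nil {
            return err
        }
        var ws unix.WaitStatus
        if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
            return err
        }
        if ws.Exited() || ws.Signaled() {
            return nil
        }
        // With PTRACE_O_TRACESYSGOOD set, syscall stops report SIGTRAP|0x80,
        // letting the tracer tell them apart from ordinary signal stops.
        if ws.Stopped() && ws.StopSignal() == unix.SIGTRAP|0x80 {
            var regs unix.PtraceRegs
            if err := unix.PtraceGetRegs(pid, &regs); err != nil {
                return err
            }
            // On amd64, Orig_rax holds the syscall number at entry.
            fmt.Printf("syscall %d\n", regs.Orig_rax)
        }
    }
}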
@@ -853,7 +839,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
defer target.ptraceUnfreeze()
switch req {
- case syscall.PTRACE_PEEKTEXT, syscall.PTRACE_PEEKDATA:
+ case linux.PTRACE_PEEKTEXT, linux.PTRACE_PEEKDATA:
// "At the system call level, the PTRACE_PEEKTEXT, PTRACE_PEEKDATA, and
// PTRACE_PEEKUSER requests have a different API: they store the result
// at the address specified by the data parameter, and the return value
@@ -867,13 +853,13 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
_, err := t.CopyOut(data, word)
return err
- case syscall.PTRACE_POKETEXT, syscall.PTRACE_POKEDATA:
+ case linux.PTRACE_POKETEXT, linux.PTRACE_POKEDATA:
_, err := usermem.CopyObjectOut(t, target.MemoryManager(), addr, t.Arch().Native(uintptr(data)), usermem.IOOpts{
IgnorePermissions: true,
})
return err
- case syscall.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER
+ case linux.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER
n, err := target.Arch().PtracePeekUser(uintptr(addr))
if err != nil {
return err
@@ -881,10 +867,10 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
_, err = t.CopyOut(data, n)
return err
- case syscall.PTRACE_POKEUSR: // aka PTRACE_POKEUSER
+ case linux.PTRACE_POKEUSR: // aka PTRACE_POKEUSER
return target.Arch().PtracePokeUser(uintptr(addr), uintptr(data))
- case syscall.PTRACE_GETREGS:
+ case linux.PTRACE_GETREGS:
// "Copy the tracee's general-purpose ... registers ... to the address
// data in the tracer. ... (addr is ignored.) Note that SPARC systems
// have the meaning of data and addr reversed ..."
@@ -898,7 +884,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
})
return err
- case syscall.PTRACE_GETFPREGS:
+ case linux.PTRACE_GETFPREGS:
_, err := target.Arch().PtraceGetFPRegs(&usermem.IOReadWriter{
Ctx: t,
IO: t.MemoryManager(),
@@ -909,7 +895,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
})
return err
- case syscall.PTRACE_GETREGSET:
+ case linux.PTRACE_GETREGSET:
// "Read the tracee's registers. addr specifies, in an
// architecture-dependent way, the type of registers to be read. ...
// data points to a struct iovec, which describes the destination
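The iovec-based API described in this comment can be exercised from a tracer as in the hedged sketch below, for linux/amd64 and using a raw ptrace syscall (NT_PRSTATUS selects the general-purpose register set; the helper name is illustrative). As in the sentry code that follows, the kernel reports how many bytes it actually filled by shrinking the iovec length.

package ptracedemo

import (
    "fmt"
    "unsafe"

    "golang.org/x/sys/unix"
)

// getRegSet reads the tracee's general-purpose registers with
// PTRACE_GETREGSET. Hypothetical example; linux/amd64 only.
func getRegSet(pid int) (*unix.PtraceRegs, error) {
    var regs unix.PtraceRegs
    iov := unix.Iovec{
        Base: (*byte)(unsafe.Pointer(&regs)),
        Len:  uint64(unsafe.Sizeof(regs)),
    }
    // The kernel updates iov.Len to the number of bytes actually written.
    _, _, errno := unix.Syscall6(unix.SYS_PTRACE,
        unix.PTRACE_GETREGSET, uintptr(pid), unix.NT_PRSTATUS,
        uintptr(unsafe.Pointer(&iov)), 0, 0)
    if errno != 0 {
        return nil, errno
    }
    fmt.Printf("kernel filled %d bytes of register data\n", iov.Len)
    return &regs, nil
}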
@@ -934,7 +920,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
ar.End -= usermem.Addr(n)
return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))
- case syscall.PTRACE_SETREGS:
+ case linux.PTRACE_SETREGS:
_, err := target.Arch().PtraceSetRegs(&usermem.IOReadWriter{
Ctx: t,
IO: t.MemoryManager(),
@@ -945,7 +931,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
})
return err
- case syscall.PTRACE_SETFPREGS:
+ case linux.PTRACE_SETFPREGS:
_, err := target.Arch().PtraceSetFPRegs(&usermem.IOReadWriter{
Ctx: t,
IO: t.MemoryManager(),
@@ -956,7 +942,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
})
return err
- case syscall.PTRACE_SETREGSET:
+ case linux.PTRACE_SETREGSET:
ars, err := t.CopyInIovecs(data, 1)
if err != nil {
return err
@@ -976,7 +962,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
ar.End -= usermem.Addr(n)
return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar))
- case syscall.PTRACE_GETSIGINFO:
+ case linux.PTRACE_GETSIGINFO:
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
if target.ptraceSiginfo == nil {
@@ -985,7 +971,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
_, err := t.CopyOut(data, target.ptraceSiginfo)
return err
- case syscall.PTRACE_SETSIGINFO:
+ case linux.PTRACE_SETSIGINFO:
var info arch.SignalInfo
if _, err := t.CopyIn(data, &info); err != nil {
return err
@@ -998,7 +984,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
target.ptraceSiginfo = &info
return nil
- case PTRACE_GETSIGMASK:
+ case linux.PTRACE_GETSIGMASK:
if addr != linux.SignalSetSize {
return syserror.EINVAL
}
@@ -1007,7 +993,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
_, err := t.CopyOut(data, target.tr.SignalMask)
return err
- case PTRACE_SETSIGMASK:
+ case linux.PTRACE_SETSIGMASK:
if addr != linux.SignalSetSize {
return syserror.EINVAL
}
@@ -1019,29 +1005,35 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error {
target.SetSignalMask(mask &^ UnblockableSignals)
return nil
- case syscall.PTRACE_SETOPTIONS:
+ case linux.PTRACE_SETOPTIONS:
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
- validOpts := uintptr(_PTRACE_O_EXITKILL | syscall.PTRACE_O_TRACESYSGOOD | syscall.PTRACE_O_TRACECLONE |
- syscall.PTRACE_O_TRACEEXEC | syscall.PTRACE_O_TRACEEXIT | syscall.PTRACE_O_TRACEFORK |
- _PTRACE_O_TRACESECCOMP | syscall.PTRACE_O_TRACEVFORK | syscall.PTRACE_O_TRACEVFORKDONE)
+ validOpts := uintptr(linux.PTRACE_O_EXITKILL |
+ linux.PTRACE_O_TRACESYSGOOD |
+ linux.PTRACE_O_TRACECLONE |
+ linux.PTRACE_O_TRACEEXEC |
+ linux.PTRACE_O_TRACEEXIT |
+ linux.PTRACE_O_TRACEFORK |
+ linux.PTRACE_O_TRACESECCOMP |
+ linux.PTRACE_O_TRACEVFORK |
+ linux.PTRACE_O_TRACEVFORKDONE)
if uintptr(data)&^validOpts != 0 {
return syserror.EINVAL
}
target.ptraceOpts = ptraceOptions{
- ExitKill: data&_PTRACE_O_EXITKILL != 0,
- SysGood: data&syscall.PTRACE_O_TRACESYSGOOD != 0,
- TraceClone: data&syscall.PTRACE_O_TRACECLONE != 0,
- TraceExec: data&syscall.PTRACE_O_TRACEEXEC != 0,
- TraceExit: data&syscall.PTRACE_O_TRACEEXIT != 0,
- TraceFork: data&syscall.PTRACE_O_TRACEFORK != 0,
- TraceSeccomp: data&_PTRACE_O_TRACESECCOMP != 0,
- TraceVfork: data&syscall.PTRACE_O_TRACEVFORK != 0,
- TraceVforkDone: data&syscall.PTRACE_O_TRACEVFORKDONE != 0,
+ ExitKill: data&linux.PTRACE_O_EXITKILL != 0,
+ SysGood: data&linux.PTRACE_O_TRACESYSGOOD != 0,
+ TraceClone: data&linux.PTRACE_O_TRACECLONE != 0,
+ TraceExec: data&linux.PTRACE_O_TRACEEXEC != 0,
+ TraceExit: data&linux.PTRACE_O_TRACEEXIT != 0,
+ TraceFork: data&linux.PTRACE_O_TRACEFORK != 0,
+ TraceSeccomp: data&linux.PTRACE_O_TRACESECCOMP != 0,
+ TraceVfork: data&linux.PTRACE_O_TRACEVFORK != 0,
+ TraceVforkDone: data&linux.PTRACE_O_TRACEVFORKDONE != 0,
}
return nil
- case syscall.PTRACE_GETEVENTMSG:
+ case linux.PTRACE_GETEVENTMSG:
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
_, err := t.CopyOut(usermem.Addr(data), target.ptraceEventMsg)