author    Jamie Liu <jamieliu@google.com>  2020-11-13 14:46:03 -0800
committer gVisor bot <gvisor-bot@google.com>  2020-11-13 14:47:47 -0800
commit    a1cb52447f3e9414211b9e0558f1231ae3e59329 (patch)
tree      b1618b3e2507f0ec6d2461c1a0ba92d140d4e0a4 /pkg/sentry/kernel/task_run.go
parent    89517eca414a311598aa6e64a229c7acc5e3a22f (diff)
Check for misuse of kernel.Task as context.Context.

Checks in Task.block() and Task.Value() are conditional on race detection
being enabled, since these functions are relatively hot. Checks in
Task.SleepStart() and Task.UninterruptibleSleepStart() are enabled
unconditionally, since these functions are not thought to lie on any
critical paths, and misuse of these functions is required for b/168241471
to manifest.

PiperOrigin-RevId: 342342175
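For readers wondering how a check can be "conditional on race detection being
enabled": the usual Go pattern is a boolean constant defined in a pair of files
gated by the race build tag, so the compiler drops the branch entirely from
non-race builds. The sketch below is illustrative only; the package, file, and
constant names are invented here and are not gvisor's actual mechanism. The
only thing taken from the change itself is that goid.Get() returns the current
goroutine's ID as an int64.

// A minimal sketch, assuming only the standard race build tag and
// gvisor's goid.Get(); every other name is invented for illustration.

// --- file: raceenabled_race.go (built only with go build -race) ---
//go:build race

package taskcheck

const raceEnabled = true

// --- file: raceenabled_norace.go (built otherwise) ---
//go:build !race

package taskcheck

const raceEnabled = false

// --- file: block.go ---
package taskcheck

import (
	"fmt"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/goid"
)

// task is a stand-in for kernel.Task; goroutineID holds the ID of the
// goroutine the task runs on.
type task struct {
	goroutineID int64
}

// block stands in for a hot function such as Task.block(). Because
// raceEnabled is a compile-time constant, the assertion costs nothing
// in non-race builds: the branch is dead code and is eliminated.
func (t *task) block() {
	if raceEnabled {
		if got, want := goid.Get(), atomic.LoadInt64(&t.goroutineID); got != want {
			panic(fmt.Sprintf("block() called on goroutine %d, want %d", got, want))
		}
	}
	// ... blocking logic elided ...
}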
Diffstat (limited to 'pkg/sentry/kernel/task_run.go')
-rw-r--r--  pkg/sentry/kernel/task_run.go | 15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go
index 0f8294dcd..c5858da30 100644
--- a/pkg/sentry/kernel/task_run.go
+++ b/pkg/sentry/kernel/task_run.go
@@ -16,11 +16,13 @@ package kernel
 
 import (
 	"bytes"
+	"fmt"
 	"runtime"
 	"runtime/trace"
 	"sync/atomic"
 
 	"gvisor.dev/gvisor/pkg/abi/linux"
+	"gvisor.dev/gvisor/pkg/goid"
 	"gvisor.dev/gvisor/pkg/sentry/arch"
 	"gvisor.dev/gvisor/pkg/sentry/hostcpu"
 	ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
@@ -57,6 +59,8 @@ type taskRunState interface {
 // make it visible in stack dumps. A goroutine for a given task can be identified
 // searching for Task.run()'s argument value.
 func (t *Task) run(threadID uintptr) {
+	atomic.StoreInt64(&t.goid, goid.Get())
+
 	// Construct t.blockingTimer here. We do this here because we can't
 	// reconstruct t.blockingTimer during restore in Task.afterLoad(), because
 	// kernel.timekeeper.SetClocks() hasn't been called yet.
@@ -99,6 +103,9 @@ func (t *Task) run(threadID uintptr) {
 			t.tg.pidns.owner.runningGoroutines.Done()
 			t.p.Release()
 
+			// Deferring this store triggers a false positive in the race
+			// detector (https://github.com/golang/go/issues/42599).
+			atomic.StoreInt64(&t.goid, 0)
 			// Keep argument alive because stack trace for dead variables may not be correct.
 			runtime.KeepAlive(threadID)
 			return
@@ -375,6 +382,14 @@ func (app *runApp) execute(t *Task) taskRunState {
 	}
 }
 
+// assertTaskGoroutine panics if the caller is not running on t's task
+// goroutine.
+func (t *Task) assertTaskGoroutine() {
+	if got, want := goid.Get(), atomic.LoadInt64(&t.goid); got != want {
+		panic(fmt.Sprintf("running on goroutine %d (task goroutine for kernel.Task %p is %d)", got, t, want))
+	}
+}
+
 // waitGoroutineStoppedOrExited blocks until t's task goroutine stops or exits.
 func (t *Task) waitGoroutineStoppedOrExited() {
 	t.goroutineStopped.Wait()
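Taken together, the three additions form one pattern: Task.run() records the
ID of its own goroutine when it starts, assertTaskGoroutine() compares the
caller's ID against that record, and the record is cleared to zero when the
goroutine exits. The standalone sketch below mirrors that lifecycle; apart
from goid.Get(), whose int64 result the change itself relies on, every name
in it is invented for illustration.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/goid"
)

// worker mimics kernel.Task's bookkeeping: goroutineID is the ID of the
// goroutine that run() executes on, or 0 once that goroutine has exited.
type worker struct {
	goroutineID int64
}

// assertWorkerGoroutine mirrors assertTaskGoroutine: it panics if the
// caller is not running on the worker's goroutine.
func (w *worker) assertWorkerGoroutine() {
	if got, want := goid.Get(), atomic.LoadInt64(&w.goroutineID); got != want {
		panic(fmt.Sprintf("running on goroutine %d, want %d", got, want))
	}
}

// run mirrors Task.run(): record the goroutine ID on entry and clear it
// with a plain (not deferred) store before returning.
func (w *worker) run(wg *sync.WaitGroup) {
	defer wg.Done()
	atomic.StoreInt64(&w.goroutineID, goid.Get())

	w.assertWorkerGoroutine() // Succeeds: we are on the worker goroutine.

	atomic.StoreInt64(&w.goroutineID, 0)
}

func main() {
	w := &worker{}
	var wg sync.WaitGroup
	wg.Add(1)
	go w.run(&wg)
	wg.Wait()

	// From main's goroutine the assertion panics: goroutine IDs are
	// never 0, so the cleared record can no longer match.
	defer func() { fmt.Println("recovered:", recover()) }()
	w.assertWorkerGoroutine()
}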