author    gVisor bot <gvisor-bot@google.com>  2019-11-21 19:43:52 +0000
committer gVisor bot <gvisor-bot@google.com>  2019-11-21 19:43:52 +0000
commit    b3f49ebd74ff30ad50b495ddd563ee5aa9cd2bca (patch)
tree      ad383037abfddbfeb8a11229eb1637909b25002e /pkg/sentry/kernel
parent    158f38053d931caf57b90bb0b496cf7d2ec4d42b (diff)
parent    c0f89eba6ebdec08460bd796fc62d6aef674d141 (diff)

Merge release-20191114.0-18-gc0f89eb (automated)
Diffstat (limited to 'pkg/sentry/kernel')
-rwxr-xr-x  pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go  20
-rw-r--r--  pkg/sentry/kernel/task.go                                      4
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
index 25ad17a4e..11e820656 100755
--- a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -1,25 +1,25 @@
 package kernel
 
 import (
-	"fmt"
-	"reflect"
 	"strings"
 	"unsafe"
 
-	"gvisor.dev/gvisor/third_party/gvsync"
+	"fmt"
+	"gvisor.dev/gvisor/pkg/syncutil"
+	"reflect"
 )
 
 // SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
 // with any writer critical sections in sc.
-func SeqAtomicLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
+func SeqAtomicLoadTaskGoroutineSchedInfo(sc *syncutil.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
 	// This function doesn't use SeqAtomicTryLoad because doing so is
 	// measurably, significantly (~20%) slower; Go is awful at inlining.
 	var val TaskGoroutineSchedInfo
 	for {
 		epoch := sc.BeginRead()
-		if gvsync.RaceEnabled {
-			gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+		if syncutil.RaceEnabled {
+			syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 		} else {
 			val = *ptr
@@ -35,10 +35,10 @@ func SeqAtomicLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, ptr *TaskGoroutine
 // SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
 // in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
 // would race with a writer critical section, SeqAtomicTryLoad returns
 // (unspecified, false).
-func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, epoch gvsync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
+func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *syncutil.SeqCount, epoch syncutil.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
 	var val TaskGoroutineSchedInfo
-	if gvsync.RaceEnabled {
-		gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+	if syncutil.RaceEnabled {
+		syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 	} else {
 		val = *ptr
 	}
@@ -49,7 +49,7 @@ func initTaskGoroutineSchedInfo() {
 	var val TaskGoroutineSchedInfo
 	typ := reflect.TypeOf(val)
 	name := typ.Name()
-	if ptrs := gvsync.PointersInType(typ, name); len(ptrs) != 0 {
+	if ptrs := syncutil.PointersInType(typ, name); len(ptrs) != 0 {
 		panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
 	}
 }
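
For readers new to the pattern, the generated functions above implement a sequence lock (seqcount): a writer makes the epoch counter odd for the duration of its critical section, and a reader retries its copy until it observes the same even epoch before and after. SeqAtomicTryLoad is the single-attempt variant, and initTaskGoroutineSchedInfo panics at startup if the copied type contains pointers, since a value copied while racing with a writer could hold a torn or stale pointer. What follows is a minimal standalone sketch of the pattern using only sync/atomic; it is not gVisor's syncutil implementation, and all names in it are illustrative. The deliberately unsynchronized copy in read() is exactly the race that the generated code hides from the race detector via syncutil.Memmove when RaceEnabled is set.

package main

import (
	"fmt"
	"sync/atomic"
)

// seq is the sequence counter: odd while a writer is active.
var seq uint32

// data is the state protected by seq.
var data [2]uint64

// write brackets its mutation with two increments, so seq is odd for
// the duration of the writer critical section.
func write(a, b uint64) {
	atomic.AddUint32(&seq, 1) // seq becomes odd: writer active
	data[0], data[1] = a, b
	atomic.AddUint32(&seq, 1) // seq becomes even: writer done
}

// read retries until it sees the same even epoch before and after its
// copy, which proves that no writer overlapped the copy.
func read() (a, b uint64) {
	for {
		epoch := atomic.LoadUint32(&seq)
		if epoch%2 != 0 {
			continue // writer in progress; retry
		}
		a, b = data[0], data[1] // racy copy, like the RaceEnabled branch above
		if atomic.LoadUint32(&seq) == epoch {
			return
		}
	}
}

func main() {
	write(1, 2)
	a, b := read()
	fmt.Println(a, b) // prints: 1 2
}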
diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index 9be3dae3c..80c8e5464 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -35,8 +35,8 @@ import (
 	"gvisor.dev/gvisor/pkg/sentry/uniqueid"
 	"gvisor.dev/gvisor/pkg/sentry/usage"
 	"gvisor.dev/gvisor/pkg/sentry/usermem"
+	"gvisor.dev/gvisor/pkg/syncutil"
 	"gvisor.dev/gvisor/pkg/waiter"
-	"gvisor.dev/gvisor/third_party/gvsync"
 )
 
 // Task represents a thread of execution in the untrusted app. It
@@ -83,7 +83,7 @@ type Task struct {
 	//
 	// gosched is protected by goschedSeq. gosched is owned by the task
 	// goroutine.
-	goschedSeq gvsync.SeqCount `state:"nosave"`
+	goschedSeq syncutil.SeqCount `state:"nosave"`
 	gosched    TaskGoroutineSchedInfo
 
 	// yieldCount is the number of times the task goroutine has called
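
To close the loop on the comment above, here is a hedged sketch of how goschedSeq and gosched fit together, assuming syncutil.SeqCount's BeginWrite/EndWrite writer API; updateSched and readSched are hypothetical helpers for illustration, not functions from task.go.

// Hypothetical helpers in package kernel, shown for illustration only.

// updateSched runs only on the task goroutine, which owns t.gosched;
// the writer critical section makes the multi-field update appear
// atomic to concurrent readers.
func updateSched(t *Task, info TaskGoroutineSchedInfo) {
	t.goschedSeq.BeginWrite()
	t.gosched = info
	t.goschedSeq.EndWrite()
}

// readSched may run on any goroutine; the generated loader retries its
// copy until no writer critical section overlaps it.
func readSched(t *Task) TaskGoroutineSchedInfo {
	return SeqAtomicLoadTaskGoroutineSchedInfo(&t.goschedSeq, &t.gosched)
}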