Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go  | 36
-rw-r--r--  pkg/sentry/time/seqatomic_parameters_unsafe.go                | 36
2 files changed, 30 insertions(+), 42 deletions(-)
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
index 950645965..90148bbb2 100644
--- a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -10,39 +10,33 @@ import (
 )
 
 // SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in sc.
-func SeqAtomicLoadTaskGoroutineSchedInfo(sc *sync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
-	// This function doesn't use SeqAtomicTryLoad because doing so is
-	// measurably, significantly (~20%) slower; Go is awful at inlining.
-	var val TaskGoroutineSchedInfo
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
 	for {
-		epoch := sc.BeginRead()
-		if sync.RaceEnabled {
-
-			sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
-		} else {
-
-			val = *ptr
-		}
-		if sc.ReadOk(epoch) {
-			break
+		if val, ok := SeqAtomicTryLoadTaskGoroutineSchedInfo(seq, seq.BeginRead(), ptr); ok {
+			return val
 		}
 	}
-	return val
 }
 
 // SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
-// would race with a writer critical section, SeqAtomicTryLoad returns
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
 // (unspecified, false).
-func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
-	var val TaskGoroutineSchedInfo
+//
+//go:nosplit
+func SeqAtomicTryLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (val TaskGoroutineSchedInfo, ok bool) {
 	if sync.RaceEnabled {
+
 		sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 	} else {
+
 		val = *ptr
 	}
-	return val, sc.ReadOk(epoch)
+	ok = seq.ReadOk(epoch)
+	return
 }
 
 func initTaskGoroutineSchedInfo() {
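
For context (this note and the sketch below are not part of the commit): the generated pair implements the reader side of a sequence lock. Writers bracket updates to the protected value with seq.BeginWrite()/seq.EndWrite(); readers call the generated loader, which retries until it obtains a copy that no writer critical section overlapped. A minimal sketch of that usage, assuming gVisor's pkg/sync SeqCount API, with schedSeq, schedInfo, setSchedInfo, and getSchedInfo invented for illustration:

var (
	schedSeq  sync.SeqCount          // guards schedInfo
	schedInfo TaskGoroutineSchedInfo // written only between BeginWrite/EndWrite
)

// setSchedInfo is the writer side: the BeginWrite/EndWrite bracket makes
// any concurrent reader's ReadOk fail, forcing it to retry.
func setSchedInfo(info TaskGoroutineSchedInfo) {
	schedSeq.BeginWrite()
	schedInfo = info
	schedSeq.EndWrite()
}

// getSchedInfo is the reader side: it returns a torn-free copy without
// taking a lock, spinning only if a writer raced with the copy.
func getSchedInfo() TaskGoroutineSchedInfo {
	return SeqAtomicLoadTaskGoroutineSchedInfo(&schedSeq, &schedInfo)
}
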
diff --git a/pkg/sentry/time/seqatomic_parameters_unsafe.go b/pkg/sentry/time/seqatomic_parameters_unsafe.go
index efd3ccae2..2cb001080 100644
--- a/pkg/sentry/time/seqatomic_parameters_unsafe.go
+++ b/pkg/sentry/time/seqatomic_parameters_unsafe.go
@@ -10,39 +10,33 @@ import (
 )
 
 // SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in sc.
-func SeqAtomicLoadParameters(sc *sync.SeqCount, ptr *Parameters) Parameters {
-	// This function doesn't use SeqAtomicTryLoad because doing so is
-	// measurably, significantly (~20%) slower; Go is awful at inlining.
-	var val Parameters
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadParameters(seq *sync.SeqCount, ptr *Parameters) Parameters {
 	for {
-		epoch := sc.BeginRead()
-		if sync.RaceEnabled {
-
-			sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
-		} else {
-
-			val = *ptr
-		}
-		if sc.ReadOk(epoch) {
-			break
+		if val, ok := SeqAtomicTryLoadParameters(seq, seq.BeginRead(), ptr); ok {
+			return val
 		}
 	}
-	return val
 }
 
 // SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
-// would race with a writer critical section, SeqAtomicTryLoad returns
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
 // (unspecified, false).
-func SeqAtomicTryLoadParameters(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Parameters) (Parameters, bool) {
-	var val Parameters
+//
+//go:nosplit
+func SeqAtomicTryLoadParameters(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Parameters) (val Parameters, ok bool) {
 	if sync.RaceEnabled {
+
 		sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 	} else {
+
 		val = *ptr
 	}
-	return val, sc.ReadOk(epoch)
+	ok = seq.ReadOk(epoch)
+	return
 }
 
 func initParameters() {
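
The time/ file is the same template instantiated for Parameters. One thing the diff makes visible is why SeqAtomicTryLoad takes an explicit epoch: a caller can perform several reads guarded by the same SeqCount inside one reader critical section and validate them together, which is exactly the loop the rewritten loader now delegates to. A hedged sketch of such a caller (loadParamsConsistently is a hypothetical name, not from this commit):

// loadParamsConsistently drives the retry loop itself, mirroring what the
// rewritten SeqAtomicLoadParameters does internally.
func loadParamsConsistently(seq *sync.SeqCount, ptr *Parameters) Parameters {
	for {
		epoch := seq.BeginRead()
		// Other reads guarded by the same SeqCount could share this epoch.
		if val, ok := SeqAtomicTryLoadParameters(seq, epoch, ptr); ok {
			return val // the copy did not race with any writer
		}
		// ReadOk failed inside TryLoad: a writer was active during the
		// copy, so begin a fresh epoch and retry.
	}
}
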