author     gVisor bot <gvisor-bot@google.com>   2020-09-02 18:40:29 +0000
committer  gVisor bot <gvisor-bot@google.com>   2020-09-02 18:40:29 +0000
commit     5482f942109e2c3098df56185bb17f4142e241be (patch)
tree       a04371cbfd333439fdbed38de434869a84be6b49 /pkg/sentry
parent     347732d7c3162466469480e37d2cf682e333a4e5 (diff)
parent     9bd01642377eef9d91f9243091b6da28f37d164a (diff)
Merge release-20200818.0-111-g9bd016423 (automated)
Diffstat (limited to 'pkg/sentry')
-rw-r--r--  pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go  36
-rw-r--r--  pkg/sentry/time/seqatomic_parameters_unsafe.go                36
2 files changed, 30 insertions, 42 deletions
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
index 950645965..90148bbb2 100644
--- a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -10,39 +10,33 @@ import (
 )
 
 // SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in sc.
-func SeqAtomicLoadTaskGoroutineSchedInfo(sc *sync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
-	// This function doesn't use SeqAtomicTryLoad because doing so is
-	// measurably, significantly (~20%) slower; Go is awful at inlining.
-	var val TaskGoroutineSchedInfo
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
 	for {
-		epoch := sc.BeginRead()
-		if sync.RaceEnabled {
-
-			sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
-		} else {
-
-			val = *ptr
-		}
-		if sc.ReadOk(epoch) {
-			break
+		if val, ok := SeqAtomicTryLoadTaskGoroutineSchedInfo(seq, seq.BeginRead(), ptr); ok {
+			return val
 		}
 	}
-	return val
 }
 
 // SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
-// would race with a writer critical section, SeqAtomicTryLoad returns
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
 // (unspecified, false).
-func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
-	var val TaskGoroutineSchedInfo
+//
+//go:nosplit
+func SeqAtomicTryLoadTaskGoroutineSchedInfo(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (val TaskGoroutineSchedInfo, ok bool) {
 	if sync.RaceEnabled {
+
 		sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 	} else {
+
 		val = *ptr
 	}
-	return val, sc.ReadOk(epoch)
+	ok = seq.ReadOk(epoch)
+	return
 }
 
 func initTaskGoroutineSchedInfo() {
diff --git a/pkg/sentry/time/seqatomic_parameters_unsafe.go b/pkg/sentry/time/seqatomic_parameters_unsafe.go
index efd3ccae2..2cb001080 100644
--- a/pkg/sentry/time/seqatomic_parameters_unsafe.go
+++ b/pkg/sentry/time/seqatomic_parameters_unsafe.go
@@ -10,39 +10,33 @@ import (
 )
 
 // SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in sc.
-func SeqAtomicLoadParameters(sc *sync.SeqCount, ptr *Parameters) Parameters {
-	// This function doesn't use SeqAtomicTryLoad because doing so is
-	// measurably, significantly (~20%) slower; Go is awful at inlining.
-	var val Parameters
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoadParameters(seq *sync.SeqCount, ptr *Parameters) Parameters {
 	for {
-		epoch := sc.BeginRead()
-		if sync.RaceEnabled {
-
-			sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
-		} else {
-
-			val = *ptr
-		}
-		if sc.ReadOk(epoch) {
-			break
+		if val, ok := SeqAtomicTryLoadParameters(seq, seq.BeginRead(), ptr); ok {
+			return val
 		}
 	}
-	return val
 }
 
 // SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
-// would race with a writer critical section, SeqAtomicTryLoad returns
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
 // (unspecified, false).
-func SeqAtomicTryLoadParameters(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Parameters) (Parameters, bool) {
-	var val Parameters
+//
+//go:nosplit
+func SeqAtomicTryLoadParameters(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Parameters) (val Parameters, ok bool) {
 	if sync.RaceEnabled {
+
 		sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
 	} else {
+
 		val = *ptr
 	}
-	return val, sc.ReadOk(epoch)
+	ok = seq.ReadOk(epoch)
+	return
 }
 
 func initParameters() {
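
For context, the two regenerated files implement the sequence-counter read pattern: a reader snapshots a multi-word value that cannot be loaded atomically, then checks that no writer ran concurrently, retrying until a consistent copy is observed. Below is a minimal, self-contained Go sketch of that pattern. It is an illustration only: seqCount, params, tryLoad, and load are hypothetical stand-ins, not gVisor's pkg/sync.SeqCount API or the generated SeqAtomicLoad* functions.

// Minimal sketch of the sequence-counter read pattern shown in the diff above.
// Hypothetical stand-in types and helpers; not gVisor's pkg/sync implementation.
package main

import (
	"fmt"
	"sync/atomic"
)

// seqCount is a writer-sequenced counter: odd while a write is in progress.
type seqCount struct {
	epoch uint32
}

// beginRead returns the current epoch, spinning while a writer is active.
func (s *seqCount) beginRead() uint32 {
	for {
		e := atomic.LoadUint32(&s.epoch)
		if e%2 == 0 {
			return e
		}
	}
}

// readOk reports whether no writer ran since the read began at epoch.
func (s *seqCount) readOk(epoch uint32) bool {
	return atomic.LoadUint32(&s.epoch) == epoch
}

// beginWrite and endWrite bracket a writer critical section.
func (s *seqCount) beginWrite() { atomic.AddUint32(&s.epoch, 1) }
func (s *seqCount) endWrite()   { atomic.AddUint32(&s.epoch, 1) }

// params is a hypothetical multi-word value, standing in for types like
// Parameters or TaskGoroutineSchedInfo that cannot be read in one atomic load.
type params struct {
	base, freq uint64
}

// tryLoad copies *ptr inside a reader critical section begun at epoch and
// reports whether the copy is consistent (no concurrent writer).
func tryLoad(s *seqCount, epoch uint32, ptr *params) (val params, ok bool) {
	val = *ptr // possibly racy copy; validated by readOk below
	ok = s.readOk(epoch)
	return
}

// load retries tryLoad until it observes a consistent copy, mirroring the
// retry-loop structure of the generated SeqAtomicLoad functions above.
func load(s *seqCount, ptr *params) params {
	for {
		if val, ok := tryLoad(s, s.beginRead(), ptr); ok {
			return val
		}
	}
}

func main() {
	var s seqCount
	p := params{base: 1, freq: 100}

	// Writer critical section.
	s.beginWrite()
	p = params{base: 2, freq: 200}
	s.endWrite()

	fmt.Println(load(&s, &p)) // prints {2 200}
}

As for the change itself: //go:nosplit suppresses the stack-split prologue, so the annotated readers can be called from contexts where stack growth is not permitted, and restructuring SeqAtomicLoad as a retry loop over SeqAtomicTryLoad (with named results and a bare return) keeps both generated functions small. That reading of the motivation is inferred from the diff, not stated in the commit message.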