summary | refs | log | tree | commit | diff | homepage
path: root/pkg/coverage
diff options
context:
space:
mode:
author	Dean Deng <deandeng@google.com>	2021-01-22 11:25:13 -0800
committer	gVisor bot <gvisor-bot@google.com>	2021-01-22 11:32:56 -0800
commit	65594d30ad1b1a2ca676c7ea78f4815f83dc4d06 (patch)
tree	3a558f3002c4db82b8248f4e754e6fb5f096feb4 /pkg/coverage
parent	16b81308cfb7cf2265d69e7180cd024f7c5b89e4 (diff)
Avoid atomic operations in kcov processing.
Atomic operations here significantly slow down gVisor builds with kcov/coverage enabled. Also mark these functions go:norace to avoid complaints from the race detector.

PiperOrigin-RevId: 353281865
Diffstat (limited to 'pkg/coverage')
-rw-r--r--	pkg/coverage/coverage.go	18
1 file changed, 14 insertions, 4 deletions
diff --git a/pkg/coverage/coverage.go b/pkg/coverage/coverage.go
index fdfe31417..6f3d72e83 100644
--- a/pkg/coverage/coverage.go
+++ b/pkg/coverage/coverage.go
@@ -26,7 +26,6 @@ import (
"fmt"
"io"
"sort"
- "sync/atomic"
"testing"
"gvisor.dev/gvisor/pkg/sync"
@@ -69,12 +68,18 @@ var globalData struct {
}
// ClearCoverageData clears existing coverage data.
+//
+//go:norace
func ClearCoverageData() {
coverageMu.Lock()
defer coverageMu.Unlock()
+
+ // We do not use atomic operations while reading/writing to the counters,
+ // which would drastically degrade performance. Slight discrepancies due to
+ // racing is okay for the purposes of kcov.
for _, counters := range coverdata.Cover.Counters {
for index := 0; index < len(counters); index++ {
- atomic.StoreUint32(&counters[index], 0)
+ counters[index] = 0
}
}
}
@@ -114,6 +119,8 @@ var coveragePool = sync.Pool{
// ensure that each event is only reported once. Due to the limitations of Go
// coverage tools, we reset the global coverage data every time this function is
// run.
+//
+//go:norace
func ConsumeCoverageData(w io.Writer) int {
InitCoverageData()
@@ -125,11 +132,14 @@ func ConsumeCoverageData(w io.Writer) int {
for fileNum, file := range globalData.files {
counters := coverdata.Cover.Counters[file]
for index := 0; index < len(counters); index++ {
- if atomic.LoadUint32(&counters[index]) == 0 {
+ // We do not use atomic operations while reading/writing to the counters,
+ // which would drastically degrade performance. Slight discrepancies due to
+ // racing is okay for the purposes of kcov.
+ if counters[index] == 0 {
continue
}
// Non-zero coverage data found; consume it and report as a PC.
- atomic.StoreUint32(&counters[index], 0)
+ counters[index] = 0
pc := globalData.syntheticPCs[fileNum][index]
usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
n, err := w.Write(pcBuffer[:])