author | Dean Deng <deandeng@google.com> | 2020-09-27 15:31:43 -0700
---|---|---
committer | gVisor bot <gvisor-bot@google.com> | 2020-09-27 15:33:51 -0700
commit | 2a60f9229166effac64653be4f46683ea1a0cd87 |
tree | 96264a77d7b541a884e41bb85efaadcb23f4d4eb /pkg |
parent | a376a0baf362506549fcc58861465fa89ed33f7f |
Clean up kcov.
Previously, we did not check the kcov mode when performing task work. As a
result, disabling kcov did not do anything.
Also avoid an expensive atomic read-modify-write (swap) when consuming coverage data. The swap is unnecessary when the counter is already zero, which is the common case, and slight inconsistencies due to a race between coverage data generation (incrementing the value) and consumption (reading a nonzero value, then writing zero) are acceptable (a standalone sketch follows below).
PiperOrigin-RevId: 334049207
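
For illustration, a minimal standalone sketch of the consumption pattern described above; the `consume` function and `counters` slice are hypothetical stand-ins, not gVisor code. A cheap atomic load skips the common zero case, and the follow-up store tolerates losing an increment that races in between.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// consume mirrors the pattern adopted in this commit: check each counter with
// a cheap atomic load and only reset (store zero) the slots that are non-zero,
// instead of performing an atomic swap (a full RMW) on every slot.
func consume(counters []uint32) int {
	hits := 0
	for i := range counters {
		if atomic.LoadUint32(&counters[i]) == 0 {
			// Common case: no coverage recorded for this block; no RMW needed.
			continue
		}
		// An increment racing in between the load and this store may be lost;
		// that slight inconsistency is tolerated for coverage data.
		atomic.StoreUint32(&counters[i], 0)
		hits++
	}
	return hits
}

func main() {
	counters := make([]uint32, 8)
	atomic.AddUint32(&counters[3], 1)
	atomic.AddUint32(&counters[5], 2)
	fmt.Println("covered blocks:", consume(counters)) // covered blocks: 2
}
```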
Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/coverage/coverage.go | 39
-rw-r--r-- | pkg/sentry/kernel/kcov.go | 4
2 files changed, 22 insertions, 21 deletions
diff --git a/pkg/coverage/coverage.go b/pkg/coverage/coverage.go
index 6831adcce..a4f4b2c5e 100644
--- a/pkg/coverage/coverage.go
+++ b/pkg/coverage/coverage.go
@@ -100,12 +100,9 @@ var coveragePool = sync.Pool{
 // instrumentation_filter.
 //
 // Note that we "consume", i.e. clear, coverdata when this function is run, to
-// ensure that each event is only reported once.
-//
-// TODO(b/160639712): evaluate whether it is ok to reset the global coverage
-// data every time this function is run. We could technically have each thread
-// store a local snapshot against which we compare the most recent coverdata so
-// that separate threads do not affect each other's view of the data.
+// ensure that each event is only reported once. Due to the limitations of Go
+// coverage tools, we reset the global coverage data every time this function is
+// run.
 func ConsumeCoverageData(w io.Writer) int {
 	once.Do(initCoverageData)
 
@@ -117,23 +114,23 @@ func ConsumeCoverageData(w io.Writer) int {
 	for fileIndex, file := range globalData.files {
 		counters := coverdata.Cover.Counters[file]
 		for index := 0; index < len(counters); index++ {
-			val := atomic.SwapUint32(&counters[index], 0)
-			if val != 0 {
-				// Calculate the synthetic PC.
-				pc := globalData.syntheticPCs[fileIndex][index]
-
-				usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
-				n, err := w.Write(pcBuffer[:])
-				if err != nil {
-					if err == io.EOF {
-						// Simply stop writing if we encounter EOF; it's ok if we attempted to
-						// write more than we can hold.
-						return total + n
-					}
-					panic(fmt.Sprintf("Internal error writing PCs to kcov area: %v", err))
+			if atomic.LoadUint32(&counters[index]) == 0 {
+				continue
+			}
+			// Non-zero coverage data found; consume it and report as a PC.
+			atomic.StoreUint32(&counters[index], 0)
+			pc := globalData.syntheticPCs[fileIndex][index]
+			usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
+			n, err := w.Write(pcBuffer[:])
+			if err != nil {
+				if err == io.EOF {
+					// Simply stop writing if we encounter EOF; it's ok if we attempted to
+					// write more than we can hold.
+					return total + n
 				}
-				total += n
+				panic(fmt.Sprintf("Internal error writing PCs to kcov area: %v", err))
 			}
+			total += n
 		}
 	}
 
diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go
index aad63aa99..d3e76ca7b 100644
--- a/pkg/sentry/kernel/kcov.go
+++ b/pkg/sentry/kernel/kcov.go
@@ -89,6 +89,10 @@ func (kcov *Kcov) TaskWork(t *Task) {
 	kcov.mu.Lock()
 	defer kcov.mu.Unlock()
 
+	if kcov.mode != linux.KCOV_TRACE_PC {
+		return
+	}
+
 	rw := &kcovReadWriter{
 		mf: kcov.mfp.MemoryFile(),
 		fr: kcov.mappable.FileRange(),
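
As a usage illustration (not part of this change), a sketch of a consumer driving `ConsumeCoverageData` directly. The `gvisor.dev/gvisor/pkg/coverage` import path and the little-endian PC encoding are assumptions; in a binary built without Go coverage instrumentation this simply writes nothing.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"gvisor.dev/gvisor/pkg/coverage" // assumed import path for pkg/coverage
)

func main() {
	// Drain whatever coverage has been recorded so far into an in-memory
	// buffer; the return value is the number of bytes written.
	var buf bytes.Buffer
	n := coverage.ConsumeCoverageData(&buf)
	fmt.Printf("consumed %d bytes of synthetic PCs\n", n)

	// Each covered block is reported as a fixed-width 8-byte synthetic PC;
	// little-endian decoding is assumed here.
	data := buf.Bytes()
	for len(data) >= 8 {
		fmt.Printf("synthetic PC: %#x\n", binary.LittleEndian.Uint64(data[:8]))
		data = data[8:]
	}
}
```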