-rw-r--r--  pkg/coverage/coverage.go     39
-rw-r--r--  pkg/sentry/kernel/kcov.go     4
-rw-r--r--  test/syscalls/linux/kcov.cc   5
3 files changed, 26 insertions, 22 deletions
diff --git a/pkg/coverage/coverage.go b/pkg/coverage/coverage.go
index 6831adcce..a4f4b2c5e 100644
--- a/pkg/coverage/coverage.go
+++ b/pkg/coverage/coverage.go
@@ -100,12 +100,9 @@ var coveragePool = sync.Pool{
 // instrumentation_filter.
 //
 // Note that we "consume", i.e. clear, coverdata when this function is run, to
-// ensure that each event is only reported once.
-//
-// TODO(b/160639712): evaluate whether it is ok to reset the global coverage
-// data every time this function is run. We could technically have each thread
-// store a local snapshot against which we compare the most recent coverdata so
-// that separate threads do not affect each other's view of the data.
+// ensure that each event is only reported once. Due to the limitations of Go
+// coverage tools, we reset the global coverage data every time this function is
+// run.
 func ConsumeCoverageData(w io.Writer) int {
 	once.Do(initCoverageData)
@@ -117,23 +114,23 @@ func ConsumeCoverageData(w io.Writer) int {
 	for fileIndex, file := range globalData.files {
 		counters := coverdata.Cover.Counters[file]
 		for index := 0; index < len(counters); index++ {
-			val := atomic.SwapUint32(&counters[index], 0)
-			if val != 0 {
-				// Calculate the synthetic PC.
-				pc := globalData.syntheticPCs[fileIndex][index]
-
-				usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
-				n, err := w.Write(pcBuffer[:])
-				if err != nil {
-					if err == io.EOF {
-						// Simply stop writing if we encounter EOF; it's ok if we attempted to
-						// write more than we can hold.
-						return total + n
-					}
-					panic(fmt.Sprintf("Internal error writing PCs to kcov area: %v", err))
+			if atomic.LoadUint32(&counters[index]) == 0 {
+				continue
+			}
+			// Non-zero coverage data found; consume it and report as a PC.
+			atomic.StoreUint32(&counters[index], 0)
+			pc := globalData.syntheticPCs[fileIndex][index]
+			usermem.ByteOrder.PutUint64(pcBuffer[:], pc)
+			n, err := w.Write(pcBuffer[:])
+			if err != nil {
+				if err == io.EOF {
+					// Simply stop writing if we encounter EOF; it's ok if we attempted to
+					// write more than we can hold.
+					return total + n
 				}
-				total += n
+				panic(fmt.Sprintf("Internal error writing PCs to kcov area: %v", err))
 			}
+			total += n
 		}
 	}
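
The new consume loop above reads each counter, clears it, and emits the matching synthetic PC. Below is a minimal, self-contained sketch of that pattern (illustrative only, not gVisor code): it assumes flat counters/syntheticPCs slices instead of the per-file coverdata tables, and uses encoding/binary's little-endian order where the real code uses usermem.ByteOrder.

// consume_sketch.go - hedged sketch of the consume-and-report loop.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"sync/atomic"
)

// consumeCoverage clears every non-zero counter and writes the corresponding
// synthetic PC to w as a little-endian uint64, returning the bytes written.
func consumeCoverage(w io.Writer, counters []uint32, syntheticPCs []uint64) int {
	var pcBuffer [8]byte
	total := 0
	for i := range counters {
		if atomic.LoadUint32(&counters[i]) == 0 {
			continue
		}
		// Non-zero coverage data found; consume it and report it as a PC.
		atomic.StoreUint32(&counters[i], 0)
		binary.LittleEndian.PutUint64(pcBuffer[:], syntheticPCs[i])
		n, err := w.Write(pcBuffer[:])
		if err != nil {
			if err == io.EOF {
				// Stop quietly if the consumer's buffer is full.
				return total + n
			}
			panic(fmt.Sprintf("error writing PCs: %v", err))
		}
		total += n
	}
	return total
}

func main() {
	counters := []uint32{0, 1, 0, 3}
	pcs := []uint64{0x10, 0x20, 0x30, 0x40}
	var buf bytes.Buffer
	n := consumeCoverage(&buf, counters, pcs)
	fmt.Printf("wrote %d bytes for %d covered blocks\n", n, n/8)
}
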
diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go
index aad63aa99..d3e76ca7b 100644
--- a/pkg/sentry/kernel/kcov.go
+++ b/pkg/sentry/kernel/kcov.go
@@ -89,6 +89,10 @@ func (kcov *Kcov) TaskWork(t *Task) {
 	kcov.mu.Lock()
 	defer kcov.mu.Unlock()
 
+	if kcov.mode != linux.KCOV_TRACE_PC {
+		return
+	}
+
 	rw := &kcovReadWriter{
 		mf: kcov.mfp.MemoryFile(),
 		fr: kcov.mappable.FileRange(),
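
The kcov.go change above makes TaskWork a no-op unless PC tracing is active, so pending coverage is only flushed while kcov is enabled in KCOV_TRACE_PC mode. A small sketch of that guard pattern follows, with illustrative names (traceMode, kcovState) rather than gVisor's types:

// mode_guard_sketch.go - hedged sketch of the mode check added to TaskWork.
package main

import (
	"fmt"
	"sync"
)

type traceMode int

const (
	traceModeDisabled traceMode = iota
	traceModePC
)

type kcovState struct {
	mu   sync.Mutex
	mode traceMode
	pcs  []uint64
}

// flush reports pending PCs only while PC tracing is enabled; otherwise it
// returns early, mirroring the new guard in Kcov.TaskWork.
func (k *kcovState) flush(report func([]uint64)) {
	k.mu.Lock()
	defer k.mu.Unlock()
	if k.mode != traceModePC {
		return
	}
	report(k.pcs)
	k.pcs = k.pcs[:0]
}

func main() {
	k := &kcovState{mode: traceModeDisabled, pcs: []uint64{0x1, 0x2}}
	k.flush(func(p []uint64) { fmt.Println("reported", len(p), "PCs") }) // no-op: tracing disabled
	k.mode = traceModePC
	k.flush(func(p []uint64) { fmt.Println("reported", len(p), "PCs") }) // reports 2 PCs
}
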
diff --git a/test/syscalls/linux/kcov.cc b/test/syscalls/linux/kcov.cc
index f3c30444e..6afcb4e75 100644
--- a/test/syscalls/linux/kcov.cc
+++ b/test/syscalls/linux/kcov.cc
@@ -36,12 +36,13 @@ TEST(KcovTest, Kcov) {
   constexpr int kSize = 4096;
   constexpr int KCOV_INIT_TRACE = 0x80086301;
   constexpr int KCOV_ENABLE = 0x6364;
+  constexpr int KCOV_DISABLE = 0x6365;
 
   int fd;
   ASSERT_THAT(fd = open("/sys/kernel/debug/kcov", O_RDWR),
               AnyOf(SyscallSucceeds(), SyscallFailsWithErrno(ENOENT)));
 
-  // Kcov not enabled.
+  // Kcov not available.
   SKIP_IF(errno == ENOENT);
 
   ASSERT_THAT(ioctl(fd, KCOV_INIT_TRACE, kSize), SyscallSucceeds());
@@ -62,6 +63,8 @@ TEST(KcovTest, Kcov) {
     // Verify that PCs are in the standard kernel range.
     EXPECT_GT(area[i], 0xffffffff7fffffffL);
   }
+
+  ASSERT_THAT(ioctl(fd, KCOV_DISABLE, 0), SyscallSucceeds());
 }
 
 }  // namespace
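
For reference, the user-space lifecycle the updated test exercises is: init the trace, mmap the cover buffer, enable tracing, run some syscalls, read the recorded PCs, then disable. Below is a hedged Go sketch of that flow (not part of this change) using golang.org/x/sys/unix, with the same hard-coded ioctl numbers the test defines above:

// kcov_lifecycle_sketch.go - illustrative kcov usage; error handling abbreviated.
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

const (
	kcovInitTrace = 0x80086301 // KCOV_INIT_TRACE, as defined in the test
	kcovEnable    = 0x6364     // KCOV_ENABLE
	kcovDisable   = 0x6365     // KCOV_DISABLE
	coverSize     = 4096       // number of uint64 slots in the cover buffer
)

func main() {
	fd, err := unix.Open("/sys/kernel/debug/kcov", unix.O_RDWR, 0)
	if err != nil {
		fmt.Println("kcov not available:", err)
		return
	}
	defer unix.Close(fd)

	// Size the trace buffer, then map it so the kernel can write PCs into it.
	if err := unix.IoctlSetInt(fd, kcovInitTrace, coverSize); err != nil {
		panic(err)
	}
	area, err := unix.Mmap(fd, 0, coverSize*8, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(area)

	// Enable PC tracing for this task (mode 0 is KCOV_TRACE_PC), do some work,
	// then read the results and disable tracing.
	if err := unix.IoctlSetInt(fd, kcovEnable, 0); err != nil {
		panic(err)
	}
	unix.Getpid() // any syscall; its kernel path is what gets traced

	// Slot 0 holds the number of PCs recorded; the PCs themselves follow.
	n := binary.LittleEndian.Uint64(area[:8])
	fmt.Printf("recorded %d PCs\n", n)

	if err := unix.IoctlSetInt(fd, kcovDisable, 0); err != nil {
		panic(err)
	}
}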