summary refs log tree commit diff homepage
path: root/pkg/safemem/block_unsafe.go
diff options
context:
space:
mode:
authorAndrei Vagin <avagin@google.com>2021-04-19 12:59:32 -0700
committergVisor bot <gvisor-bot@google.com>2021-04-19 13:01:59 -0700
commitb0333d33a206b3a3cf3bcc90e44793708ed5cb7a (patch)
tree3d90589a160b34d312e76091a6e0a282fe950d24 /pkg/safemem/block_unsafe.go
parent9b4cc3d43bc79698762e1efa980148f12e8ad196 (diff)
Optimize safemem.Zero
There is a loop that fills a byte array with zeros. Let's use copy() instead of setting elements one by one. The new implementation is twice as fast as the previous one and it is more than 10x faster with the race detector. Reported-by: syzbot+5f57d988a5f929af5a91@syzkaller.appspotmail.com PiperOrigin-RevId: 369283919
Diffstat (limited to 'pkg/safemem/block_unsafe.go')
-rw-r--r--pkg/safemem/block_unsafe.go19
1 file changed, 17 insertions, 2 deletions
diff --git a/pkg/safemem/block_unsafe.go b/pkg/safemem/block_unsafe.go
index 93879bb4f..4af534385 100644
--- a/pkg/safemem/block_unsafe.go
+++ b/pkg/safemem/block_unsafe.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/gohacks"
"gvisor.dev/gvisor/pkg/safecopy"
+ "gvisor.dev/gvisor/pkg/sync"
)
// A Block is a range of contiguous bytes, similar to []byte but with the
@@ -223,8 +224,22 @@ func Copy(dst, src Block) (int, error) {
func Zero(dst Block) (int, error) {
if !dst.needSafecopy {
bs := dst.ToSlice()
- for i := range bs {
- bs[i] = 0
+ if !sync.RaceEnabled {
+ // If the race detector isn't enabled, the golang
+ // compiler replaces the next loop with memclr
+ // (https://github.com/golang/go/issues/5373).
+ for i := range bs {
+ bs[i] = 0
+ }
+ } else {
+ bsLen := len(bs)
+ if bsLen == 0 {
+ return 0, nil
+ }
+ bs[0] = 0
+ for i := 1; i < bsLen; i *= 2 {
+ copy(bs[i:], bs[:i])
+ }
}
return len(bs), nil
}