author | Zhaozhong Ni <nzz@google.com> | 2018-08-24 14:52:23 -0700
---|---|---
committer | Shentubot <shentubot@google.com> | 2018-08-24 14:53:31 -0700
commit | a6b00502b04ced2f12cfcf35c6f276cff349737b (patch) |
tree | d443ea0679091b193bcc5568f0aa5aff3ba1a0f3 /pkg/state/statefile/statefile_test.go |
parent | 02dfceab6d4c4a2a3342ef69be0265b7ab03e5d7 (diff) |
compressio: support optional hashing and eliminate hashio.
Compared to the previous compressio / hashio nesting, this yields up to a 100% speedup.
PiperOrigin-RevId: 210161269
Change-Id: I481aa9fe980bb817fe465fe34d32ea33fc8abf1c
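For context, here is a minimal sketch of what "optional hashing" looks like at a compressio call site. It assumes a post-change API of `NewWriter(out, key, chunkSize, level)` and `NewReader(in, key)` in which a nil key disables hashing entirely; the key bytes and chunk size below are illustrative, not taken from this commit. Only `compressio.ErrHashMismatch` is confirmed by the diff itself.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"gvisor.googlesource.com/gvisor/pkg/compressio"
)

func main() {
	// A non-nil key enables in-line hashing of each compressed chunk,
	// replacing the separate hashio layer; a nil key would compress
	// without any integrity checking.
	key := []byte("example 32-byte integrity key..!")

	var buf bytes.Buffer
	w, err := compressio.NewWriter(&buf, key, 1024*1024, 1) // assumed signature
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("state data")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Reading with the same key verifies each chunk; corruption or a
	// different key surfaces as compressio.ErrHashMismatch, which the
	// test below asserts.
	r, err := compressio.NewReader(bytes.NewReader(buf.Bytes()), key)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
```

Folding the hash into the compression pipeline avoids a second pass over the data, which is where the claimed speedup comes from.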
Diffstat (limited to 'pkg/state/statefile/statefile_test.go')
-rw-r--r-- | pkg/state/statefile/statefile_test.go | 70
1 file changed, 31 insertions, 39 deletions
diff --git a/pkg/state/statefile/statefile_test.go b/pkg/state/statefile/statefile_test.go
index 66d9581ed..fa3fb9f2c 100644
--- a/pkg/state/statefile/statefile_test.go
+++ b/pkg/state/statefile/statefile_test.go
@@ -20,9 +20,11 @@ import (
 	"encoding/base64"
 	"io"
 	"math/rand"
+	"runtime"
 	"testing"
+	"time"
 
-	"gvisor.googlesource.com/gvisor/pkg/hashio"
+	"gvisor.googlesource.com/gvisor/pkg/compressio"
 )
 
 func randomKey() ([]byte, error) {
@@ -42,6 +44,8 @@ type testCase struct {
 }
 
 func TestStatefile(t *testing.T) {
+	rand.Seed(time.Now().Unix())
+
 	cases := []testCase{
 		// Various data sizes.
 		{"nil", nil, nil},
@@ -59,13 +63,9 @@ func TestStatefile(t *testing.T) {
 		// Make sure we have one longer than the hash length.
 		{"longer than hash", []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), nil},
 
-		// Make sure we have one longer than the segment size.
-		{"segments", make([]byte, 3*hashio.SegmentSize), nil},
-		{"segments minus one", make([]byte, 3*hashio.SegmentSize-1), nil},
-		{"segments plus one", make([]byte, 3*hashio.SegmentSize+1), nil},
-		{"segments minus hash", make([]byte, 3*hashio.SegmentSize-32), nil},
-		{"segments plus hash", make([]byte, 3*hashio.SegmentSize+32), nil},
-		{"large", make([]byte, 30*hashio.SegmentSize), nil},
+		// Make sure we have one longer than the chunk size.
+		{"chunks", make([]byte, 3*compressionChunkSize), nil},
+		{"large", make([]byte, 30*compressionChunkSize), nil},
 
 		// Different metadata.
 		{"one metadata", []byte("data"), map[string]string{"foo": "bar"}},
@@ -130,27 +130,31 @@ func TestStatefile(t *testing.T) {
 			}
 
 			// Change the data and verify that it fails.
-			b := append([]byte(nil), bufEncoded.Bytes()...)
-			b[rand.Intn(len(b))]++
-			r, _, err = NewReader(bytes.NewReader(b), key)
-			if err == nil {
-				_, err = io.Copy(&bufDecoded, r)
-			}
-			if err == nil {
-				t.Error("got no error: expected error on data corruption")
+			if key != nil {
+				b := append([]byte(nil), bufEncoded.Bytes()...)
+				b[rand.Intn(len(b))]++
+				bufDecoded.Reset()
+				r, _, err = NewReader(bytes.NewReader(b), key)
+				if err == nil {
+					_, err = io.Copy(&bufDecoded, r)
+				}
+				if err == nil {
+					t.Error("got no error: expected error on data corruption")
+				}
 			}
 
 			// Change the key and verify that it fails.
-			if key == nil {
-				key = integrityKey
-			} else {
-				key[rand.Intn(len(key))]++
+			newKey := integrityKey
+			if len(key) > 0 {
+				newKey = append([]byte{}, key...)
+				newKey[rand.Intn(len(newKey))]++
 			}
-			r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), key)
+			bufDecoded.Reset()
+			r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), newKey)
 			if err == nil {
 				_, err = io.Copy(&bufDecoded, r)
 			}
-			if err != hashio.ErrHashMismatch {
+			if err != compressio.ErrHashMismatch {
 				t.Errorf("got error: %v, expected ErrHashMismatch on key mismatch", err)
 			}
 		})
@@ -159,7 +163,7 @@ func TestStatefile(t *testing.T) {
 	}
 }
 
-const benchmarkDataSize = 10 * 1024 * 1024
+const benchmarkDataSize = 100 * 1024 * 1024
 
 func benchmark(b *testing.B, size int, write bool, compressible bool) {
 	b.StopTimer()
@@ -249,14 +253,6 @@ func benchmark(b *testing.B, size int, write bool, compressible bool) {
 	}
 }
 
-func BenchmarkWrite1BCompressible(b *testing.B) {
-	benchmark(b, 1, true, true)
-}
-
-func BenchmarkWrite1BNoncompressible(b *testing.B) {
-	benchmark(b, 1, true, false)
-}
-
 func BenchmarkWrite4KCompressible(b *testing.B) {
 	benchmark(b, 4096, true, true)
 }
@@ -273,14 +269,6 @@ func BenchmarkWrite1MNoncompressible(b *testing.B) {
 	benchmark(b, 1024*1024, true, false)
 }
 
-func BenchmarkRead1BCompressible(b *testing.B) {
-	benchmark(b, 1, false, true)
-}
-
-func BenchmarkRead1BNoncompressible(b *testing.B) {
-	benchmark(b, 1, false, false)
-}
-
 func BenchmarkRead4KCompressible(b *testing.B) {
 	benchmark(b, 4096, false, true)
 }
@@ -296,3 +284,7 @@ func BenchmarkRead1MCompressible(b *testing.B) {
 func BenchmarkRead1MNoncompressible(b *testing.B) {
 	benchmark(b, 1024*1024, false, false)
 }
+
+func init() {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+}
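For completeness, a hedged sketch of the statefile round trip the test exercises. The three-value `NewReader(r, key)` return is implied by the test's call sites; the `NewWriter(w, key, metadata)` signature, the key, and the metadata values are assumptions for illustration only.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"gvisor.googlesource.com/gvisor/pkg/state/statefile"
)

func main() {
	key := make([]byte, 32) // demo key; real callers should use random bytes
	metadata := map[string]string{"purpose": "demo"} // hypothetical metadata

	// Write a statefile: compressed, with per-chunk hashes keyed by key.
	var buf bytes.Buffer
	w, err := statefile.NewWriter(&buf, key, metadata) // assumed signature
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("saved state")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Read it back; a wrong key should fail with ErrHashMismatch,
	// exactly what the key-mismatch case in the test checks.
	r, md, err := statefile.NewReader(bytes.NewReader(buf.Bytes()), key)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		panic(err)
	}
	fmt.Printf("%s %v\n", out.String(), md)
}
```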