Diffstat (limited to 'pkg/merkletree/merkletree_test.go')
-rw-r--r-- | pkg/merkletree/merkletree_test.go | 406
1 files changed, 287 insertions, 119 deletions
diff --git a/pkg/merkletree/merkletree_test.go b/pkg/merkletree/merkletree_test.go
index 911f61df9..e1350ebda 100644
--- a/pkg/merkletree/merkletree_test.go
+++ b/pkg/merkletree/merkletree_test.go
@@ -27,130 +27,153 @@ import (
 func TestLayout(t *testing.T) {
 	testCases := []struct {
-		dataSize            int64
-		expectedLevelOffset []int64
+		dataSize              int64
+		dataAndTreeInSameFile bool
+		expectedLevelOffset   []int64
 	}{
 		{
-			dataSize:            100,
-			expectedLevelOffset: []int64{0},
+			dataSize:              100,
+			dataAndTreeInSameFile: false,
+			expectedLevelOffset:   []int64{0},
 		},
 		{
-			dataSize:            1000000,
-			expectedLevelOffset: []int64{0, 2 * usermem.PageSize, 3 * usermem.PageSize},
+			dataSize:              100,
+			dataAndTreeInSameFile: true,
+			expectedLevelOffset:   []int64{usermem.PageSize},
 		},
 		{
-			dataSize:            4096 * int64(usermem.PageSize),
-			expectedLevelOffset: []int64{0, 32 * usermem.PageSize, 33 * usermem.PageSize},
+			dataSize:              1000000,
+			dataAndTreeInSameFile: false,
+			expectedLevelOffset:   []int64{0, 2 * usermem.PageSize, 3 * usermem.PageSize},
+		},
+		{
+			dataSize:              1000000,
+			dataAndTreeInSameFile: true,
+			expectedLevelOffset:   []int64{245 * usermem.PageSize, 247 * usermem.PageSize, 248 * usermem.PageSize},
+		},
+		{
+			dataSize:              4096 * int64(usermem.PageSize),
+			dataAndTreeInSameFile: false,
+			expectedLevelOffset:   []int64{0, 32 * usermem.PageSize, 33 * usermem.PageSize},
+		},
+		{
+			dataSize:              4096 * int64(usermem.PageSize),
+			dataAndTreeInSameFile: true,
+			expectedLevelOffset:   []int64{4096 * usermem.PageSize, 4128 * usermem.PageSize, 4129 * usermem.PageSize},
 		},
 	}

 	for _, tc := range testCases {
 		t.Run(fmt.Sprintf("%d", tc.dataSize), func(t *testing.T) {
-			p := InitLayout(tc.dataSize)
-			if p.blockSize != int64(usermem.PageSize) {
-				t.Errorf("got blockSize %d, want %d", p.blockSize, usermem.PageSize)
+			l := InitLayout(tc.dataSize, tc.dataAndTreeInSameFile)
+			if l.blockSize != int64(usermem.PageSize) {
+				t.Errorf("Got blockSize %d, want %d", l.blockSize, usermem.PageSize)
 			}
-			if p.digestSize != sha256DigestSize {
-				t.Errorf("got digestSize %d, want %d", p.digestSize, sha256DigestSize)
+			if l.digestSize != sha256DigestSize {
+				t.Errorf("Got digestSize %d, want %d", l.digestSize, sha256DigestSize)
 			}
-			if p.numLevels() != len(tc.expectedLevelOffset) {
-				t.Errorf("got levels %d, want %d", p.numLevels(), len(tc.expectedLevelOffset))
+			if l.numLevels() != len(tc.expectedLevelOffset) {
+				t.Errorf("Got levels %d, want %d", l.numLevels(), len(tc.expectedLevelOffset))
 			}
-			for i := 0; i < p.numLevels() && i < len(tc.expectedLevelOffset); i++ {
-				if p.levelOffset[i] != tc.expectedLevelOffset[i] {
-					t.Errorf("got levelStart[%d] %d, want %d", i, p.levelOffset[i], tc.expectedLevelOffset[i])
+			for i := 0; i < l.numLevels() && i < len(tc.expectedLevelOffset); i++ {
+				if l.levelOffset[i] != tc.expectedLevelOffset[i] {
+					t.Errorf("Got levelStart[%d] %d, want %d", i, l.levelOffset[i], tc.expectedLevelOffset[i])
 				}
 			}
 		})
 	}
 }

+const (
+	defaultName = "merkle_test"
+	defaultMode = 0644
+	defaultUID  = 0
+	defaultGID  = 0
+)
+
+// bytesReadWriter is used to read from/write to/seek in a byte array. Unlike
+// bytes.Buffer, it keeps the whole buffer during read so that it can be reused.
+type bytesReadWriter struct {
+	// bytes contains the underlying byte array.
+	bytes []byte
+	// readPos is the currently location for Read. Write always appends to
+	// the end of the array.
+	readPos int
+}
+
+func (brw *bytesReadWriter) Write(p []byte) (int, error) {
+	brw.bytes = append(brw.bytes, p...)
+	return len(p), nil
+}
+
+func (brw *bytesReadWriter) ReadAt(p []byte, off int64) (int, error) {
+	bytesRead := copy(p, brw.bytes[off:])
+	if bytesRead == 0 {
+		return bytesRead, io.EOF
+	}
+	return bytesRead, nil
+}
+
 func TestGenerate(t *testing.T) {
 	// The input data has size dataSize. It starts with the data in startWith,
 	// and all other bytes are zeroes.
 	testCases := []struct {
 		data         []byte
-		expectedRoot []byte
+		expectedHash []byte
 	}{
 		{
 			data:         bytes.Repeat([]byte{0}, usermem.PageSize),
-			expectedRoot: []byte{173, 127, 172, 178, 88, 111, 198, 233, 102, 192, 4, 215, 209, 209, 107, 2, 79, 88, 5, 255, 124, 180, 124, 122, 133, 218, 189, 139, 72, 137, 44, 167},
+			expectedHash: []byte{64, 253, 58, 72, 192, 131, 82, 184, 193, 33, 108, 142, 43, 46, 179, 134, 244, 21, 29, 190, 14, 39, 66, 129, 6, 46, 200, 211, 30, 247, 191, 252},
 		},
 		{
 			data:         bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
-			expectedRoot: []byte{62, 93, 40, 92, 161, 241, 30, 223, 202, 99, 39, 2, 132, 113, 240, 139, 117, 99, 79, 243, 54, 18, 100, 184, 141, 121, 238, 46, 149, 202, 203, 132},
+			expectedHash: []byte{182, 223, 218, 62, 65, 185, 160, 219, 93, 119, 186, 88, 205, 32, 122, 231, 173, 72, 78, 76, 65, 57, 177, 146, 159, 39, 44, 123, 230, 156, 97, 26},
 		},
 		{
 			data:         []byte{'a'},
-			expectedRoot: []byte{52, 75, 204, 142, 172, 129, 37, 14, 145, 137, 103, 203, 11, 162, 209, 205, 30, 169, 213, 72, 20, 28, 243, 24, 242, 2, 92, 43, 169, 59, 110, 210},
+			expectedHash: []byte{28, 201, 8, 36, 150, 178, 111, 5, 193, 212, 129, 205, 206, 124, 211, 90, 224, 142, 81, 183, 72, 165, 243, 240, 242, 241, 76, 127, 101, 61, 63, 11},
 		},
 		{
 			data:         bytes.Repeat([]byte{'a'}, usermem.PageSize),
-			expectedRoot: []byte{201, 62, 238, 45, 13, 176, 47, 16, 172, 199, 70, 13, 149, 118, 225, 34, 220, 248, 205, 83, 196, 191, 141, 252, 174, 27, 62, 116, 235, 207, 255, 90},
+			expectedHash: []byte{106, 58, 160, 152, 41, 68, 38, 108, 245, 74, 177, 84, 64, 193, 19, 176, 249, 86, 27, 193, 85, 164, 99, 240, 79, 104, 148, 222, 76, 46, 191, 79},
 		},
 	}

 	for _, tc := range testCases {
 		t.Run(fmt.Sprintf("%d:%v", len(tc.data), tc.data[0]), func(t *testing.T) {
-			var tree bytes.Buffer
-
-			root, err := Generate(bytes.NewBuffer(tc.data), int64(len(tc.data)), &tree, &tree)
-			if err != nil {
-				t.Fatalf("Generate failed: %v", err)
-			}
+			for _, dataAndTreeInSameFile := range []bool{false, true} {
+				var tree bytesReadWriter
+				params := GenerateParams{
+					Size:                  int64(len(tc.data)),
+					Name:                  defaultName,
+					Mode:                  defaultMode,
+					UID:                   defaultUID,
+					GID:                   defaultGID,
+					TreeReader:            &tree,
+					TreeWriter:            &tree,
+					DataAndTreeInSameFile: dataAndTreeInSameFile,
+				}
+				if dataAndTreeInSameFile {
+					tree.Write(tc.data)
+					params.File = &tree
+				} else {
+					params.File = &bytesReadWriter{
+						bytes: tc.data,
+					}
+				}
+				hash, err := Generate(&params)
+				if err != nil {
+					t.Fatalf("Got err: %v, want nil", err)
+				}

-			if !bytes.Equal(root, tc.expectedRoot) {
-				t.Errorf("Unexpected root")
+				if !bytes.Equal(hash, tc.expectedHash) {
+					t.Errorf("Got hash: %v, want %v", hash, tc.expectedHash)
+				}
 			}
 		})
 	}
 }

-// bytesReadWriter is used to read from/write to/seek in a byte array. Unlike
-// bytes.Buffer, it keeps the whole buffer during read so that it can be reused.
-type bytesReadWriter struct {
-	// bytes contains the underlying byte array.
-	bytes []byte
-	// readPos is the currently location for Read. Write always appends to
-	// the end of the array.
-	readPos int
-}
-
-func (brw *bytesReadWriter) Write(p []byte) (int, error) {
-	brw.bytes = append(brw.bytes, p...)
-	return len(p), nil
-}
-
-func (brw *bytesReadWriter) Read(p []byte) (int, error) {
-	if brw.readPos >= len(brw.bytes) {
-		return 0, io.EOF
-	}
-	bytesRead := copy(p, brw.bytes[brw.readPos:])
-	brw.readPos += bytesRead
-	if bytesRead < len(p) {
-		return bytesRead, io.EOF
-	}
-	return bytesRead, nil
-}
-
-func (brw *bytesReadWriter) Seek(offset int64, whence int) (int64, error) {
-	off := offset
-	if whence == io.SeekCurrent {
-		off += int64(brw.readPos)
-	}
-	if whence == io.SeekEnd {
-		off += int64(len(brw.bytes))
-	}
-	if off < 0 {
-		panic("seek with negative offset")
-	}
-	if off >= int64(len(brw.bytes)) {
-		return 0, io.EOF
-	}
-	brw.readPos = int(off)
-	return off, nil
-}
-
 func TestVerify(t *testing.T) {
 	// The input data has size dataSize. The portion to be verified ranges from
 	// verifyStart with verifySize. A bit is flipped in outOfRangeByteIndex to
@@ -165,6 +188,10 @@ func TestVerify(t *testing.T) {
 		// modified byte falls in verification range, Verify should
 		// fail, otherwise Verify should still succeed.
 		modifyByte    int64
+		modifyName    bool
+		modifyMode    bool
+		modifyUID     bool
+		modifyGID     bool
 		shouldSucceed bool
 	}{
 		// Verify range start outside the data range should fail.
@@ -193,12 +220,48 @@
 			modifyByte:    0,
 			shouldSucceed: false,
 		},
-		// Invalid verify range (0 size) should fail.
+		// 0 verify size should only verify metadata.
+		{
+			dataSize:      usermem.PageSize,
+			verifyStart:   0,
+			verifySize:    0,
+			modifyByte:    0,
+			shouldSucceed: true,
+		},
+		// Modified name should fail verification.
+		{
+			dataSize:      usermem.PageSize,
+			verifyStart:   0,
+			verifySize:    0,
+			modifyByte:    0,
+			modifyName:    true,
+			shouldSucceed: false,
+		},
+		// Modified mode should fail verification.
+		{
+			dataSize:      usermem.PageSize,
+			verifyStart:   0,
+			verifySize:    0,
+			modifyByte:    0,
+			modifyMode:    true,
+			shouldSucceed: false,
+		},
+		// Modified UID should fail verification.
 		{
 			dataSize:      usermem.PageSize,
 			verifyStart:   0,
 			verifySize:    0,
 			modifyByte:    0,
+			modifyUID:     true,
+			shouldSucceed: false,
+		},
+		// Modified GID should fail verification.
+		{
+			dataSize:      usermem.PageSize,
+			verifyStart:   0,
+			verifySize:    0,
+			modifyByte:    0,
+			modifyGID:     true,
 			shouldSucceed: false,
 		},
 		// The test cases below use a block-aligned verify range.
@@ -284,26 +347,79 @@ func TestVerify(t *testing.T) {
 			data := make([]byte, tc.dataSize)
 			// Generate random bytes in data.
 			rand.Read(data)
-			var tree bytesReadWriter
-			root, err := Generate(bytes.NewBuffer(data), int64(tc.dataSize), &tree, &tree)
-			if err != nil {
-				t.Fatalf("Generate failed: %v", err)
-			}
+			for _, dataAndTreeInSameFile := range []bool{false, true} {
+				var tree bytesReadWriter
+				genParams := GenerateParams{
+					Size:                  int64(len(data)),
+					Name:                  defaultName,
+					Mode:                  defaultMode,
+					UID:                   defaultUID,
+					GID:                   defaultGID,
+					TreeReader:            &tree,
+					TreeWriter:            &tree,
+					DataAndTreeInSameFile: dataAndTreeInSameFile,
+				}
+				if dataAndTreeInSameFile {
+					tree.Write(data)
+					genParams.File = &tree
+				} else {
+					genParams.File = &bytesReadWriter{
+						bytes: data,
+					}
+				}
+				hash, err := Generate(&genParams)
+				if err != nil {
+					t.Fatalf("Generate failed: %v", err)
+				}

-			// Flip a bit in data and checks Verify results.
-			var buf bytes.Buffer
-			data[tc.modifyByte] ^= 1
-			if tc.shouldSucceed {
-				if err := Verify(&buf, bytes.NewReader(data), &tree, tc.dataSize, tc.verifyStart, tc.verifySize, root); err != nil && err != io.EOF {
-					t.Errorf("Verification failed when expected to succeed: %v", err)
+				// Flip a bit in data and checks Verify results.
+				var buf bytes.Buffer
+				data[tc.modifyByte] ^= 1
+				verifyParams := VerifyParams{
+					Out:                   &buf,
+					File:                  bytes.NewReader(data),
+					Tree:                  &tree,
+					Size:                  tc.dataSize,
+					Name:                  defaultName,
+					Mode:                  defaultMode,
+					UID:                   defaultUID,
+					GID:                   defaultGID,
+					ReadOffset:            tc.verifyStart,
+					ReadSize:              tc.verifySize,
+					Expected:              hash,
+					DataAndTreeInSameFile: dataAndTreeInSameFile,
+				}
+				if tc.modifyName {
+					verifyParams.Name = defaultName + "abc"
+				}
+				if tc.modifyMode {
+					verifyParams.Mode = defaultMode + 1
 				}
-				if int64(buf.Len()) != tc.verifySize || !bytes.Equal(data[tc.verifyStart:tc.verifyStart+tc.verifySize], buf.Bytes()) {
-					t.Errorf("Incorrect output from Verify")
+				if tc.modifyUID {
+					verifyParams.UID = defaultUID + 1
 				}
-			} else {
-				if err := Verify(&buf, bytes.NewReader(data), &tree, tc.dataSize, tc.verifyStart, tc.verifySize, root); err == nil {
-					t.Errorf("Verification succeeded when expected to fail")
+				if tc.modifyGID {
+					verifyParams.GID = defaultGID + 1
+				}
+				if tc.shouldSucceed {
+					n, err := Verify(&verifyParams)
+					if err != nil && err != io.EOF {
+						t.Errorf("Verification failed when expected to succeed: %v", err)
+					}
+					if n != tc.verifySize {
+						t.Errorf("Got Verify output size %d, want %d", n, tc.verifySize)
+					}
+					if int64(buf.Len()) != tc.verifySize {
+						t.Errorf("Got Verify output buf size %d, want %d,", buf.Len(), tc.verifySize)
+					}
+					if !bytes.Equal(data[tc.verifyStart:tc.verifyStart+tc.verifySize], buf.Bytes()) {
+						t.Errorf("Incorrect output buf from Verify")
+					}
+				} else {
+					if _, err := Verify(&verifyParams); err == nil {
+						t.Errorf("Verification succeeded when expected to fail")
+					}
 				}
 			}
 		})
@@ -318,36 +434,88 @@ func TestVerifyRandom(t *testing.T) {
 	data := make([]byte, dataSize)
 	// Generate random bytes in data.
 	rand.Read(data)
-	var tree bytesReadWriter
-	root, err := Generate(bytes.NewBuffer(data), int64(dataSize), &tree, &tree)
-	if err != nil {
-		t.Fatalf("Generate failed: %v", err)
-	}
+	for _, dataAndTreeInSameFile := range []bool{false, true} {
+		var tree bytesReadWriter
+		genParams := GenerateParams{
+			Size:                  int64(len(data)),
+			Name:                  defaultName,
+			Mode:                  defaultMode,
+			UID:                   defaultUID,
+			GID:                   defaultGID,
+			TreeReader:            &tree,
+			TreeWriter:            &tree,
+			DataAndTreeInSameFile: dataAndTreeInSameFile,
+		}

-	// Pick a random portion of data.
-	start := rand.Int63n(dataSize - 1)
-	size := rand.Int63n(dataSize) + 1
+		if dataAndTreeInSameFile {
+			tree.Write(data)
+			genParams.File = &tree
+		} else {
+			genParams.File = &bytesReadWriter{
+				bytes: data,
+			}
+		}
+		hash, err := Generate(&genParams)
+		if err != nil {
+			t.Fatalf("Generate failed: %v", err)
+		}

-	var buf bytes.Buffer
-	// Checks that the random portion of data from the original data is
-	// verified successfully.
-	if err := Verify(&buf, bytes.NewReader(data), &tree, dataSize, start, size, root); err != nil && err != io.EOF {
-		t.Errorf("Verification failed for correct data: %v", err)
-	}
-	if size > dataSize-start {
-		size = dataSize - start
-	}
-	if int64(buf.Len()) != size || !bytes.Equal(data[start:start+size], buf.Bytes()) {
-		t.Errorf("Incorrect output from Verify")
-	}
+		// Pick a random portion of data.
+		start := rand.Int63n(dataSize - 1)
+		size := rand.Int63n(dataSize) + 1
+
+		var buf bytes.Buffer
+		verifyParams := VerifyParams{
+			Out:                   &buf,
+			File:                  bytes.NewReader(data),
+			Tree:                  &tree,
+			Size:                  dataSize,
+			Name:                  defaultName,
+			Mode:                  defaultMode,
+			UID:                   defaultUID,
+			GID:                   defaultGID,
+			ReadOffset:            start,
+			ReadSize:              size,
+			Expected:              hash,
+			DataAndTreeInSameFile: dataAndTreeInSameFile,
+		}
+
+		// Checks that the random portion of data from the original data is
+		// verified successfully.
+		n, err := Verify(&verifyParams)
+		if err != nil && err != io.EOF {
+			t.Errorf("Verification failed for correct data: %v", err)
+		}
+		if size > dataSize-start {
+			size = dataSize - start
+		}
+		if n != size {
+			t.Errorf("Got Verify output size %d, want %d", n, size)
+		}
+		if int64(buf.Len()) != size {
+			t.Errorf("Got Verify output buf size %d, want %d", buf.Len(), size)
+		}
+		if !bytes.Equal(data[start:start+size], buf.Bytes()) {
+			t.Errorf("Incorrect output buf from Verify")
+		}
+
+		// Verify that modified metadata should fail verification.
+		buf.Reset()
+		verifyParams.Name = defaultName + "abc"
+		if _, err := Verify(&verifyParams); err == nil {
+			t.Error("Verify succeeded for modified metadata, expect failure")
+		}

-	buf.Reset()
-	// Flip a random bit in randPortion, and check that verification fails.
-	randBytePos := rand.Int63n(size)
-	data[start+randBytePos] ^= 1
+		// Flip a random bit in randPortion, and check that verification fails.
+		buf.Reset()
+		randBytePos := rand.Int63n(size)
+		data[start+randBytePos] ^= 1
+		verifyParams.File = bytes.NewReader(data)
+		verifyParams.Name = defaultName

-	if err := Verify(&buf, bytes.NewReader(data), &tree, dataSize, start, size, root); err == nil {
-		t.Errorf("Verification succeeded for modified data")
+		if _, err := Verify(&verifyParams); err == nil {
+			t.Error("Verification succeeded for modified data, expect failure")
+		}
 	}
 }
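Note (not part of the change above): the tests now drive Generate and Verify through parameter structs, so a minimal, hypothetical usage sketch may help readers of this diff. The GenerateParams/VerifyParams field names and the call shapes below are taken from the test changes; the concrete field types (io.ReaderAt and io.Writer for the file and tree handles, numeric Mode/UID/GID), the treeBuffer helper, and the example values are assumptions, not confirmed by this commit.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"gvisor.dev/gvisor/pkg/merkletree"
)

// treeBuffer is a hypothetical in-memory io.ReaderAt/io.Writer, mirroring the
// test's bytesReadWriter, so one object can serve as both TreeReader and
// TreeWriter while Generate builds the tree level by level.
type treeBuffer struct {
	bytes []byte
}

func (b *treeBuffer) Write(p []byte) (int, error) {
	b.bytes = append(b.bytes, p...)
	return len(p), nil
}

func (b *treeBuffer) ReadAt(p []byte, off int64) (int, error) {
	if off >= int64(len(b.bytes)) {
		return 0, io.EOF
	}
	n := copy(p, b.bytes[off:])
	if n == 0 {
		return 0, io.EOF
	}
	return n, nil
}

func main() {
	data := bytes.Repeat([]byte{'a'}, 1<<16)
	var tree treeBuffer

	// Build the Merkle tree over data plus the name/mode/uid/gid metadata.
	hash, err := merkletree.Generate(&merkletree.GenerateParams{
		File:                  bytes.NewReader(data),
		Size:                  int64(len(data)),
		Name:                  "example",
		Mode:                  0644,
		UID:                   0,
		GID:                   0,
		TreeReader:            &tree,
		TreeWriter:            &tree,
		DataAndTreeInSameFile: false,
	})
	if err != nil {
		log.Fatalf("Generate: %v", err)
	}

	// Verify the first 4096 bytes against the generated hash; verified bytes
	// are streamed to Out.
	var out bytes.Buffer
	n, err := merkletree.Verify(&merkletree.VerifyParams{
		Out:                   &out,
		File:                  bytes.NewReader(data),
		Tree:                  &tree,
		Size:                  int64(len(data)),
		Name:                  "example",
		Mode:                  0644,
		UID:                   0,
		GID:                   0,
		ReadOffset:            0,
		ReadSize:              4096,
		Expected:              hash,
		DataAndTreeInSameFile: false,
	})
	if err != nil && err != io.EOF {
		log.Fatalf("Verify: %v", err)
	}
	fmt.Printf("verified %d bytes, hash %x\n", n, hash)
}

For the same-file layout, the tests above write the data into the tree buffer first, point File at that buffer, and set DataAndTreeInSameFile to true; the same pattern would apply to this sketch.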