Diffstat (limited to 'pkg/merkletree/merkletree.go')
-rw-r--r--  pkg/merkletree/merkletree.go  91
1 file changed, 59 insertions, 32 deletions
diff --git a/pkg/merkletree/merkletree.go b/pkg/merkletree/merkletree.go
index 0b961d3d9..6358ad8e9 100644
--- a/pkg/merkletree/merkletree.go
+++ b/pkg/merkletree/merkletree.go
@@ -384,6 +384,14 @@ func verifyMetadata(params *VerifyParams, layout *Layout) error {
return descriptor.verify(params.Expected, params.HashAlgorithms)
}
+// cachedHashes stores verified hashes from a previous hash step.
+type cachedHashes struct {
+ // offset is the offset of the cached hash at each level.
+ offset []int64
+ // hash is the verified hash for each level, cached from previous hash steps.
+ hash [][]byte
+}
+
// Verify verifies the content read from data with offset. The content is
// verified against tree. If content spans across multiple blocks, each block is
// verified. Verification fails if the hash of the data does not match the tree
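The struct added above pays off because consecutive data blocks share their ancestors: above level 0, a block's digest index is its block index divided by the number of digests that fit in one tree block, so a run of adjacent blocks collapses onto the same upper-level entries. A standalone sketch of that arithmetic follows; the 128 digests-per-block geometry (e.g. 4096-byte tree blocks holding 32-byte digests) is an assumption for illustration, not a value read from this file.

package main

import "fmt"

func main() {
    // Assumed geometry: 4096-byte tree blocks holding 32-byte digests.
    const hashesPerBlock = 128
    const numLevels = 3

    // Count the distinct digest indices touched at each level while
    // verifying data blocks 0..199.
    distinct := make([]map[int64]bool, numLevels)
    for level := range distinct {
        distinct[level] = map[int64]bool{}
    }
    for block := int64(0); block < 200; block++ {
        idx := block
        for level := 0; level < numLevels; level++ {
            distinct[level][idx] = true
            idx /= hashesPerBlock
        }
    }
    for level := 0; level < numLevels; level++ {
        fmt.Printf("level %d: %d distinct digests\n", level, len(distinct[level]))
    }
    // Prints 200, 2, 1: everything above the leaves repeats, so keeping one
    // verified digest per level avoids re-reading most of the hash tree.
}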
@@ -409,29 +417,32 @@ func Verify(params *VerifyParams) (int64, error) {
firstDataBlock := params.ReadOffset / layout.blockSize
lastDataBlock := (params.ReadOffset + params.ReadSize - 1) / layout.blockSize
- buf := make([]byte, layout.blockSize)
- var readErr error
- total := int64(0)
+ size := (lastDataBlock - firstDataBlock + 1) * layout.blockSize
+ retBuf := make([]byte, size)
+ n, err := params.File.ReadAt(retBuf, firstDataBlock*layout.blockSize)
+ if err != nil && err != io.EOF {
+ return 0, err
+ }
+ total := int64(n)
+ bytesRead := int64(0)
+
+ // Only cache hash results if reading more than a block.
+ var ch *cachedHashes
+ if lastDataBlock > firstDataBlock {
+ ch = &cachedHashes{
+ offset: make([]int64, layout.numLevels()),
+ hash: make([][]byte, layout.numLevels()),
+ }
+ }
for i := firstDataBlock; i <= lastDataBlock; i++ {
+ // We have reached the end of the file during verification.
+ if total <= 0 {
+ return bytesRead, io.EOF
+ }
// Read a block that includes all or part of target range in
// input data.
- bytesRead, err := params.File.ReadAt(buf, i*layout.blockSize)
- readErr = err
- // If at the end of input data and all previous blocks are
- // verified, return the verified input data and EOF.
- if readErr == io.EOF && bytesRead == 0 {
- break
- }
- if readErr != nil && readErr != io.EOF {
- return 0, fmt.Errorf("read from data failed: %w", err)
- }
- // If this is the end of file, zero the remaining bytes in buf,
- // otherwise they are still from the previous block.
- if bytesRead < len(buf) {
- for j := bytesRead; j < len(buf); j++ {
- buf[j] = 0
- }
- }
+ buf := retBuf[(i-firstDataBlock)*layout.blockSize : (i-firstDataBlock+1)*layout.blockSize]
+
descriptor := VerityDescriptor{
Name: params.Name,
FileSize: params.Size,
@@ -441,8 +452,8 @@ func Verify(params *VerifyParams) (int64, error) {
SymlinkTarget: params.SymlinkTarget,
Children: params.Children,
}
- if err := verifyBlock(params.Tree, &descriptor, &layout, buf, i, params.HashAlgorithms, params.Expected); err != nil {
- return 0, err
+ if err := verifyBlock(params.Tree, &descriptor, &layout, buf, i, params.HashAlgorithms, params.Expected, ch); err != nil {
+ return bytesRead, err
}
// startOff is the beginning of the read range within the
@@ -459,22 +470,24 @@ func Verify(params *VerifyParams) (int64, error) {
if i == lastDataBlock {
endOff = (params.ReadOffset+params.ReadSize-1)%layout.blockSize + 1
}
+
// If the provided size exceeds the end of input data, we should
// only copy the parts in buf that are part of the input data.
- if startOff > int64(bytesRead) {
- startOff = int64(bytesRead)
+ if startOff > total {
+ startOff = total
}
- if endOff > int64(bytesRead) {
- endOff = int64(bytesRead)
+ if endOff > total {
+ endOff = total
}
+
n, err := params.Out.Write(buf[startOff:endOff])
if err != nil {
- return total, err
+ return bytesRead, err
}
- total += int64(n)
-
+ bytesRead += int64(n)
+ total -= endOff
}
- return total, readErr
+ return bytesRead, nil
}
// verifyBlock verifies a block against tree. index is the number of block in
@@ -482,7 +495,7 @@ func Verify(params *VerifyParams) (int64, error) {
// fails if the calculated hash from block is different from any level of
// hashes stored in tree. And the final root hash is compared with
// expected.
-func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout, dataBlock []byte, blockIndex int64, hashAlgorithms int, expected []byte) error {
+func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout, dataBlock []byte, blockIndex int64, hashAlgorithms int, expected []byte, ch *cachedHashes) error {
if len(dataBlock) != int(layout.blockSize) {
return fmt.Errorf("incorrect block size")
}
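The next hunk adds a cache check to the per-level loop: stop as soon as the digest needed at this level was already verified in an earlier call, otherwise verify it and record it. Below is a minimal standalone sketch of that hit-or-fill shape; levelCache, verifyWithCache, and the offset formula are made-up stand-ins for illustration, not this package's API.

package main

import "fmt"

// levelCache mirrors the shape of cachedHashes: one verified digest and its
// byte offset per tree level. Every name in this sketch is made up.
type levelCache struct {
    offset []int64
    hash   [][]byte
}

// verifyWithCache walks the levels bottom-up: stop at the first cache hit,
// otherwise "verify" the level and record the result.
func verifyWithCache(c *levelCache, numLevels int, digestOffset func(int) int64, verifyLevel func(int) ([]byte, error)) error {
    for level := 0; level < numLevels; level++ {
        if c != nil && c.hash[level] != nil && c.offset[level] == digestOffset(level) {
            return nil // this level and everything above it was verified earlier
        }
        digest, err := verifyLevel(level)
        if err != nil {
            return err
        }
        if c != nil {
            c.offset[level] = digestOffset(level)
            c.hash[level] = digest
        }
    }
    return nil
}

func main() {
    const numLevels, hashesPerBlock, digestSize = 3, 128, 32 // assumed geometry

    c := &levelCache{offset: make([]int64, numLevels), hash: make([][]byte, numLevels)}
    calls := 0
    verify := func(level int) ([]byte, error) { calls++; return []byte{byte(level)}, nil }

    for _, block := range []int64{7, 8} { // two adjacent data blocks
        b := block
        // Byte offset of block b's digest at a given level; the formula is an
        // assumption for this sketch, not the package's digestOffset.
        off := func(level int) int64 {
            idx := b
            for l := 0; l < level; l++ {
                idx /= hashesPerBlock
            }
            return int64(level)*1_000_000 + idx*digestSize
        }
        _ = verifyWithCache(c, numLevels, off, verify)
    }
    fmt.Println("verifyLevel calls:", calls) // 4, not 6: block 8 only redoes level 0
}

For two adjacent blocks only level 0 has to be re-verified; the upper levels come out of the cache, which is what cachedHashes buys Verify's multi-block loop.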
@@ -491,6 +504,12 @@ func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout,
treeBlock := make([]byte, layout.blockSize)
var digest []byte
for level := 0; level < layout.numLevels(); level++ {
+ // No need to verify remaining levels if the current block has
+ // been verified in a previous call and cached.
+ if ch != nil && ch.offset[level] == layout.digestOffset(level, blockIndex) && ch.hash[level] != nil {
+ break
+ }
+
// Calculate hash.
if level == 0 {
h, err := hashData(dataBlock, hashAlgorithms)
@@ -521,11 +540,19 @@ func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout,
if !bytes.Equal(digest, expectedDigest) {
return fmt.Errorf("verification failed")
}
+ if ch != nil {
+ ch.offset[level] = layout.digestOffset(level, blockIndex)
+ ch.hash[level] = expectedDigest
+ }
blockIndex = blockIndex / layout.hashesPerBlock()
}
// Verification for the tree succeeded. Now hash the descriptor with
// the root hash and compare it with expected.
- descriptor.RootHash = digest
+ if ch != nil {
+ descriptor.RootHash = ch.hash[layout.rootLevel()]
+ } else {
+ descriptor.RootHash = digest
+ }
return descriptor.verify(expected, hashAlgorithms)
}
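To make the rewritten Verify's byte accounting easier to follow, here is a self-contained simulation of just the read-and-copy path, with the per-block verifyBlock call elided. The 8-byte block size, the in-memory file, and the copy standing in for params.File.ReadAt are assumptions chosen to keep the numbers small.

package main

import (
    "bytes"
    "fmt"
)

func main() {
    const blockSize = int64(8)                 // assumed tiny block size
    file := []byte("01234567890123456789")     // 20-byte stand-in for params.File
    readOffset, readSize := int64(5), int64(9) // want bytes [5, 14)

    firstDataBlock := readOffset / blockSize
    lastDataBlock := (readOffset + readSize - 1) / blockSize

    // One read covering every block in the range; make zeroes the buffer, so
    // any tail past EOF stays zero instead of holding stale data.
    retBuf := make([]byte, (lastDataBlock-firstDataBlock+1)*blockSize)
    n := copy(retBuf, file[firstDataBlock*blockSize:]) // stands in for File.ReadAt
    total := int64(n)

    var out bytes.Buffer
    bytesRead := int64(0)
    for i := firstDataBlock; i <= lastDataBlock; i++ {
        buf := retBuf[(i-firstDataBlock)*blockSize : (i-firstDataBlock+1)*blockSize]
        // verifyBlock(...) would check buf against the Merkle tree here.

        // Clip the copy to the requested range and to the data actually read.
        startOff, endOff := int64(0), blockSize
        if i == firstDataBlock {
            startOff = readOffset % blockSize
        }
        if i == lastDataBlock {
            endOff = (readOffset+readSize-1)%blockSize + 1
        }
        if startOff > total {
            startOff = total
        }
        if endOff > total {
            endOff = total
        }
        out.Write(buf[startOff:endOff])
        bytesRead += endOff - startOff
        total -= endOff
    }
    fmt.Printf("wrote %d bytes: %q\n", bytesRead, out.String())
}

Running this prints wrote 9 bytes: "567890123", i.e. exactly the requested [ReadOffset, ReadOffset+ReadSize) range, which is what Verify now returns as its byte count.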