 Makefile                                  |  2 +-
 debian/show_paths.bzl                     |  9 ---------
 pkg/merkletree/merkletree.go              | 36 +++++++++++++++++++++++++++++++++++++++++---
 pkg/sentry/platform/kvm/bluepill_fault.go |  2 +-
 tools/bazel.mk                            | 20 +-------------------
 tools/show_paths.bzl                      | 25 +++++++++++++++++++++++++
 6 files changed, 61 insertions(+), 33 deletions(-)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -444,7 +444,7 @@ $(RELEASE_ARTIFACTS)/%:
 	@mkdir -p $@
 	@$(call copy,//runsc:runsc,$@)
 	@$(call copy,//shim:containerd-shim-runsc-v1,$@)
-	@$(call deb_copy,//debian:debian,$@)
+	@$(call copy,//debian:debian,$@)
 
 release: $(RELEASE_KEY) $(RELEASE_ARTIFACTS)/$(ARCH)
 	@mkdir -p $(RELEASE_ROOT)
diff --git a/debian/show_paths.bzl b/debian/show_paths.bzl
deleted file mode 100644
index 366b9d2e8..000000000
--- a/debian/show_paths.bzl
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Formatter to extract the output files from pkg_deb."""
-
-def format(target):
-    provider_map = providers(target)
-    return "\n".join([
-        provider_map["OutputGroupInfo"].out.to_list()[0].path,
-        provider_map["OutputGroupInfo"].deb.to_list()[0].path,
-        provider_map["OutputGroupInfo"].changes.to_list()[0].path,
-    ])
diff --git a/pkg/merkletree/merkletree.go b/pkg/merkletree/merkletree.go
index e6cdc0f89..6358ad8e9 100644
--- a/pkg/merkletree/merkletree.go
+++ b/pkg/merkletree/merkletree.go
@@ -384,6 +384,14 @@ func verifyMetadata(params *VerifyParams, layout *Layout) error {
 	return descriptor.verify(params.Expected, params.HashAlgorithms)
 }
 
+// cachedHashes stores verified hashes from a previous hash step.
+type cachedHashes struct {
+	// offset is the offset of cached hash in each level.
+	offset []int64
+	// hash is the verified cache for each level from previous hash steps.
+	hash [][]byte
+}
+
 // Verify verifies the content read from data with offset. The content is
 // verified against tree. If content spans across multiple blocks, each block is
 // verified. Verification fails if the hash of the data does not match the tree
@@ -418,6 +426,14 @@ func Verify(params *VerifyParams) (int64, error) {
 	total := int64(n)
 	bytesRead := int64(0)
 
+	// Only cache hash results if reading more than a block.
+	var ch *cachedHashes
+	if lastDataBlock > firstDataBlock {
+		ch = &cachedHashes{
+			offset: make([]int64, layout.numLevels()),
+			hash:   make([][]byte, layout.numLevels()),
+		}
+	}
 	for i := firstDataBlock; i <= lastDataBlock; i++ {
 		// Reach the end of file during verification.
 		if total <= 0 {
@@ -436,7 +452,7 @@ func Verify(params *VerifyParams) (int64, error) {
 			SymlinkTarget: params.SymlinkTarget,
 			Children:      params.Children,
 		}
-		if err := verifyBlock(params.Tree, &descriptor, &layout, buf, i, params.HashAlgorithms, params.Expected); err != nil {
+		if err := verifyBlock(params.Tree, &descriptor, &layout, buf, i, params.HashAlgorithms, params.Expected, ch); err != nil {
 			return bytesRead, err
 		}
 
@@ -479,7 +495,7 @@ func Verify(params *VerifyParams) (int64, error) {
 // fails if the calculated hash from block is different from any level of
 // hashes stored in tree. And the final root hash is compared with
 // expected.
-func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout, dataBlock []byte, blockIndex int64, hashAlgorithms int, expected []byte) error {
+func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout, dataBlock []byte, blockIndex int64, hashAlgorithms int, expected []byte, ch *cachedHashes) error {
 	if len(dataBlock) != int(layout.blockSize) {
 		return fmt.Errorf("incorrect block size")
 	}
@@ -488,6 +504,12 @@ func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout,
 	treeBlock := make([]byte, layout.blockSize)
 	var digest []byte
 	for level := 0; level < layout.numLevels(); level++ {
+		// No need to verify remaining levels if the current block has
+		// been verified in a previous call and cached.
+		if ch != nil && ch.offset[level] == layout.digestOffset(level, blockIndex) && ch.hash[level] != nil {
+			break
+		}
+
 		// Calculate hash.
 		if level == 0 {
 			h, err := hashData(dataBlock, hashAlgorithms)
@@ -518,11 +540,19 @@ func verifyBlock(tree io.ReaderAt, descriptor *VerityDescriptor, layout *Layout,
 		if !bytes.Equal(digest, expectedDigest) {
 			return fmt.Errorf("verification failed")
 		}
+		if ch != nil {
+			ch.offset[level] = layout.digestOffset(level, blockIndex)
+			ch.hash[level] = expectedDigest
+		}
 		blockIndex = blockIndex / layout.hashesPerBlock()
 	}
 
 	// Verification for the tree succeeded. Now hash the descriptor with
 	// the root hash and compare it with expected.
-	descriptor.RootHash = digest
+	if ch != nil {
+		descriptor.RootHash = ch.hash[layout.rootLevel()]
+	} else {
+		descriptor.RootHash = digest
+	}
 	return descriptor.verify(expected, hashAlgorithms)
 }
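The merkletree.go hunks above add a per-level cache so that a read spanning multiple blocks does not re-verify the upper levels of the Merkle tree for every block: once the digest at a given level and offset has been verified, any later block that climbs to the same ancestor can stop early, and the cached root-level hash is reused for the descriptor check. The following is a minimal, hypothetical Go sketch of that idea, not gVisor's implementation: the toy tree here has a fanout of 2 and keeps digests in in-memory slices, whereas the real code hashes whole tree blocks read from an io.ReaderAt, and the verifyBlock signature below is invented for illustration.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// cachedHashes mirrors the struct in the diff: for each tree level it
// remembers the offset of the last verified digest and the digest itself.
type cachedHashes struct {
	offset []int64
	hash   [][]byte
}

// verifyBlock checks data block idx against a toy Merkle tree where
// levels[l][i] is the stored digest of node i at level l (level 0 hashes
// the data blocks; each parent hashes its two children). On a cache hit,
// the remaining levels were already verified by an earlier block.
func verifyBlock(levels [][][]byte, data []byte, idx int64, root []byte, ch *cachedHashes) error {
	sum := sha256.Sum256(data)
	digest := sum[:]
	for l := 0; l < len(levels); l++ {
		if ch != nil && ch.hash[l] != nil && ch.offset[l] == idx {
			return nil // this digest and all its ancestors are already verified
		}
		if !bytes.Equal(digest, levels[l][idx]) {
			return fmt.Errorf("verification failed at level %d", l)
		}
		if ch != nil {
			ch.offset[l], ch.hash[l] = idx, digest
		}
		// The parent digest covers this node and its sibling.
		pair := append(append([]byte{}, levels[l][idx&^1]...), levels[l][idx|1]...)
		parent := sha256.Sum256(pair)
		digest, idx = parent[:], idx/2
	}
	if !bytes.Equal(digest, root) {
		return fmt.Errorf("root hash mismatch")
	}
	return nil
}

func main() {
	// Build a complete tree over four data blocks.
	blocks := [][]byte{[]byte("b0"), []byte("b1"), []byte("b2"), []byte("b3")}
	level0 := make([][]byte, 4)
	for i, b := range blocks {
		s := sha256.Sum256(b)
		level0[i] = s[:]
	}
	level1 := make([][]byte, 2)
	for j := 0; j < 2; j++ {
		s := sha256.Sum256(append(append([]byte{}, level0[2*j]...), level0[2*j+1]...))
		level1[j] = s[:]
	}
	r := sha256.Sum256(append(append([]byte{}, level1[0]...), level1[1]...))
	levels := [][][]byte{level0, level1}

	// One cache shared across the whole multi-block read, as in Verify.
	ch := &cachedHashes{offset: make([]int64, 2), hash: make([][]byte, 2)}
	for i, b := range blocks {
		if err := verifyBlock(levels, b, int64(i), r[:], ch); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("all blocks verified")
}

With the shared cache, blocks 1 and 3 stop at level 0 and block 3 never re-reads the upper levels, which is exactly the saving the diff is after for large reads.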
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index 5a8f1186c..7a3c97c5a 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -55,7 +55,7 @@ func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virt
 	}
 
 	// Adjust the block to match our size.
-	physicalStart = pr.physical + (alignedPhysical - pr.physical) & faultBlockMask
+	physicalStart = pr.physical + (alignedPhysical-pr.physical)&faultBlockMask
 	virtualStart = pr.virtual + (physicalStart - pr.physical)
 	physicalEnd := physicalStart + faultBlockSize
 	if physicalEnd > end {
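The bluepill_fault.go hunk is pure gofmt formatting, not a behavior change: in Go, & binds more tightly than +, so both the old and the new spelling parse as pr.physical + ((alignedPhysical - pr.physical) & faultBlockMask). gofmt merely drops the spaces around the higher-precedence operator to make that grouping visible. A quick self-contained check, with made-up constants (the real faultBlockSize and faultBlockMask in the KVM platform differ):

package main

import "fmt"

func main() {
	// Hypothetical values, chosen only to exercise the expression.
	const faultBlockSize = uintptr(1) << 12
	const faultBlockMask = ^(faultBlockSize - 1)

	base, aligned := uintptr(0x1000), uintptr(0x5432)

	a := base + (aligned-base)&faultBlockMask       // gofmt spelling
	b := base + ((aligned - base) & faultBlockMask) // explicit grouping
	fmt.Printf("%#x %#x %v\n", a, b, a == b)        // 0x5000 0x5000 true
}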
diff --git a/tools/bazel.mk b/tools/bazel.mk
index 5893c7c7e..4f979bbeb 100644
--- a/tools/bazel.mk
+++ b/tools/bazel.mk
@@ -181,35 +181,17 @@ endif
 
 # build_paths extracts the built binary from the bazel stderr output.
 #
-# This could be alternately done by parsing the bazel build event stream, but
-# this is a complex schema, and begs the question: what will build the thing
-# that parses the output? Bazel? Do we need a separate bootstrapping build
-# command here? Yikes, let's just stick with the ugly shell pipeline.
-#
 # The last line is used to prevent terminal shenanigans.
 build_paths = \
   (set -euo pipefail; \
-  $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \
-  | tee /dev/fd/2 \
-  | sed -n -e '/^Target/,$$p' \
-  | sed -n -e '/^ \($(subst /,\/,$(subst $(SPACE),\|,$(BUILD_ROOTS)))\)/p' \
-  | sed -e 's/ /\n/g' \
-  | awk '{$$1=$$1};1' \
-  | strings \
-  | xargs -r -n 1 -I {} readlink -f "{}" \
-  | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
-
-debian_paths = \
-  (set -euo pipefail; \
   $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) && \
-  $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=debian/show_paths.bzl) \
+  $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=tools/show_paths.bzl) \
   | xargs -r -n 1 -I {} readlink -f "{}" \
   | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
 
 clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
 build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
 copy = $(call header,COPY $(1) $(2)) && $(call build_paths,$(1),cp -fa {} $(2))
-deb_copy = $(call header,COPY $(1) $(2)) && $(call debian_paths,$(1),cp -fa {} $(2))
 run = $(call header,RUN $(1) $(2)) && $(call build_paths,$(1),{} $(2))
 sudo = $(call header,SUDO $(1) $(2)) && $(call build_paths,$(1),sudo -E {} $(2))
 test = $(call header,TEST $(1)) && $(call wrapper,$(BAZEL) test $(TEST_OPTIONS) $(1))
diff --git a/tools/show_paths.bzl b/tools/show_paths.bzl
new file mode 100644
index 000000000..ba78d3494
--- /dev/null
+++ b/tools/show_paths.bzl
@@ -0,0 +1,25 @@
+"""Formatter to extract the output files from a target."""
+
+def format(target):
+    provider_map = providers(target)
+    outputs = dict()
+
+    # Try to resolve in order.
+    files_to_run = provider_map.get("FilesToRunProvider", None)
+    default_info = provider_map.get("DefaultInfo", None)
+    output_group_info = provider_map.get("OutputGroupInfo", None)
+    if files_to_run and files_to_run.executable:
+        outputs[files_to_run.executable.path] = True
+    elif default_info:
+        for x in default_info.files:
+            outputs[x.path] = True
+    elif output_group_info:
+        for entry in dir(output_group_info):
+            # Filter out all built-ins and anything that is not a depset.
+            if entry.startswith("_") or not hasattr(getattr(output_group_info, entry), "to_list"):
+                continue
+            for x in getattr(output_group_info, entry).to_list():
+                outputs[x.path] = True
+
+    # Return all found files.
+    return "\n".join(outputs.keys())
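With this change, tools/show_paths.bzl prints one output path per line and build_paths pipes each line through readlink -f, so the command in $(2) always receives resolved absolute paths rather than bazel-out convenience symlinks, and the old stderr-scraping sed/awk pipeline disappears entirely. As an illustrative aside (this program is not part of the repo), the resolution step has roughly these semantics in Go; note that unlike readlink -f, filepath.EvalSymlinks fails if the path does not exist:

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

// Reads newline-separated paths (as emitted by the Starlark formatter)
// from stdin and prints each one resolved to an absolute, symlink-free
// path, mimicking `xargs -r -n 1 -I {} readlink -f "{}"`.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue // skip blank lines
		}
		resolved, err := filepath.EvalSymlinks(line)
		if err != nil {
			fmt.Fprintln(os.Stderr, "skipping:", err)
			continue
		}
		abs, err := filepath.Abs(resolved)
		if err != nil {
			fmt.Fprintln(os.Stderr, "skipping:", err)
			continue
		}
		fmt.Println(abs)
	}
}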