author     Chong Cai <chongc@google.com>  2020-10-09 17:27:24 -0700
committer  gVisor bot <gvisor-bot@google.com>  2020-10-09 17:29:34 -0700
commit     5389e441a5480450eb430adf10a552dd3dd1d4f7 (patch)
tree       963102776a916f423eaa71aff80d29c396eb2376 /pkg
parent     79a5910c04ed18901f755588003ca62d0646b763 (diff)
Include stat in Verity hash
PiperOrigin-RevId: 336395445
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/merkletree/merkletree.go            | 222
-rw-r--r--  pkg/merkletree/merkletree_test.go       | 175
-rw-r--r--  pkg/sentry/fsimpl/verity/filesystem.go  | 145
-rw-r--r--  pkg/sentry/fsimpl/verity/verity.go      |  74
-rw-r--r--  pkg/sentry/fsimpl/verity/verity_test.go | 103
5 files changed, 602 insertions, 117 deletions
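
This change folds file metadata into the verity root hash: Generate no longer returns the raw Merkle-tree root, but the SHA-256 of a serialized VerityDescriptor that combines that root with the file's name, mode, UID, and GID. The following standalone sketch illustrates just that final hashing step; the descriptor type mirrors the one added in the patch below, while the field values and the all-zero stand-in root are purely illustrative.

package main

import (
	"crypto/sha256"
	"fmt"
)

// VerityDescriptor mirrors the struct added to pkg/merkletree: the raw Merkle
// root of the file contents plus the metadata that is now covered by the hash.
type VerityDescriptor struct {
	Name     string
	Mode     uint32
	UID      uint32
	GID      uint32
	RootHash []byte
}

func (d *VerityDescriptor) String() string {
	return fmt.Sprintf("Name: %s, Mode: %d, UID: %d, GID: %d, RootHash: %v", d.Name, d.Mode, d.UID, d.GID, d.RootHash)
}

func main() {
	// rawRoot stands in for the Merkle-tree root computed from the file data;
	// an all-zero value is used here only for illustration.
	rawRoot := make([]byte, sha256.Size)
	d := VerityDescriptor{Name: "example", Mode: 0644, UID: 0, GID: 0, RootHash: rawRoot}

	// The value returned by Generate (and compared by Verify) is the SHA-256
	// of the serialized descriptor, so changing any metadata field changes
	// the resulting root hash.
	root := sha256.Sum256([]byte(d.String()))
	fmt.Printf("root hash: %x\n", root[:])
}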
diff --git a/pkg/merkletree/merkletree.go b/pkg/merkletree/merkletree.go
index ccef83259..a6e698f57 100644
--- a/pkg/merkletree/merkletree.go
+++ b/pkg/merkletree/merkletree.go
@@ -123,45 +123,87 @@ func (layout Layout) blockOffset(level int, index int64) int64 {
return layout.levelOffset[level] + index*layout.blockSize
}
+// VerityDescriptor is a struct that is serialized and hashed to get a file's
+// root hash. It contains the root hash of the raw content and the file's
+// metadata.
+type VerityDescriptor struct {
+ Name string
+ Mode uint32
+ UID uint32
+ GID uint32
+ RootHash []byte
+}
+
+func (d *VerityDescriptor) String() string {
+ return fmt.Sprintf("Name: %s, Mode: %d, UID: %d, GID: %d, RootHash: %v", d.Name, d.Mode, d.UID, d.GID, d.RootHash)
+}
+
+// GenerateParams contains the parameters used to generate a Merkle tree.
+type GenerateParams struct {
+ // File is a reader of the file to be hashed.
+ File io.ReadSeeker
+ // Size is the size of the file.
+ Size int64
+ // Name is the name of the target file.
+ Name string
+ // Mode is the mode of the target file.
+ Mode uint32
+ // UID is the user ID of the target file.
+ UID uint32
+ // GID is the group ID of the target file.
+ GID uint32
+ // TreeReader is a reader for the Merkle tree.
+ TreeReader io.ReadSeeker
+ // TreeWriter is a writer for the Merkle tree.
+ TreeWriter io.WriteSeeker
+ // DataAndTreeInSameFile is true if data and Merkle tree are in the same
+ // file, or false if Merkle tree is a separate file from data.
+ DataAndTreeInSameFile bool
+}
+
// Generate constructs a Merkle tree for the contents of data. The output is
// written to treeWriter. The treeReader should be able to read the tree after
// it has been written. That is, treeWriter and treeReader should point to the
// same underlying data but have separate cursors.
+//
+// Generate returns a hash of descriptor. The descriptor contains the file
+// metadata and the hash from file content.
+//
// Generate will modify the cursor for data, but always restores it to its
// original position upon exit. The cursor for tree is modified and not
// restored.
-func Generate(data io.ReadSeeker, dataSize int64, treeReader io.ReadSeeker, treeWriter io.WriteSeeker, dataAndTreeInSameFile bool) ([]byte, error) {
- layout := InitLayout(dataSize, dataAndTreeInSameFile)
+func Generate(params *GenerateParams) ([]byte, error) {
+ layout := InitLayout(params.Size, params.DataAndTreeInSameFile)
- numBlocks := (dataSize + layout.blockSize - 1) / layout.blockSize
+ numBlocks := (params.Size + layout.blockSize - 1) / layout.blockSize
// If the data is in the same file as the tree, zero pad the last data
// block.
- bytesInLastBlock := dataSize % layout.blockSize
- if dataAndTreeInSameFile && bytesInLastBlock != 0 {
+ bytesInLastBlock := params.Size % layout.blockSize
+ if params.DataAndTreeInSameFile && bytesInLastBlock != 0 {
zeroBuf := make([]byte, layout.blockSize-bytesInLastBlock)
- if _, err := treeWriter.Seek(0, io.SeekEnd); err != nil && err != io.EOF {
+ if _, err := params.TreeWriter.Seek(0, io.SeekEnd); err != nil && err != io.EOF {
return nil, err
}
- if _, err := treeWriter.Write(zeroBuf); err != nil {
+ if _, err := params.TreeWriter.Write(zeroBuf); err != nil {
return nil, err
}
}
// Store the current offset, so we can set it back once verification
// finishes.
- origOffset, err := data.Seek(0, io.SeekCurrent)
+ origOffset, err := params.File.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
- defer data.Seek(origOffset, io.SeekStart)
+ defer params.File.Seek(origOffset, io.SeekStart)
// Read from the beginning of both data and treeReader.
- if _, err := data.Seek(0, io.SeekStart); err != nil && err != io.EOF {
+ if _, err := params.File.Seek(0, io.SeekStart); err != nil && err != io.EOF {
return nil, err
}
- if _, err := treeReader.Seek(0, io.SeekStart); err != nil && err != io.EOF {
+ if _, err := params.TreeReader.Seek(0, io.SeekStart); err != nil && err != io.EOF {
return nil, err
}
@@ -176,11 +218,11 @@ func Generate(data io.ReadSeeker, dataSize int64, treeReader io.ReadSeeker, tree
if level == 0 {
// Read data block from the target file since level 0 includes hashes
// of blocks in the input data.
- n, err = data.Read(buf)
+ n, err = params.File.Read(buf)
} else {
// Read data block from the tree file since levels higher than 0 are
// hashing the lower level hashes.
- n, err = treeReader.Read(buf)
+ n, err = params.TreeReader.Read(buf)
}
// err is populated as long as the bytes read is smaller than the buffer
@@ -200,7 +242,7 @@ func Generate(data io.ReadSeeker, dataSize int64, treeReader io.ReadSeeker, tree
}
// Write the generated hash to the end of the tree file.
- if _, err = treeWriter.Write(digest[:]); err != nil {
+ if _, err = params.TreeWriter.Write(digest[:]); err != nil {
return nil, err
}
}
@@ -208,45 +250,124 @@ func Generate(data io.ReadSeeker, dataSize int64, treeReader io.ReadSeeker, tree
// remaining of the last block. But no need to do so for root.
if level != layout.rootLevel() && numBlocks%layout.hashesPerBlock() != 0 {
zeroBuf := make([]byte, layout.blockSize-(numBlocks%layout.hashesPerBlock())*layout.digestSize)
- if _, err := treeWriter.Write(zeroBuf[:]); err != nil {
+ if _, err := params.TreeWriter.Write(zeroBuf[:]); err != nil {
return nil, err
}
}
numBlocks = (numBlocks + layout.hashesPerBlock() - 1) / layout.hashesPerBlock()
}
- return root, nil
+ descriptor := VerityDescriptor{
+ Name: params.Name,
+ Mode: params.Mode,
+ UID: params.UID,
+ GID: params.GID,
+ RootHash: root,
+ }
+ ret := sha256.Sum256([]byte(descriptor.String()))
+ return ret[:], nil
+}
+
+// VerifyParams contains the params used to verify a portion of a file against
+// a Merkle tree.
+type VerifyParams struct {
+ // Out will be filled with verified data.
+ Out io.Writer
+ // File is a handler on the file to be verified.
+ File io.ReadSeeker
+ // Tree is a handler on the Merkle tree used to verify File.
+ Tree io.ReadSeeker
+ // Size is the size of the file.
+ Size int64
+ // Name is the name of the target file.
+ Name string
+ // Mode is the mode of the target file.
+ Mode uint32
+ // UID is the user ID of the target file.
+ UID uint32
+ // GID is the group ID of the target file.
+ GID uint32
+ // ReadOffset is the offset of the data range to be verified.
+ ReadOffset int64
+ // ReadSize is the size of the data range to be verified.
+ ReadSize int64
+ // ExpectedRoot is a trusted hash for the file. It is compared with the
+ // calculated root hash to verify the content.
+ ExpectedRoot []byte
+ // DataAndTreeInSameFile is true if data and Merkle tree are in the same
+ // file, or false if Merkle tree is a separate file from data.
+ DataAndTreeInSameFile bool
+}
+
+// verifyDescriptor generates a hash from descriptor, and compares it with
+// expectedRoot.
+func verifyDescriptor(descriptor VerityDescriptor, expectedRoot []byte) error {
+ h := sha256.Sum256([]byte(descriptor.String()))
+ if !bytes.Equal(h[:], expectedRoot) {
+ return fmt.Errorf("unexpected root hash")
+ }
+ return nil
+}
+
+// verifyMetadata verifies the metadata by hashing a descriptor that contains
+// the metadata and comparing the generated hash with expectedRoot.
+//
+// For verifyMetadata, params.File is not needed. It only accesses params.Tree
+// for the raw root hash.
+func verifyMetadata(params *VerifyParams, layout Layout) error {
+ if _, err := params.Tree.Seek(layout.blockOffset(layout.rootLevel(), 0 /* index */), io.SeekStart); err != nil {
+ return fmt.Errorf("failed to seek to root hash: %w", err)
+ }
+ root := make([]byte, layout.digestSize)
+ if _, err := params.Tree.Read(root); err != nil {
+ return fmt.Errorf("failed to read root hash: %w", err)
+ }
+ descriptor := VerityDescriptor{
+ Name: params.Name,
+ Mode: params.Mode,
+ UID: params.UID,
+ GID: params.GID,
+ RootHash: root,
+ }
+ return verifyDescriptor(descriptor, params.ExpectedRoot)
}
// Verify verifies the content read from data with offset. The content is
// verified against tree. If content spans across multiple blocks, each block is
// verified. Verification fails if the hash of the data does not match the tree
// at any level, or if the final root hash does not match expectedRoot.
-// Once the data is verified, it will be written using w.
+// Once the data is verified, it will be written using params.Out.
+//
+// Verify checks both the target file content and its metadata. If
+// params.ReadSize is 0, only the metadata is checked.
+//
// Verify will modify the cursor for data, but always restores it to its
// original position upon exit. The cursor for tree is modified and not
// restored.
-func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset int64, readSize int64, expectedRoot []byte, dataAndTreeInSameFile bool) (int64, error) {
- if readSize <= 0 {
- return 0, fmt.Errorf("Unexpected read size: %d", readSize)
+func Verify(params *VerifyParams) (int64, error) {
+ if params.ReadSize < 0 {
+ return 0, fmt.Errorf("unexpected read size: %d", params.ReadSize)
+ }
+ layout := InitLayout(int64(params.Size), params.DataAndTreeInSameFile)
+ if params.ReadSize == 0 {
+ return 0, verifyMetadata(params, layout)
}
- layout := InitLayout(int64(dataSize), dataAndTreeInSameFile)
// Calculate the index of blocks that includes the target range in input
// data.
- firstDataBlock := readOffset / layout.blockSize
- lastDataBlock := (readOffset + readSize - 1) / layout.blockSize
+ firstDataBlock := params.ReadOffset / layout.blockSize
+ lastDataBlock := (params.ReadOffset + params.ReadSize - 1) / layout.blockSize
// Store the current offset, so we can set it back once verification
// finishes.
- origOffset, err := data.Seek(0, io.SeekCurrent)
+ origOffset, err := params.File.Seek(0, io.SeekCurrent)
if err != nil {
- return 0, fmt.Errorf("Find current data offset failed: %v", err)
+ return 0, fmt.Errorf("find current data offset failed: %w", err)
}
- defer data.Seek(origOffset, io.SeekStart)
+ defer params.File.Seek(origOffset, io.SeekStart)
// Move to the first block that contains target data.
- if _, err := data.Seek(firstDataBlock*layout.blockSize, io.SeekStart); err != nil {
- return 0, fmt.Errorf("Seek to datablock start failed: %v", err)
+ if _, err := params.File.Seek(firstDataBlock*layout.blockSize, io.SeekStart); err != nil {
+ return 0, fmt.Errorf("seek to datablock start failed: %w", err)
}
buf := make([]byte, layout.blockSize)
@@ -255,7 +376,7 @@ func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset in
for i := firstDataBlock; i <= lastDataBlock; i++ {
// Read a block that includes all or part of target range in
// input data.
- bytesRead, err := data.Read(buf)
+ bytesRead, err := params.File.Read(buf)
readErr = err
// If at the end of input data and all previous blocks are
// verified, return the verified input data and EOF.
@@ -263,7 +384,7 @@ func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset in
break
}
if readErr != nil && readErr != io.EOF {
- return 0, fmt.Errorf("Read from data failed: %v", err)
+ return 0, fmt.Errorf("read from data failed: %w", err)
}
// If this is the end of file, zero the remaining bytes in buf,
// otherwise they are still from the previous block.
@@ -274,22 +395,29 @@ func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset in
buf[j] = 0
}
}
- if err := verifyBlock(tree, layout, buf, i, expectedRoot); err != nil {
+ descriptor := VerityDescriptor{
+ Name: params.Name,
+ Mode: params.Mode,
+ UID: params.UID,
+ GID: params.GID,
+ }
+ if err := verifyBlock(params.Tree, descriptor, layout, buf, i, params.ExpectedRoot); err != nil {
return 0, err
}
+
// startOff is the beginning of the read range within the
// current data block. Note that for all blocks other than the
// first, startOff should be 0.
startOff := int64(0)
if i == firstDataBlock {
- startOff = readOffset % layout.blockSize
+ startOff = params.ReadOffset % layout.blockSize
}
// endOff is the end of the read range within the current data
// block. Note that for all blocks other than the last, endOff
// should be the block size.
endOff := layout.blockSize
if i == lastDataBlock {
- endOff = (readOffset+readSize-1)%layout.blockSize + 1
+ endOff = (params.ReadOffset+params.ReadSize-1)%layout.blockSize + 1
}
// If the provided size exceeds the end of input data, we should
// only copy the parts in buf that's part of input data.
@@ -299,7 +427,7 @@ func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset in
if endOff > int64(bytesRead) {
endOff = int64(bytesRead)
}
- n, err := w.Write(buf[startOff:endOff])
+ n, err := params.Out.Write(buf[startOff:endOff])
if err != nil {
return total, err
}
@@ -313,9 +441,11 @@ func Verify(w io.Writer, data, tree io.ReadSeeker, dataSize int64, readOffset in
// original data. The block is verified through each level of the tree. It
// fails if the calculated hash from block is different from any level of
// hashes stored in tree. And the final root hash is compared with
-// expectedRoot. verifyBlock modifies the cursor for tree. Users needs to
-// maintain the cursor if intended.
-func verifyBlock(tree io.ReadSeeker, layout Layout, dataBlock []byte, blockIndex int64, expectedRoot []byte) error {
+// expectedRoot.
+//
+// verifyBlock modifies the cursor for tree. Callers need to maintain the
+// cursor themselves if needed.
+func verifyBlock(tree io.ReadSeeker, descriptor VerityDescriptor, layout Layout, dataBlock []byte, blockIndex int64, expectedRoot []byte) error {
if len(dataBlock) != int(layout.blockSize) {
return fmt.Errorf("incorrect block size")
}
@@ -352,21 +482,13 @@ func verifyBlock(tree io.ReadSeeker, layout Layout, dataBlock []byte, blockIndex
}
if !bytes.Equal(digest, expectedDigest) {
- return fmt.Errorf("Verification failed")
- }
-
- // If this is the root layer, no need to generate next level
- // hash.
- if level == layout.rootLevel() {
- break
+ return fmt.Errorf("verification failed")
}
blockIndex = blockIndex / layout.hashesPerBlock()
}
- // Verification for the tree succeeded. Now compare the root hash in the
- // tree with expectedRoot.
- if !bytes.Equal(digest[:], expectedRoot) {
- return fmt.Errorf("Verification failed")
- }
- return nil
+ // Verification for the tree succeeded. Now hash the descriptor with
+ // the root hash and compare it with expectedRoot.
+ descriptor.RootHash = digest
+ return verifyDescriptor(descriptor, expectedRoot)
}
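
For callers, the signature change means Generate and Verify now take parameter structs instead of long argument lists. The sketch below is a hypothetical, standalone use of the new API with in-memory buffers: the memTree helper, the file name "example", and the mode/UID/GID values are illustrative and not part of this change. It generates a tree for a small buffer and then exercises the new metadata-only verification path by passing ReadSize: 0.

package main

import (
	"bytes"
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/merkletree"
)

// memTree is an in-memory stand-in for the Merkle tree file. Like the test
// helper in this package, writes append to the buffer while reads and seeks
// use a separate cursor, which is the behavior Generate expects.
type memTree struct {
	buf     []byte
	readOff int64
}

func (m *memTree) Write(p []byte) (int, error) {
	m.buf = append(m.buf, p...)
	return len(p), nil
}

func (m *memTree) Read(p []byte) (int, error) {
	if m.readOff >= int64(len(m.buf)) {
		return 0, io.EOF
	}
	n := copy(p, m.buf[m.readOff:])
	m.readOff += int64(n)
	return n, nil
}

func (m *memTree) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekCurrent:
		offset += m.readOff
	case io.SeekEnd:
		offset += int64(len(m.buf))
	}
	m.readOff = offset
	return offset, nil
}

func main() {
	data := bytes.Repeat([]byte{'a'}, 4096)
	var tree memTree

	// Generate builds the Merkle tree and returns the hash of the
	// VerityDescriptor (raw root plus name/mode/UID/GID).
	root, err := merkletree.Generate(&merkletree.GenerateParams{
		File:                  bytes.NewReader(data),
		Size:                  int64(len(data)),
		Name:                  "example",
		Mode:                  0644,
		UID:                   0,
		GID:                   0,
		TreeReader:            &tree,
		TreeWriter:            &tree,
		DataAndTreeInSameFile: false,
	})
	if err != nil {
		panic(err)
	}

	// ReadSize == 0 exercises the new metadata-only path: only the descriptor
	// (metadata plus the stored raw root) is checked against ExpectedRoot.
	var out bytes.Buffer
	if _, err := merkletree.Verify(&merkletree.VerifyParams{
		Out:          &out,
		File:         bytes.NewReader(data),
		Tree:         &tree,
		Size:         int64(len(data)),
		Name:         "example",
		Mode:         0644,
		UID:          0,
		GID:          0,
		ReadOffset:   0,
		ReadSize:     0,
		ExpectedRoot: root,
	}); err != nil {
		panic(err)
	}
	fmt.Printf("metadata verified, root hash: %x\n", root)
}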
diff --git a/pkg/merkletree/merkletree_test.go b/pkg/merkletree/merkletree_test.go
index daaca759a..bb11ec844 100644
--- a/pkg/merkletree/merkletree_test.go
+++ b/pkg/merkletree/merkletree_test.go
@@ -84,6 +84,13 @@ func TestLayout(t *testing.T) {
}
}
+const (
+ defaultName = "merkle_test"
+ defaultMode = 0644
+ defaultUID = 0
+ defaultGID = 0
+)
+
// bytesReadWriter is used to read from/write to/seek in a byte array. Unlike
// bytes.Buffer, it keeps the whole buffer during read so that it can be reused.
type bytesReadWriter struct {
@@ -138,19 +145,19 @@ func TestGenerate(t *testing.T) {
}{
{
data: bytes.Repeat([]byte{0}, usermem.PageSize),
- expectedRoot: []byte{173, 127, 172, 178, 88, 111, 198, 233, 102, 192, 4, 215, 209, 209, 107, 2, 79, 88, 5, 255, 124, 180, 124, 122, 133, 218, 189, 139, 72, 137, 44, 167},
+ expectedRoot: []byte{64, 253, 58, 72, 192, 131, 82, 184, 193, 33, 108, 142, 43, 46, 179, 134, 244, 21, 29, 190, 14, 39, 66, 129, 6, 46, 200, 211, 30, 247, 191, 252},
},
{
data: bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
- expectedRoot: []byte{62, 93, 40, 92, 161, 241, 30, 223, 202, 99, 39, 2, 132, 113, 240, 139, 117, 99, 79, 243, 54, 18, 100, 184, 141, 121, 238, 46, 149, 202, 203, 132},
+ expectedRoot: []byte{182, 223, 218, 62, 65, 185, 160, 219, 93, 119, 186, 88, 205, 32, 122, 231, 173, 72, 78, 76, 65, 57, 177, 146, 159, 39, 44, 123, 230, 156, 97, 26},
},
{
data: []byte{'a'},
- expectedRoot: []byte{52, 75, 204, 142, 172, 129, 37, 14, 145, 137, 103, 203, 11, 162, 209, 205, 30, 169, 213, 72, 20, 28, 243, 24, 242, 2, 92, 43, 169, 59, 110, 210},
+ expectedRoot: []byte{28, 201, 8, 36, 150, 178, 111, 5, 193, 212, 129, 205, 206, 124, 211, 90, 224, 142, 81, 183, 72, 165, 243, 240, 242, 241, 76, 127, 101, 61, 63, 11},
},
{
data: bytes.Repeat([]byte{'a'}, usermem.PageSize),
- expectedRoot: []byte{201, 62, 238, 45, 13, 176, 47, 16, 172, 199, 70, 13, 149, 118, 225, 34, 220, 248, 205, 83, 196, 191, 141, 252, 174, 27, 62, 116, 235, 207, 255, 90},
+ expectedRoot: []byte{106, 58, 160, 152, 41, 68, 38, 108, 245, 74, 177, 84, 64, 193, 19, 176, 249, 86, 27, 193, 85, 164, 99, 240, 79, 104, 148, 222, 76, 46, 191, 79},
},
}
@@ -158,16 +165,25 @@ func TestGenerate(t *testing.T) {
t.Run(fmt.Sprintf("%d:%v", len(tc.data), tc.data[0]), func(t *testing.T) {
for _, dataAndTreeInSameFile := range []bool{false, true} {
var tree bytesReadWriter
- var root []byte
- var err error
+ params := GenerateParams{
+ Size: int64(len(tc.data)),
+ Name: defaultName,
+ Mode: defaultMode,
+ UID: defaultUID,
+ GID: defaultGID,
+ TreeReader: &tree,
+ TreeWriter: &tree,
+ DataAndTreeInSameFile: dataAndTreeInSameFile,
+ }
if dataAndTreeInSameFile {
tree.Write(tc.data)
- root, err = Generate(&tree, int64(len(tc.data)), &tree, &tree, dataAndTreeInSameFile)
+ params.File = &tree
} else {
- root, err = Generate(&bytesReadWriter{
+ params.File = &bytesReadWriter{
bytes: tc.data,
- }, int64(len(tc.data)), &tree, &tree, dataAndTreeInSameFile)
+ }
}
+ root, err := Generate(&params)
if err != nil {
t.Fatalf("Got err: %v, want nil", err)
}
@@ -194,6 +210,10 @@ func TestVerify(t *testing.T) {
// modified byte falls in verification range, Verify should
// fail, otherwise Verify should still succeed.
modifyByte int64
+ modifyName bool
+ modifyMode bool
+ modifyUID bool
+ modifyGID bool
shouldSucceed bool
}{
// Verify range start outside the data range should fail.
@@ -222,12 +242,48 @@ func TestVerify(t *testing.T) {
modifyByte: 0,
shouldSucceed: false,
},
- // Invalid verify range (0 size) should fail.
+ // 0 verify size should only verify metadata.
+ {
+ dataSize: usermem.PageSize,
+ verifyStart: 0,
+ verifySize: 0,
+ modifyByte: 0,
+ shouldSucceed: true,
+ },
+ // Modified name should fail verification.
+ {
+ dataSize: usermem.PageSize,
+ verifyStart: 0,
+ verifySize: 0,
+ modifyByte: 0,
+ modifyName: true,
+ shouldSucceed: false,
+ },
+ // Modified mode should fail verification.
{
dataSize: usermem.PageSize,
verifyStart: 0,
verifySize: 0,
modifyByte: 0,
+ modifyMode: true,
+ shouldSucceed: false,
+ },
+ // Modified UID should fail verification.
+ {
+ dataSize: usermem.PageSize,
+ verifyStart: 0,
+ verifySize: 0,
+ modifyByte: 0,
+ modifyUID: true,
+ shouldSucceed: false,
+ },
+ // Modified GID should fail verification.
+ {
+ dataSize: usermem.PageSize,
+ verifyStart: 0,
+ verifySize: 0,
+ modifyByte: 0,
+ modifyGID: true,
shouldSucceed: false,
},
// The test cases below use a block-aligned verify range.
@@ -316,16 +372,25 @@ func TestVerify(t *testing.T) {
for _, dataAndTreeInSameFile := range []bool{false, true} {
var tree bytesReadWriter
- var root []byte
- var err error
+ genParams := GenerateParams{
+ Size: int64(len(data)),
+ Name: defaultName,
+ Mode: defaultMode,
+ UID: defaultUID,
+ GID: defaultGID,
+ TreeReader: &tree,
+ TreeWriter: &tree,
+ DataAndTreeInSameFile: dataAndTreeInSameFile,
+ }
if dataAndTreeInSameFile {
tree.Write(data)
- root, err = Generate(&tree, int64(len(data)), &tree, &tree, dataAndTreeInSameFile)
+ genParams.File = &tree
} else {
- root, err = Generate(&bytesReadWriter{
+ genParams.File = &bytesReadWriter{
bytes: data,
- }, int64(tc.dataSize), &tree, &tree, false /* dataAndTreeInSameFile */)
+ }
}
+ root, err := Generate(&genParams)
if err != nil {
t.Fatalf("Generate failed: %v", err)
}
@@ -333,8 +398,34 @@ func TestVerify(t *testing.T) {
// Flip a bit in data and checks Verify results.
var buf bytes.Buffer
data[tc.modifyByte] ^= 1
+ verifyParams := VerifyParams{
+ Out: &buf,
+ File: bytes.NewReader(data),
+ Tree: &tree,
+ Size: tc.dataSize,
+ Name: defaultName,
+ Mode: defaultMode,
+ UID: defaultUID,
+ GID: defaultGID,
+ ReadOffset: tc.verifyStart,
+ ReadSize: tc.verifySize,
+ ExpectedRoot: root,
+ DataAndTreeInSameFile: dataAndTreeInSameFile,
+ }
+ if tc.modifyName {
+ verifyParams.Name = defaultName + "abc"
+ }
+ if tc.modifyMode {
+ verifyParams.Mode = defaultMode + 1
+ }
+ if tc.modifyUID {
+ verifyParams.UID = defaultUID + 1
+ }
+ if tc.modifyGID {
+ verifyParams.GID = defaultGID + 1
+ }
if tc.shouldSucceed {
- n, err := Verify(&buf, bytes.NewReader(data), &tree, tc.dataSize, tc.verifyStart, tc.verifySize, root, dataAndTreeInSameFile)
+ n, err := Verify(&verifyParams)
if err != nil && err != io.EOF {
t.Errorf("Verification failed when expected to succeed: %v", err)
}
@@ -348,7 +439,7 @@ func TestVerify(t *testing.T) {
t.Errorf("Incorrect output buf from Verify")
}
} else {
- if _, err := Verify(&buf, bytes.NewReader(data), &tree, tc.dataSize, tc.verifyStart, tc.verifySize, root, dataAndTreeInSameFile); err == nil {
+ if _, err := Verify(&verifyParams); err == nil {
t.Errorf("Verification succeeded when expected to fail")
}
}
@@ -368,16 +459,26 @@ func TestVerifyRandom(t *testing.T) {
for _, dataAndTreeInSameFile := range []bool{false, true} {
var tree bytesReadWriter
- var root []byte
- var err error
+ genParams := GenerateParams{
+ Size: int64(len(data)),
+ Name: defaultName,
+ Mode: defaultMode,
+ UID: defaultUID,
+ GID: defaultGID,
+ TreeReader: &tree,
+ TreeWriter: &tree,
+ DataAndTreeInSameFile: dataAndTreeInSameFile,
+ }
+
if dataAndTreeInSameFile {
tree.Write(data)
- root, err = Generate(&tree, int64(len(data)), &tree, &tree, dataAndTreeInSameFile)
+ genParams.File = &tree
} else {
- root, err = Generate(&bytesReadWriter{
+ genParams.File = &bytesReadWriter{
bytes: data,
- }, int64(dataSize), &tree, &tree, dataAndTreeInSameFile)
+ }
}
+ root, err := Generate(&genParams)
if err != nil {
t.Fatalf("Generate failed: %v", err)
}
@@ -387,9 +488,24 @@ func TestVerifyRandom(t *testing.T) {
size := rand.Int63n(dataSize) + 1
var buf bytes.Buffer
+ verifyParams := VerifyParams{
+ Out: &buf,
+ File: bytes.NewReader(data),
+ Tree: &tree,
+ Size: dataSize,
+ Name: defaultName,
+ Mode: defaultMode,
+ UID: defaultUID,
+ GID: defaultGID,
+ ReadOffset: start,
+ ReadSize: size,
+ ExpectedRoot: root,
+ DataAndTreeInSameFile: dataAndTreeInSameFile,
+ }
+
// Checks that the random portion of data from the original data is
// verified successfully.
- n, err := Verify(&buf, bytes.NewReader(data), &tree, dataSize, start, size, root, dataAndTreeInSameFile)
+ n, err := Verify(&verifyParams)
if err != nil && err != io.EOF {
t.Errorf("Verification failed for correct data: %v", err)
}
@@ -406,13 +522,22 @@ func TestVerifyRandom(t *testing.T) {
t.Errorf("Incorrect output buf from Verify")
}
+ // Verify that modified metadata should fail verification.
buf.Reset()
+ verifyParams.Name = defaultName + "abc"
+ if _, err := Verify(&verifyParams); err == nil {
+ t.Error("Verify succeeded for modified metadata, expect failure")
+ }
+
// Flip a random bit in randPortion, and check that verification fails.
+ buf.Reset()
randBytePos := rand.Int63n(size)
data[start+randBytePos] ^= 1
+ verifyParams.File = bytes.NewReader(data)
+ verifyParams.Name = defaultName
- if _, err := Verify(&buf, bytes.NewReader(data), &tree, dataSize, start, size, root, dataAndTreeInSameFile); err == nil {
- t.Errorf("Verification succeeded for modified data")
+ if _, err := Verify(&verifyParams); err == nil {
+ t.Error("Verification succeeded for modified data, expect failure")
}
}
}
diff --git a/pkg/sentry/fsimpl/verity/filesystem.go b/pkg/sentry/fsimpl/verity/filesystem.go
index a560b0797..7779271a9 100644
--- a/pkg/sentry/fsimpl/verity/filesystem.go
+++ b/pkg/sentry/fsimpl/verity/filesystem.go
@@ -20,6 +20,7 @@ import (
"io"
"strconv"
"strings"
+ "sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
@@ -251,11 +252,35 @@ func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *de
Ctx: ctx,
}
+ parentStat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: parent.lowerVD,
+ Start: parent.lowerVD,
+ }, &vfs.StatOptions{})
+ if err == syserror.ENOENT {
+ return nil, alertIntegrityViolation(err, fmt.Sprintf("Failed to get parent stat for %s: %v", childPath, err))
+ }
+ if err != nil {
+ return nil, err
+ }
+
// Since we are verifying against a directory Merkle tree, buf should
// contain the root hash of the children in the parent Merkle tree when
// Verify returns with success.
var buf bytes.Buffer
- if _, err := merkletree.Verify(&buf, &fdReader, &fdReader, int64(parentSize), int64(offset), int64(merkletree.DigestSize()), parent.rootHash, true /* dataAndTreeInSameFile */); err != nil && err != io.EOF {
+ if _, err := merkletree.Verify(&merkletree.VerifyParams{
+ Out: &buf,
+ File: &fdReader,
+ Tree: &fdReader,
+ Size: int64(parentSize),
+ Name: parent.name,
+ Mode: uint32(parentStat.Mode),
+ UID: parentStat.UID,
+ GID: parentStat.GID,
+ ReadOffset: int64(offset),
+ ReadSize: int64(merkletree.DigestSize()),
+ ExpectedRoot: parent.rootHash,
+ DataAndTreeInSameFile: true,
+ }); err != nil && err != io.EOF {
return nil, alertIntegrityViolation(syserror.EIO, fmt.Sprintf("Verification for %s failed: %v", childPath, err))
}
@@ -266,6 +291,84 @@ func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *de
return child, nil
}
+// verifyStat verifies the stat against the verified root hash. The mode/uid/gid
+// of the file are cached once verified.
+func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Statx) error {
+ vfsObj := fs.vfsfs.VirtualFilesystem()
+
+ // Get the path to the child dentry. This is only used to provide path
+ // information in failure case.
+ childPath, err := vfsObj.PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.lowerVD)
+ if err != nil {
+ return err
+ }
+
+ verityMu.RLock()
+ defer verityMu.RUnlock()
+
+ fd, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: d.lowerMerkleVD,
+ Start: d.lowerMerkleVD,
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ })
+ if err == syserror.ENOENT {
+ return alertIntegrityViolation(err, fmt.Sprintf("Failed to open merkle file for %s: %v", childPath, err))
+ }
+ if err != nil {
+ return err
+ }
+
+ merkleSize, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{
+ Name: merkleSizeXattr,
+ Size: sizeOfStringInt32,
+ })
+
+ if err == syserror.ENODATA {
+ return alertIntegrityViolation(err, fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", merkleSizeXattr, childPath, err))
+ }
+ if err != nil {
+ return err
+ }
+
+ size, err := strconv.Atoi(merkleSize)
+ if err != nil {
+ return alertIntegrityViolation(syserror.EINVAL, fmt.Sprintf("Failed to convert xattr %s for %s to int: %v", merkleSizeXattr, childPath, err))
+ }
+
+ fdReader := vfs.FileReadWriteSeeker{
+ FD: fd,
+ Ctx: ctx,
+ }
+
+ var buf bytes.Buffer
+ params := &merkletree.VerifyParams{
+ Out: &buf,
+ Tree: &fdReader,
+ Size: int64(size),
+ Name: d.name,
+ Mode: uint32(stat.Mode),
+ UID: stat.UID,
+ GID: stat.GID,
+ ReadOffset: 0,
+ // Set read size to 0 so only the metadata is verified.
+ ReadSize: 0,
+ ExpectedRoot: d.rootHash,
+ DataAndTreeInSameFile: false,
+ }
+ if atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFDIR {
+ params.DataAndTreeInSameFile = true
+ }
+
+ if _, err := merkletree.Verify(params); err != nil && err != io.EOF {
+ return alertIntegrityViolation(err, fmt.Sprintf("Verification stat for %s failed: %v", childPath, err))
+ }
+ d.mode = uint32(stat.Mode)
+ d.uid = stat.UID
+ d.gid = stat.GID
+ return nil
+}
+
// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
if child, ok := parent.children[name]; ok {
@@ -274,9 +377,27 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
// runtime enable is allowed and the parent directory is
// enabled, we should verify the child root hash here because
// it may be cached before enabled.
- if fs.allowRuntimeEnable && len(parent.rootHash) != 0 {
- if _, err := fs.verifyChild(ctx, parent, child); err != nil {
- return nil, err
+ if fs.allowRuntimeEnable {
+ if isEnabled(parent) {
+ if _, err := fs.verifyChild(ctx, parent, child); err != nil {
+ return nil, err
+ }
+ }
+ if isEnabled(child) {
+ vfsObj := fs.vfsfs.VirtualFilesystem()
+ mask := uint32(linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID)
+ stat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: child.lowerVD,
+ Start: child.lowerVD,
+ }, &vfs.StatOptions{
+ Mask: mask,
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := fs.verifyStat(ctx, child, stat); err != nil {
+ return nil, err
+ }
}
}
return child, nil
@@ -426,7 +547,6 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
child.parent = parent
child.name = name
- // TODO(b/162788573): Verify child metadata.
child.mode = uint32(stat.Mode)
child.uid = stat.UID
child.gid = stat.GID
@@ -434,12 +554,18 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
// Verify child root hash. This should always be performed unless in
// allowRuntimeEnable mode and the parent directory hasn't been enabled
// yet.
- if !(fs.allowRuntimeEnable && len(parent.rootHash) == 0) {
+ if isEnabled(parent) {
if _, err := fs.verifyChild(ctx, parent, child); err != nil {
child.destroyLocked(ctx)
return nil, err
}
}
+ if isEnabled(child) {
+ if err := fs.verifyStat(ctx, child, stat); err != nil {
+ child.destroyLocked(ctx)
+ return nil, err
+ }
+ }
return child, nil
}
@@ -771,6 +897,8 @@ func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts
}
// StatAt implements vfs.FilesystemImpl.StatAt.
+// TODO(b/170157489): Investigate whether stats other than Mode/UID/GID should
+// be verified.
func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
var ds *[]*dentry
fs.renameMu.RLock()
@@ -788,6 +916,11 @@ func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
if err != nil {
return linux.Statx{}, err
}
+ if isEnabled(d) {
+ if err := fs.verifyStat(ctx, d, stat); err != nil {
+ return linux.Statx{}, err
+ }
+ }
return stat, nil
}
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
index fc5eabbca..3eb972237 100644
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ b/pkg/sentry/fsimpl/verity/verity.go
@@ -142,6 +142,14 @@ func (FilesystemType) Name() string {
return Name
}
+// isEnabled checks whether the target is enabled with verity features. It
+// always returns true if runtime enable is not allowed. In runtime enable
+// mode, it returns true if the target has been enabled with
+// ioctl(FS_IOC_ENABLE_VERITY).
+func isEnabled(d *dentry) bool {
+ return !d.fs.allowRuntimeEnable || len(d.rootHash) != 0
+}
+
// alertIntegrityViolation alerts a violation of integrity, which usually means
// unexpected modification to the file system is detected. In
// noCrashOnVerificationFailure mode, it returns an error, otherwise it panic.
@@ -245,12 +253,17 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
return nil, nil, err
}
- // TODO(b/162788573): Verify Metadata.
d.mode = uint32(stat.Mode)
d.uid = stat.UID
d.gid = stat.GID
-
d.rootHash = make([]byte, len(iopts.RootHash))
+
+ if !fs.allowRuntimeEnable {
+ if err := fs.verifyStat(ctx, d, stat); err != nil {
+ return nil, nil, err
+ }
+ }
+
copy(d.rootHash, iopts.RootHash)
d.vfsd.Init(d)
@@ -488,6 +501,11 @@ func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linu
if err != nil {
return linux.Statx{}, err
}
+ if isEnabled(fd.d) {
+ if err := fd.d.fs.verifyStat(ctx, fd.d, stat); err != nil {
+ return linux.Statx{}, err
+ }
+ }
return stat, nil
}
@@ -516,8 +534,10 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,
FD: fd.merkleWriter,
Ctx: ctx,
}
- var rootHash []byte
- var dataSize uint64
+ params := &merkletree.GenerateParams{
+ TreeReader: &merkleReader,
+ TreeWriter: &merkleWriter,
+ }
switch atomic.LoadUint32(&fd.d.mode) & linux.S_IFMT {
case linux.S_IFREG:
@@ -528,12 +548,14 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,
if err != nil {
return nil, 0, err
}
- dataSize = stat.Size
- rootHash, err = merkletree.Generate(&fdReader, int64(dataSize), &merkleReader, &merkleWriter, false /* dataAndTreeInSameFile */)
- if err != nil {
- return nil, 0, err
- }
+ params.File = &fdReader
+ params.Size = int64(stat.Size)
+ params.Name = fd.d.name
+ params.Mode = uint32(stat.Mode)
+ params.UID = stat.UID
+ params.GID = stat.GID
+ params.DataAndTreeInSameFile = false
case linux.S_IFDIR:
// For a directory, generate a Merkle tree based on the root
// hashes of its children that has already been written to the
@@ -542,18 +564,27 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,
if err != nil {
return nil, 0, err
}
- dataSize = merkleStat.Size
- rootHash, err = merkletree.Generate(&merkleReader, int64(dataSize), &merkleReader, &merkleWriter, true /* dataAndTreeInSameFile */)
+ params.Size = int64(merkleStat.Size)
+
+ stat, err := fd.lowerFD.Stat(ctx, vfs.StatOptions{})
if err != nil {
return nil, 0, err
}
+
+ params.File = &merkleReader
+ params.Name = fd.d.name
+ params.Mode = uint32(stat.Mode)
+ params.UID = stat.UID
+ params.GID = stat.GID
+ params.DataAndTreeInSameFile = true
default:
// TODO(b/167728857): Investigate whether and how we should
// enable other types of file.
return nil, 0, syserror.EINVAL
}
- return rootHash, dataSize, nil
+ rootHash, err := merkletree.Generate(params)
+ return rootHash, uint64(params.Size), err
}
// enableVerity enables verity features on fd by generating a Merkle tree file
@@ -688,7 +719,7 @@ func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.
func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
// No need to verify if the file is not enabled yet in
// allowRuntimeEnable mode.
- if fd.d.fs.allowRuntimeEnable && len(fd.d.rootHash) == 0 {
+ if !isEnabled(fd.d) {
return fd.lowerFD.PRead(ctx, dst, offset, opts)
}
@@ -725,9 +756,22 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
Ctx: ctx,
}
- n, err := merkletree.Verify(dst.Writer(ctx), &dataReader, &merkleReader, int64(size), offset, dst.NumBytes(), fd.d.rootHash, false /* dataAndTreeInSameFile */)
+ n, err := merkletree.Verify(&merkletree.VerifyParams{
+ Out: dst.Writer(ctx),
+ File: &dataReader,
+ Tree: &merkleReader,
+ Size: int64(size),
+ Name: fd.d.name,
+ Mode: fd.d.mode,
+ UID: fd.d.uid,
+ GID: fd.d.gid,
+ ReadOffset: offset,
+ ReadSize: dst.NumBytes(),
+ ExpectedRoot: fd.d.rootHash,
+ DataAndTreeInSameFile: false,
+ })
if err != nil {
- return 0, alertIntegrityViolation(syserror.EINVAL, fmt.Sprintf("Verification failed: %v", err))
+ return 0, alertIntegrityViolation(syserror.EIO, fmt.Sprintf("Verification failed: %v", err))
}
return n, err
}
diff --git a/pkg/sentry/fsimpl/verity/verity_test.go b/pkg/sentry/fsimpl/verity/verity_test.go
index 8bcc14131..8d0926bc4 100644
--- a/pkg/sentry/fsimpl/verity/verity_test.go
+++ b/pkg/sentry/fsimpl/verity/verity_test.go
@@ -41,11 +41,11 @@ const maxDataSize = 100000
// newVerityRoot creates a new verity mount, and returns the root. The
// underlying file system is tmpfs. If the error is not nil, then cleanup
// should be called when the root is no longer needed.
-func newVerityRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDentry, func(), error) {
+func newVerityRoot(ctx context.Context, t *testing.T) (*vfs.VirtualFilesystem, vfs.VirtualDentry, error) {
rand.Seed(time.Now().UnixNano())
vfsObj := &vfs.VirtualFilesystem{}
if err := vfsObj.Init(ctx); err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("VFS init: %v", err)
+ return nil, vfs.VirtualDentry{}, fmt.Errorf("VFS init: %v", err)
}
vfsObj.MustRegisterFilesystemType("verity", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
@@ -67,13 +67,15 @@ func newVerityRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDent
},
})
if err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("NewMountNamespace: %v", err)
+ return nil, vfs.VirtualDentry{}, fmt.Errorf("NewMountNamespace: %v", err)
}
root := mntns.Root()
- return vfsObj, root, func() {
+ t.Helper()
+ t.Cleanup(func() {
root.DecRef(ctx)
mntns.DecRef(ctx)
- }, nil
+ })
+ return vfsObj, root, nil
}
// newFileFD creates a new file in the verity mount, and returns the FD. The FD
@@ -143,11 +145,10 @@ func corruptRandomBit(ctx context.Context, fd *vfs.FileDescription, size int) er
// file and the root Merkle tree file exist.
func TestOpen(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
if _, _, err := newFileFD(ctx, vfsObj, root, filename, 0644); err != nil {
@@ -178,15 +179,14 @@ func TestOpen(t *testing.T) {
}
}
-// TestUntouchedFileSucceeds ensures that read from an untouched verity file
+// TestReadUnmodifiedFileSucceeds ensures that reading from an untouched verity file
// succeeds after enabling verity for it.
-func TestReadUntouchedFileSucceeds(t *testing.T) {
+func TestReadUnmodifiedFileSucceeds(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
@@ -212,15 +212,14 @@ func TestReadUntouchedFileSucceeds(t *testing.T) {
}
}
-// TestReopenUntouchedFileSucceeds ensures that reopen an untouched verity file
+// TestReopenUnmodifiedFileSucceeds ensures that reopening an untouched verity file
// succeeds after enabling verity for it.
-func TestReopenUntouchedFileSucceeds(t *testing.T) {
+func TestReopenUnmodifiedFileSucceeds(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
@@ -251,11 +250,10 @@ func TestReopenUntouchedFileSucceeds(t *testing.T) {
// TestModifiedFileFails ensures that read from a modified verity file fails.
func TestModifiedFileFails(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
@@ -298,11 +296,10 @@ func TestModifiedFileFails(t *testing.T) {
// corresponding Merkle tree file is modified.
func TestModifiedMerkleFails(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
@@ -353,11 +350,10 @@ func TestModifiedMerkleFails(t *testing.T) {
// the parent Merkle tree file is modified.
func TestModifiedParentMerkleFails(t *testing.T) {
ctx := contexttest.Context(t)
- vfsObj, root, cleanup, err := newVerityRoot(ctx)
+ vfsObj, root, err := newVerityRoot(ctx, t)
if err != nil {
t.Fatalf("newVerityRoot: %v", err)
}
- defer cleanup()
filename := "verity-test-file"
fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
@@ -427,3 +423,68 @@ func TestModifiedParentMerkleFails(t *testing.T) {
t.Errorf("OpenAt file with modified parent Merkle succeeded")
}
}
+
+// TestUnmodifiedStatSucceeds ensures that stat of an untouched verity file
+// succeeds after enabling verity for it.
+func TestUnmodifiedStatSucceeds(t *testing.T) {
+ ctx := contexttest.Context(t)
+ vfsObj, root, err := newVerityRoot(ctx, t)
+ if err != nil {
+ t.Fatalf("newVerityRoot: %v", err)
+ }
+
+ filename := "verity-test-file"
+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
+ if err != nil {
+ t.Fatalf("newFileFD: %v", err)
+ }
+
+ // Enable verity on the file and confirm that stat succeeds.
+ var args arch.SyscallArguments
+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("fd.Ioctl: %v", err)
+ }
+
+ if _, err := fd.Stat(ctx, vfs.StatOptions{}); err != nil {
+ t.Errorf("fd.Stat: %v", err)
+ }
+}
+
+// TestModifiedStatFails checks that stat fails for a file whose underlying
+// stat has been modified.
+func TestModifiedStatFails(t *testing.T) {
+ ctx := contexttest.Context(t)
+ vfsObj, root, err := newVerityRoot(ctx, t)
+ if err != nil {
+ t.Fatalf("newVerityRoot: %v", err)
+ }
+
+ filename := "verity-test-file"
+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
+ if err != nil {
+ t.Fatalf("newFileFD: %v", err)
+ }
+
+ // Enable verity on the file.
+ var args arch.SyscallArguments
+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("fd.Ioctl: %v", err)
+ }
+
+ lowerFD := fd.Impl().(*fileDescription).lowerFD
+ // Change the stat of the underlying file, and check that stat fails.
+ if err := lowerFD.SetStat(ctx, vfs.SetStatOptions{
+ Stat: linux.Statx{
+ Mask: uint32(linux.STATX_MODE),
+ Mode: 0777,
+ },
+ }); err != nil {
+ t.Fatalf("lowerFD.SetStat: %v", err)
+ }
+
+ if _, err := fd.Stat(ctx, vfs.StatOptions{}); err == nil {
+ t.Errorf("fd.Stat succeeded when it should fail")
+ }
+}