author    Chong Cai <chongc@google.com>    2020-11-12 17:54:40 -0800
committer gVisor bot <gvisor-bot@google.com>    2020-11-12 17:56:21 -0800
commit    f01f623879e87412e4d2340d37ff4d0fccdb6c2b (patch)
tree      76d64e5d95a62fbef9761db86fb1a127fd51297e
parent    d700ba22abb9e5f29749cc3843991c31dc00384d (diff)
Add children names into verity hash
The children names map can be used to verify whether a child is expected during a walk, so that we can detect unexpected modifications that deleted/renamed both the target file and the corresponding Merkle tree file.

PiperOrigin-RevId: 342170715
-rw-r--r--  pkg/merkletree/merkletree.go            16
-rw-r--r--  pkg/merkletree/merkletree_test.go       74
-rw-r--r--  pkg/sentry/fsimpl/verity/filesystem.go  205
-rw-r--r--  pkg/sentry/fsimpl/verity/verity.go      155
-rw-r--r--  pkg/sentry/fsimpl/verity/verity_test.go 290
5 files changed, 516 insertions, 224 deletions
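
The idea behind the change, sketched before the diffs: a verity-enabled directory remembers the set of child names it had when verity was enabled, and that set is folded into the directory's hash. During a walk, a name missing from the set is an ordinary ENOENT, while a name in the set whose file (or Merkle tree file) is gone underneath signals tampering. A minimal standalone Go sketch of that decision; the type and error names here are illustrative, not the fsimpl/verity API:

package main

import (
	"errors"
	"fmt"
)

var (
	errNoEnt     = errors.New("no such file or directory") // stand-in for syserror.ENOENT
	errIntegrity = errors.New("integrity violation")       // stand-in for alertIntegrityViolation
)

// dir is a hypothetical verity-enabled directory: it knows which child
// names were present when verity was enabled.
type dir struct {
	childrenNames map[string]struct{} // verified set of expected children
	lower         map[string]bool     // what the underlying filesystem currently has
}

// lookup mirrors the patch's logic: a name absent from the verified set is a
// plain ENOENT; a name in the set but missing underneath is an integrity error.
func (d *dir) lookup(name string) error {
	if _, ok := d.childrenNames[name]; !ok {
		return errNoEnt
	}
	if !d.lower[name] {
		return fmt.Errorf("file %q expected but not found: %w", name, errIntegrity)
	}
	return nil
}

func main() {
	d := &dir{
		childrenNames: map[string]struct{}{"a.txt": {}},
		lower:         map[string]bool{}, // "a.txt" was deleted underneath
	}
	fmt.Println(d.lookup("missing")) // plain ENOENT
	fmt.Println(d.lookup("a.txt"))   // integrity violation
}
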
diff --git a/pkg/merkletree/merkletree.go b/pkg/merkletree/merkletree.go
index e0a9e56c5..6acee90ef 100644
--- a/pkg/merkletree/merkletree.go
+++ b/pkg/merkletree/merkletree.go
@@ -19,6 +19,7 @@ import (
"bytes"
"crypto/sha256"
"crypto/sha512"
+ "encoding/gob"
"fmt"
"io"
@@ -151,11 +152,15 @@ type VerityDescriptor struct {
Mode uint32
UID uint32
GID uint32
+ Children map[string]struct{}
RootHash []byte
}
func (d *VerityDescriptor) String() string {
- return fmt.Sprintf("Name: %s, Size: %d, Mode: %d, UID: %d, GID: %d, RootHash: %v", d.Name, d.FileSize, d.Mode, d.UID, d.GID, d.RootHash)
+ b := new(bytes.Buffer)
+ e := gob.NewEncoder(b)
+ e.Encode(d.Children)
+ return fmt.Sprintf("Name: %s, Size: %d, Mode: %d, UID: %d, GID: %d, Children: %v, RootHash: %v", d.Name, d.FileSize, d.Mode, d.UID, d.GID, b.Bytes(), d.RootHash)
}
// verify generates a hash from d, and compares it with expected.
@@ -202,6 +207,9 @@ type GenerateParams struct {
UID uint32
// GID is the group ID of the target file.
GID uint32
+ // Children is a map of children names for a directory. It should be
+ // empty for a regular file.
+ Children map[string]struct{}
// HashAlgorithms is the algorithms used to hash data.
HashAlgorithms int
// TreeReader is a reader for the Merkle tree.
@@ -294,6 +302,7 @@ func Generate(params *GenerateParams) ([]byte, error) {
Mode: params.Mode,
UID: params.UID,
GID: params.GID,
+ Children: params.Children,
RootHash: root,
}
return hashData([]byte(descriptor.String()), params.HashAlgorithms)
@@ -318,6 +327,9 @@ type VerifyParams struct {
UID uint32
// GID is the group ID of the target file.
GID uint32
+ // Children is a map of children names for a directory. It should be
+ // empty for a regular file.
+ Children map[string]struct{}
// HashAlgorithms is the algorithms used to hash data.
HashAlgorithms int
// ReadOffset is the offset of the data range to be verified.
@@ -348,6 +360,7 @@ func verifyMetadata(params *VerifyParams, layout *Layout) error {
Mode: params.Mode,
UID: params.UID,
GID: params.GID,
+ Children: params.Children,
RootHash: root,
}
return descriptor.verify(params.Expected, params.HashAlgorithms)
@@ -409,6 +422,7 @@ func Verify(params *VerifyParams) (int64, error) {
Mode: params.Mode,
UID: params.UID,
GID: params.GID,
+ Children: params.Children,
}
if err := verifyBlock(params.Tree, &descriptor, &layout, buf, i, params.HashAlgorithms, params.Expected); err != nil {
return 0, err
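
The merkletree change above threads the children map into VerityDescriptor, so a directory's hash now depends on its set of child names (which is why every expected hash in the tests below changes). A simplified, self-contained analogue of that effect; it serializes the names in sorted order for clarity, whereas the patch's String() gob-encodes the map:

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
	"strings"
)

// descriptor is a toy analogue of merkletree.VerityDescriptor.
type descriptor struct {
	Name     string
	Children map[string]struct{}
	RootHash []byte
}

// digest hashes a stable textual form of the descriptor, so any change to
// the children set changes the resulting hash.
func (d descriptor) digest() [32]byte {
	names := make([]string, 0, len(d.Children))
	for name := range d.Children {
		names = append(names, name)
	}
	sort.Strings(names) // deterministic order for this sketch
	s := fmt.Sprintf("Name: %s, Children: %s, RootHash: %v",
		d.Name, strings.Join(names, ","), d.RootHash)
	return sha256.Sum256([]byte(s))
}

func main() {
	base := descriptor{Name: "dir", Children: map[string]struct{}{"a": {}}}
	renamed := descriptor{Name: "dir", Children: map[string]struct{}{"b": {}}}
	// Renaming a child (and its Merkle tree file) changes the directory's
	// hash, which is how the modification gets detected.
	fmt.Printf("%x\n%x\n", base.digest(), renamed.digest())
}
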
diff --git a/pkg/merkletree/merkletree_test.go b/pkg/merkletree/merkletree_test.go
index 279c0103e..66ddf09e6 100644
--- a/pkg/merkletree/merkletree_test.go
+++ b/pkg/merkletree/merkletree_test.go
@@ -203,112 +203,112 @@ func TestGenerate(t *testing.T) {
data: bytes.Repeat([]byte{0}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: false,
- expectedHash: []byte{39, 30, 12, 152, 185, 58, 32, 84, 218, 79, 74, 113, 104, 219, 230, 234, 25, 126, 147, 36, 212, 44, 76, 74, 25, 93, 228, 41, 243, 143, 59, 147},
+ expectedHash: []byte{42, 197, 191, 52, 206, 122, 93, 34, 198, 125, 100, 154, 171, 177, 94, 14, 49, 40, 76, 157, 122, 58, 78, 6, 163, 248, 30, 238, 16, 190, 173, 175},
},
{
name: "OnePageZeroesSHA256SameFile",
data: bytes.Repeat([]byte{0}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: true,
- expectedHash: []byte{39, 30, 12, 152, 185, 58, 32, 84, 218, 79, 74, 113, 104, 219, 230, 234, 25, 126, 147, 36, 212, 44, 76, 74, 25, 93, 228, 41, 243, 143, 59, 147},
+ expectedHash: []byte{42, 197, 191, 52, 206, 122, 93, 34, 198, 125, 100, 154, 171, 177, 94, 14, 49, 40, 76, 157, 122, 58, 78, 6, 163, 248, 30, 238, 16, 190, 173, 175},
},
{
name: "OnePageZeroesSHA512SeparateFile",
data: bytes.Repeat([]byte{0}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: false,
- expectedHash: []byte{184, 76, 172, 204, 17, 136, 127, 75, 224, 42, 251, 181, 98, 149, 1, 44, 58, 148, 20, 187, 30, 174, 73, 87, 166, 9, 109, 169, 42, 96, 87, 202, 59, 82, 174, 80, 51, 95, 101, 100, 6, 246, 56, 120, 27, 166, 29, 59, 67, 115, 227, 121, 241, 177, 63, 238, 82, 157, 43, 107, 174, 180, 44, 84},
+ expectedHash: []byte{87, 131, 150, 74, 0, 218, 117, 114, 34, 23, 212, 16, 122, 97, 124, 172, 41, 46, 107, 150, 33, 46, 56, 39, 5, 246, 215, 187, 140, 83, 35, 63, 111, 74, 155, 241, 161, 214, 92, 141, 232, 125, 99, 71, 168, 102, 82, 20, 229, 249, 248, 28, 29, 238, 199, 223, 173, 180, 179, 46, 241, 240, 237, 74},
},
{
name: "OnePageZeroesSHA512SameFile",
data: bytes.Repeat([]byte{0}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: true,
- expectedHash: []byte{184, 76, 172, 204, 17, 136, 127, 75, 224, 42, 251, 181, 98, 149, 1, 44, 58, 148, 20, 187, 30, 174, 73, 87, 166, 9, 109, 169, 42, 96, 87, 202, 59, 82, 174, 80, 51, 95, 101, 100, 6, 246, 56, 120, 27, 166, 29, 59, 67, 115, 227, 121, 241, 177, 63, 238, 82, 157, 43, 107, 174, 180, 44, 84},
+ expectedHash: []byte{87, 131, 150, 74, 0, 218, 117, 114, 34, 23, 212, 16, 122, 97, 124, 172, 41, 46, 107, 150, 33, 46, 56, 39, 5, 246, 215, 187, 140, 83, 35, 63, 111, 74, 155, 241, 161, 214, 92, 141, 232, 125, 99, 71, 168, 102, 82, 20, 229, 249, 248, 28, 29, 238, 199, 223, 173, 180, 179, 46, 241, 240, 237, 74},
},
{
name: "MultiplePageZeroesSHA256SeparateFile",
data: bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: false,
- expectedHash: []byte{213, 221, 252, 9, 241, 250, 186, 1, 242, 132, 83, 77, 180, 207, 119, 48, 206, 113, 37, 253, 252, 159, 71, 70, 3, 53, 42, 244, 230, 244, 173, 143},
+ expectedHash: []byte{115, 151, 35, 147, 223, 91, 17, 6, 162, 145, 237, 81, 88, 53, 120, 49, 128, 70, 188, 28, 254, 241, 19, 233, 30, 243, 71, 225, 57, 58, 61, 38},
},
{
name: "MultiplePageZeroesSHA256SameFile",
data: bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: true,
- expectedHash: []byte{213, 221, 252, 9, 241, 250, 186, 1, 242, 132, 83, 77, 180, 207, 119, 48, 206, 113, 37, 253, 252, 159, 71, 70, 3, 53, 42, 244, 230, 244, 173, 143},
+ expectedHash: []byte{115, 151, 35, 147, 223, 91, 17, 6, 162, 145, 237, 81, 88, 53, 120, 49, 128, 70, 188, 28, 254, 241, 19, 233, 30, 243, 71, 225, 57, 58, 61, 38},
},
{
name: "MultiplePageZeroesSHA512SeparateFile",
data: bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: false,
- expectedHash: []byte{40, 231, 187, 28, 3, 171, 168, 36, 177, 244, 118, 131, 218, 226, 106, 55, 245, 157, 244, 147, 144, 57, 41, 182, 65, 6, 13, 49, 38, 66, 237, 117, 124, 110, 250, 246, 248, 132, 201, 156, 195, 201, 142, 179, 122, 128, 195, 194, 187, 240, 129, 171, 168, 182, 101, 58, 194, 155, 99, 147, 49, 130, 161, 178},
+ expectedHash: []byte{41, 94, 205, 97, 254, 226, 171, 69, 76, 102, 197, 47, 113, 53, 24, 244, 103, 131, 83, 73, 87, 212, 247, 140, 32, 144, 211, 158, 25, 131, 194, 57, 21, 224, 128, 119, 69, 100, 45, 50, 157, 54, 46, 214, 152, 179, 59, 78, 28, 48, 146, 160, 204, 48, 27, 90, 152, 193, 167, 45, 150, 67, 66, 217},
},
{
name: "MultiplePageZeroesSHA512SameFile",
data: bytes.Repeat([]byte{0}, 128*usermem.PageSize+1),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: true,
- expectedHash: []byte{40, 231, 187, 28, 3, 171, 168, 36, 177, 244, 118, 131, 218, 226, 106, 55, 245, 157, 244, 147, 144, 57, 41, 182, 65, 6, 13, 49, 38, 66, 237, 117, 124, 110, 250, 246, 248, 132, 201, 156, 195, 201, 142, 179, 122, 128, 195, 194, 187, 240, 129, 171, 168, 182, 101, 58, 194, 155, 99, 147, 49, 130, 161, 178},
+ expectedHash: []byte{41, 94, 205, 97, 254, 226, 171, 69, 76, 102, 197, 47, 113, 53, 24, 244, 103, 131, 83, 73, 87, 212, 247, 140, 32, 144, 211, 158, 25, 131, 194, 57, 21, 224, 128, 119, 69, 100, 45, 50, 157, 54, 46, 214, 152, 179, 59, 78, 28, 48, 146, 160, 204, 48, 27, 90, 152, 193, 167, 45, 150, 67, 66, 217},
},
{
name: "SingleASHA256SeparateFile",
data: []byte{'a'},
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: false,
- expectedHash: []byte{182, 25, 170, 240, 16, 153, 234, 4, 101, 238, 197, 154, 182, 168, 171, 96, 177, 33, 171, 117, 73, 78, 124, 239, 82, 255, 215, 121, 156, 95, 121, 171},
+ expectedHash: []byte{52, 159, 140, 206, 140, 138, 231, 140, 94, 14, 252, 66, 175, 128, 191, 14, 52, 215, 190, 184, 165, 50, 182, 224, 42, 156, 145, 0, 1, 15, 187, 85},
},
{
name: "SingleASHA256SameFile",
data: []byte{'a'},
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: true,
- expectedHash: []byte{182, 25, 170, 240, 16, 153, 234, 4, 101, 238, 197, 154, 182, 168, 171, 96, 177, 33, 171, 117, 73, 78, 124, 239, 82, 255, 215, 121, 156, 95, 121, 171},
+ expectedHash: []byte{52, 159, 140, 206, 140, 138, 231, 140, 94, 14, 252, 66, 175, 128, 191, 14, 52, 215, 190, 184, 165, 50, 182, 224, 42, 156, 145, 0, 1, 15, 187, 85},
},
{
name: "SingleASHA512SeparateFile",
data: []byte{'a'},
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: false,
- expectedHash: []byte{121, 28, 140, 244, 32, 222, 61, 255, 184, 65, 117, 84, 132, 197, 122, 214, 95, 249, 164, 77, 211, 192, 217, 59, 109, 255, 249, 253, 27, 142, 110, 29, 93, 153, 92, 211, 178, 198, 136, 34, 61, 157, 141, 94, 145, 191, 201, 134, 141, 138, 51, 26, 33, 187, 17, 196, 113, 234, 125, 219, 4, 41, 57, 120},
+ expectedHash: []byte{232, 90, 223, 95, 60, 151, 149, 172, 174, 58, 206, 97, 189, 103, 6, 202, 67, 248, 1, 189, 243, 51, 250, 42, 5, 89, 195, 9, 50, 74, 39, 169, 114, 228, 109, 225, 128, 210, 63, 94, 18, 133, 58, 48, 225, 100, 176, 55, 87, 60, 235, 224, 143, 41, 15, 253, 94, 28, 251, 233, 99, 207, 152, 108},
},
{
name: "SingleASHA512SameFile",
data: []byte{'a'},
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: true,
- expectedHash: []byte{121, 28, 140, 244, 32, 222, 61, 255, 184, 65, 117, 84, 132, 197, 122, 214, 95, 249, 164, 77, 211, 192, 217, 59, 109, 255, 249, 253, 27, 142, 110, 29, 93, 153, 92, 211, 178, 198, 136, 34, 61, 157, 141, 94, 145, 191, 201, 134, 141, 138, 51, 26, 33, 187, 17, 196, 113, 234, 125, 219, 4, 41, 57, 120},
+ expectedHash: []byte{232, 90, 223, 95, 60, 151, 149, 172, 174, 58, 206, 97, 189, 103, 6, 202, 67, 248, 1, 189, 243, 51, 250, 42, 5, 89, 195, 9, 50, 74, 39, 169, 114, 228, 109, 225, 128, 210, 63, 94, 18, 133, 58, 48, 225, 100, 176, 55, 87, 60, 235, 224, 143, 41, 15, 253, 94, 28, 251, 233, 99, 207, 152, 108},
},
{
name: "OnePageASHA256SeparateFile",
data: bytes.Repeat([]byte{'a'}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: false,
- expectedHash: []byte{17, 40, 99, 150, 206, 124, 196, 184, 41, 40, 50, 91, 113, 47, 8, 204, 2, 102, 202, 86, 157, 92, 218, 53, 151, 250, 234, 247, 191, 121, 113, 246},
+ expectedHash: []byte{157, 60, 139, 54, 248, 39, 187, 77, 31, 107, 241, 26, 240, 49, 83, 159, 182, 60, 128, 85, 121, 204, 15, 249, 44, 248, 127, 134, 58, 220, 41, 185},
},
{
name: "OnePageASHA256SameFile",
data: bytes.Repeat([]byte{'a'}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
dataAndTreeInSameFile: true,
- expectedHash: []byte{17, 40, 99, 150, 206, 124, 196, 184, 41, 40, 50, 91, 113, 47, 8, 204, 2, 102, 202, 86, 157, 92, 218, 53, 151, 250, 234, 247, 191, 121, 113, 246},
+ expectedHash: []byte{157, 60, 139, 54, 248, 39, 187, 77, 31, 107, 241, 26, 240, 49, 83, 159, 182, 60, 128, 85, 121, 204, 15, 249, 44, 248, 127, 134, 58, 220, 41, 185},
},
{
name: "OnePageASHA512SeparateFile",
data: bytes.Repeat([]byte{'a'}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: false,
- expectedHash: []byte{100, 22, 249, 78, 47, 163, 220, 231, 228, 165, 226, 192, 221, 77, 106, 69, 115, 104, 208, 155, 124, 206, 225, 233, 98, 249, 232, 225, 114, 119, 110, 216, 117, 106, 85, 7, 200, 206, 139, 81, 116, 37, 215, 158, 89, 110, 74, 86, 66, 95, 117, 237, 70, 56, 62, 175, 48, 147, 162, 122, 253, 57, 123, 84},
+ expectedHash: []byte{116, 22, 252, 100, 32, 241, 254, 228, 167, 228, 110, 146, 156, 189, 6, 30, 27, 127, 94, 181, 15, 98, 173, 60, 34, 102, 92, 174, 181, 80, 205, 90, 88, 12, 125, 194, 148, 175, 184, 168, 37, 66, 127, 194, 19, 132, 93, 147, 168, 217, 227, 131, 100, 25, 213, 255, 132, 60, 196, 217, 24, 158, 1, 50},
},
{
name: "OnePageASHA512SameFile",
data: bytes.Repeat([]byte{'a'}, usermem.PageSize),
hashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA512,
dataAndTreeInSameFile: true,
- expectedHash: []byte{100, 22, 249, 78, 47, 163, 220, 231, 228, 165, 226, 192, 221, 77, 106, 69, 115, 104, 208, 155, 124, 206, 225, 233, 98, 249, 232, 225, 114, 119, 110, 216, 117, 106, 85, 7, 200, 206, 139, 81, 116, 37, 215, 158, 89, 110, 74, 86, 66, 95, 117, 237, 70, 56, 62, 175, 48, 147, 162, 122, 253, 57, 123, 84},
+ expectedHash: []byte{116, 22, 252, 100, 32, 241, 254, 228, 167, 228, 110, 146, 156, 189, 6, 30, 27, 127, 94, 181, 15, 98, 173, 60, 34, 102, 92, 174, 181, 80, 205, 90, 88, 12, 125, 194, 148, 175, 184, 168, 37, 66, 127, 194, 19, 132, 93, 147, 168, 217, 227, 131, 100, 25, 213, 255, 132, 60, 196, 217, 24, 158, 1, 50},
},
}
@@ -321,6 +321,7 @@ func TestGenerate(t *testing.T) {
Mode: defaultMode,
UID: defaultUID,
GID: defaultGID,
+ Children: make(map[string]struct{}),
HashAlgorithms: tc.hashAlgorithms,
TreeReader: &tree,
TreeWriter: &tree,
@@ -362,6 +363,7 @@ func prepareVerify(t *testing.T, dataSize int64, hashAlgorithm int, dataAndTreeI
Mode: defaultMode,
UID: defaultUID,
GID: defaultGID,
+ Children: make(map[string]struct{}),
HashAlgorithms: hashAlgorithm,
TreeReader: &tree,
TreeWriter: &tree,
@@ -389,6 +391,7 @@ func prepareVerify(t *testing.T, dataSize int64, hashAlgorithm int, dataAndTreeI
Mode: defaultMode,
UID: defaultUID,
GID: defaultGID,
+ Children: make(map[string]struct{}),
HashAlgorithms: hashAlgorithm,
ReadOffset: verifyStart,
ReadSize: verifySize,
@@ -666,6 +669,45 @@ func TestVerifyModifiedGID(t *testing.T) {
}
}
+func TestVerifyModifiedChildren(t *testing.T) {
+ testCases := []struct {
+ name string
+ hashAlgorithm int
+ dataAndTreeInSameFile bool
+ }{
+ {
+ name: "SHA256SeparateFile",
+ hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA256,
+ dataAndTreeInSameFile: false,
+ },
+ {
+ name: "SHA512SeparateFile",
+ hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA512,
+ dataAndTreeInSameFile: false,
+ },
+ {
+ name: "SHA256SameFile",
+ hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA256,
+ dataAndTreeInSameFile: true,
+ },
+ {
+ name: "SHA512SameFile",
+ hashAlgorithm: linux.FS_VERITY_HASH_ALG_SHA512,
+ dataAndTreeInSameFile: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var buf bytes.Buffer
+ _, params := prepareVerify(t, usermem.PageSize /* dataSize */, tc.hashAlgorithm, tc.dataAndTreeInSameFile, 0 /* verifyStart */, 0 /* verifySize */, &buf)
+ params.Children["abc"] = struct{}{}
+ if _, err := Verify(&params); errors.Is(err, nil) {
+ t.Errorf("Verification succeeded when expected to fail")
+ }
+ })
+ }
+}
+
func TestModifyOutsideVerifyRange(t *testing.T) {
testCases := []struct {
name string
diff --git a/pkg/sentry/fsimpl/verity/filesystem.go b/pkg/sentry/fsimpl/verity/filesystem.go
index 4e8d63d51..add5dd48e 100644
--- a/pkg/sentry/fsimpl/verity/filesystem.go
+++ b/pkg/sentry/fsimpl/verity/filesystem.go
@@ -16,6 +16,7 @@ package verity
import (
"bytes"
+ "encoding/json"
"fmt"
"io"
"strconv"
@@ -31,6 +32,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/usermem"
)
// Sync implements vfs.FilesystemImpl.Sync.
@@ -267,14 +269,15 @@ func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *de
// Verify returns with success.
var buf bytes.Buffer
if _, err := merkletree.Verify(&merkletree.VerifyParams{
- Out: &buf,
- File: &fdReader,
- Tree: &fdReader,
- Size: int64(parentSize),
- Name: parent.name,
- Mode: uint32(parentStat.Mode),
- UID: parentStat.UID,
- GID: parentStat.GID,
+ Out: &buf,
+ File: &fdReader,
+ Tree: &fdReader,
+ Size: int64(parentSize),
+ Name: parent.name,
+ Mode: uint32(parentStat.Mode),
+ UID: parentStat.UID,
+ GID: parentStat.GID,
+ Children: parent.childrenNames,
//TODO(b/156980949): Support passing other hash algorithms.
HashAlgorithms: fs.alg.toLinuxHashAlg(),
ReadOffset: int64(offset),
@@ -292,9 +295,10 @@ func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *de
return child, nil
}
-// verifyStat verifies the stat against the verified hash. The mode/uid/gid of
-// the file is cached after verified.
-func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Statx) error {
+// verifyStatAndChildren verifies the stat and children names against the
+// verified hash. The mode/uid/gid and childrenNames of the file is cached
+// after verified.
+func (fs *filesystem) verifyStatAndChildren(ctx context.Context, d *dentry, stat linux.Statx) error {
vfsObj := fs.vfsfs.VirtualFilesystem()
// Get the path to the child dentry. This is only used to provide path
@@ -337,6 +341,49 @@ func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Stat
return alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s for %s to int: %v", merkleSizeXattr, childPath, err))
}
+ if d.isDir() && len(d.childrenNames) == 0 {
+ childrenOffString, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{
+ Name: childrenOffsetXattr,
+ Size: sizeOfStringInt32,
+ })
+
+ if err == syserror.ENODATA {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", childrenOffsetXattr, childPath, err))
+ }
+ if err != nil {
+ return err
+ }
+ childrenOffset, err := strconv.Atoi(childrenOffString)
+ if err != nil {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s to int: %v", childrenOffsetXattr, err))
+ }
+
+ childrenSizeString, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{
+ Name: childrenSizeXattr,
+ Size: sizeOfStringInt32,
+ })
+
+ if err == syserror.ENODATA {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", childrenSizeXattr, childPath, err))
+ }
+ if err != nil {
+ return err
+ }
+ childrenSize, err := strconv.Atoi(childrenSizeString)
+ if err != nil {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s to int: %v", childrenSizeXattr, err))
+ }
+
+ childrenNames := make([]byte, childrenSize)
+ if _, err := fd.PRead(ctx, usermem.BytesIOSequence(childrenNames), int64(childrenOffset), vfs.ReadOptions{}); err != nil {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to read children map for %s: %v", childPath, err))
+ }
+
+ if err := json.Unmarshal(childrenNames, &d.childrenNames); err != nil {
+ return alertIntegrityViolation(fmt.Sprintf("Failed to deserialize childrenNames of %s: %v", childPath, err))
+ }
+ }
+
fdReader := vfs.FileReadWriteSeeker{
FD: fd,
Ctx: ctx,
@@ -344,13 +391,14 @@ func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Stat
var buf bytes.Buffer
params := &merkletree.VerifyParams{
- Out: &buf,
- Tree: &fdReader,
- Size: int64(size),
- Name: d.name,
- Mode: uint32(stat.Mode),
- UID: stat.UID,
- GID: stat.GID,
+ Out: &buf,
+ Tree: &fdReader,
+ Size: int64(size),
+ Name: d.name,
+ Mode: uint32(stat.Mode),
+ UID: stat.UID,
+ GID: stat.GID,
+ Children: d.childrenNames,
//TODO(b/156980949): Support passing other hash algorithms.
HashAlgorithms: fs.alg.toLinuxHashAlg(),
ReadOffset: 0,
@@ -438,7 +486,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
if err != nil {
return nil, err
}
- if err := fs.verifyStat(ctx, child, stat); err != nil {
+ if err := fs.verifyStatAndChildren(ctx, child, stat); err != nil {
return nil, err
}
}
@@ -462,92 +510,58 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry, name string) (*dentry, error) {
vfsObj := fs.vfsfs.VirtualFilesystem()
- childVD, childErr := parent.getLowerAt(ctx, vfsObj, name)
- // We will handle ENOENT separately, as it may indicate unexpected
- // modifications to the file system, and may cause a sentry panic.
- if childErr != nil && childErr != syserror.ENOENT {
- return nil, childErr
+ if parent.verityEnabled() {
+ if _, ok := parent.childrenNames[name]; !ok {
+ return nil, syserror.ENOENT
+ }
}
- // The dentry needs to be cleaned up if any error occurs. IncRef will be
- // called if a verity child dentry is successfully created.
- if childErr == nil {
- defer childVD.DecRef(ctx)
+ parentPath, err := vfsObj.PathnameWithDeleted(ctx, parent.fs.rootDentry.lowerVD, parent.lowerVD)
+ if err != nil {
+ return nil, err
}
- childMerkleVD, childMerkleErr := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
- // We will handle ENOENT separately, as it may indicate unexpected
- // modifications to the file system, and may cause a sentry panic.
- if childMerkleErr != nil && childMerkleErr != syserror.ENOENT {
- return nil, childMerkleErr
+ childVD, err := parent.getLowerAt(ctx, vfsObj, name)
+ if err == syserror.ENOENT {
+ return nil, alertIntegrityViolation(fmt.Sprintf("file %s expected but not found", parentPath+"/"+name))
+ }
+ if err != nil {
+ return nil, err
}
// The dentry needs to be cleaned up if any error occurs. IncRef will be
// called if a verity child dentry is successfully created.
- if childMerkleErr == nil {
- defer childMerkleVD.DecRef(ctx)
- }
+ defer childVD.DecRef(ctx)
- // Get the path to the parent dentry. This is only used to provide path
- // information in failure case.
- parentPath, err := vfsObj.PathnameWithDeleted(ctx, parent.fs.rootDentry.lowerVD, parent.lowerVD)
- if err != nil {
+ childMerkleVD, err := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
+ if err == syserror.ENOENT {
+ if !fs.allowRuntimeEnable {
+ return nil, alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", parentPath+"/"+name))
+ }
+ childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: parent.lowerVD,
+ Start: parent.lowerVD,
+ Path: fspath.Parse(merklePrefix + name),
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDWR | linux.O_CREAT,
+ Mode: 0644,
+ })
+ if err != nil {
+ return nil, err
+ }
+ childMerkleFD.DecRef(ctx)
+ childMerkleVD, err = parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err != nil && err != syserror.ENOENT {
return nil, err
}
- // TODO(b/166474175): Investigate all possible errors of childErr and
- // childMerkleErr, and make sure we differentiate all errors that
- // indicate unexpected modifications to the file system from the ones
- // that are not harmful.
- if childErr == syserror.ENOENT && childMerkleErr == nil {
- // Failed to get child file/directory dentry. However the
- // corresponding Merkle tree is found. This indicates an
- // unexpected modification to the file system that
- // removed/renamed the child.
- return nil, alertIntegrityViolation(fmt.Sprintf("Target file %s is expected but missing", parentPath+"/"+name))
- } else if childErr == nil && childMerkleErr == syserror.ENOENT {
- // If in allowRuntimeEnable mode, and the Merkle tree file is
- // not created yet, we create an empty Merkle tree file, so that
- // if the file is enabled through ioctl, we have the Merkle tree
- // file open and ready to use.
- // This may cause empty and unused Merkle tree files in
- // allowRuntimeEnable mode, if they are never enabled. This
- // does not affect verification, as we rely on cached hash to
- // decide whether to perform verification, not the existence of
- // the Merkle tree file. Also, those Merkle tree files are
- // always hidden and cannot be accessed by verity fs users.
- if fs.allowRuntimeEnable {
- childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: parent.lowerVD,
- Start: parent.lowerVD,
- Path: fspath.Parse(merklePrefix + name),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT,
- Mode: 0644,
- })
- if err != nil {
- return nil, err
- }
- childMerkleFD.DecRef(ctx)
- childMerkleVD, err = parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
- if err != nil {
- return nil, err
- }
- } else {
- // If runtime enable is not allowed. This indicates an
- // unexpected modification to the file system that
- // removed/renamed the Merkle tree file.
- return nil, alertIntegrityViolation(fmt.Sprintf("Expected Merkle file for target %s but none found", parentPath+"/"+name))
- }
- } else if childErr == syserror.ENOENT && childMerkleErr == syserror.ENOENT {
- // Both the child and the corresponding Merkle tree are missing.
- // This could be an unexpected modification or due to incorrect
- // parameter.
- // TODO(b/167752508): Investigate possible ways to differentiate
- // cases that both files are deleted from cases that they never
- // exist in the file system.
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to find file %s", parentPath+"/"+name))
- }
+ // The dentry needs to be cleaned up if any error occurs. IncRef will be
+ // called if a verity child dentry is successfully created.
+ defer childMerkleVD.DecRef(ctx)
mask := uint32(linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID)
stat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
@@ -577,6 +591,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
child.mode = uint32(stat.Mode)
child.uid = stat.UID
child.gid = stat.GID
+ child.childrenNames = make(map[string]struct{})
// Verify child hash. This should always be performed unless in
// allowRuntimeEnable mode and the parent directory hasn't been enabled
@@ -588,7 +603,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
}
}
if child.verityEnabled() {
- if err := fs.verifyStat(ctx, child, stat); err != nil {
+ if err := fs.verifyStatAndChildren(ctx, child, stat); err != nil {
child.destroyLocked(ctx)
return nil, err
}
@@ -944,7 +959,7 @@ func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
return linux.Statx{}, err
}
if d.verityEnabled() {
- if err := fs.verifyStat(ctx, d, stat); err != nil {
+ if err := fs.verifyStatAndChildren(ctx, d, stat); err != nil {
return linux.Statx{}, err
}
}
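
On the filesystem side, verifyStatAndChildren lazily reloads the children map before verifying a directory: the childrenOffset/childrenSize xattrs on the Merkle tree file locate a JSON blob appended to it, which is PRead and unmarshaled into d.childrenNames. A rough standalone analogue using plain files in place of the sentry's vfs and xattr plumbing; the path and values are hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// loadChildren reads size bytes at offset from the Merkle tree file and
// decodes them as the JSON-serialized children map, mirroring the
// PRead + json.Unmarshal sequence in verifyStatAndChildren.
func loadChildren(merklePath string, offset int64, size int) (map[string]struct{}, error) {
	f, err := os.Open(merklePath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	buf := make([]byte, size)
	if _, err := f.ReadAt(buf, offset); err != nil {
		return nil, fmt.Errorf("failed to read children map: %w", err)
	}
	children := make(map[string]struct{})
	if err := json.Unmarshal(buf, &children); err != nil {
		return nil, fmt.Errorf("failed to deserialize childrenNames: %w", err)
	}
	return children, nil
}

func main() {
	// Hypothetical values; in the patch they come from the
	// user.merkle.childrenOffset and user.merkle.childrenSize xattrs.
	children, err := loadChildren(".merkle.verity.dir", 4096, 42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(children)
}
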
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
index faa862c55..87dabe038 100644
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ b/pkg/sentry/fsimpl/verity/verity.go
@@ -22,6 +22,7 @@
package verity
import (
+ "encoding/json"
"fmt"
"math"
"strconv"
@@ -60,6 +61,15 @@ const (
// file size. For a directory, this is the size of all its children's hashes.
merkleSizeXattr = "user.merkle.size"
+ // childrenOffsetXattr is the extended attribute name specifying the
+ // offset of the serialized children names in the Merkle tree
+ // file.
+ childrenOffsetXattr = "user.merkle.childrenOffset"
+
+ // childrenSizeXattr is the extended attribute name specifying the size
+ // of the serialized children names.
+ childrenSizeXattr = "user.merkle.childrenSize"
+
// sizeOfStringInt32 is the size for a 32 bit integer stored as string in
// extended attributes. The maximum value of a 32 bit integer has 10 digits.
sizeOfStringInt32 = 10
@@ -299,9 +309,70 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
d.uid = stat.UID
d.gid = stat.GID
d.hash = make([]byte, len(iopts.RootHash))
+ d.childrenNames = make(map[string]struct{})
if !fs.allowRuntimeEnable {
- if err := fs.verifyStat(ctx, d, stat); err != nil {
+ // Get children names from the underlying file system.
+ offString, err := vfsObj.GetXattrAt(ctx, creds, &vfs.PathOperation{
+ Root: lowerMerkleVD,
+ Start: lowerMerkleVD,
+ }, &vfs.GetXattrOptions{
+ Name: childrenOffsetXattr,
+ Size: sizeOfStringInt32,
+ })
+ if err == syserror.ENOENT || err == syserror.ENODATA {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", childrenOffsetXattr, err))
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ off, err := strconv.Atoi(offString)
+ if err != nil {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s to int: %v", childrenOffsetXattr, err))
+ }
+
+ sizeString, err := vfsObj.GetXattrAt(ctx, creds, &vfs.PathOperation{
+ Root: lowerMerkleVD,
+ Start: lowerMerkleVD,
+ }, &vfs.GetXattrOptions{
+ Name: childrenSizeXattr,
+ Size: sizeOfStringInt32,
+ })
+ if err == syserror.ENOENT || err == syserror.ENODATA {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", childrenSizeXattr, err))
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ size, err := strconv.Atoi(sizeString)
+ if err != nil {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s to int: %v", childrenSizeXattr, err))
+ }
+
+ lowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: lowerMerkleVD,
+ Start: lowerMerkleVD,
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ })
+ if err == syserror.ENOENT {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to open root Merkle file: %v", err))
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ childrenNames := make([]byte, size)
+ if _, err := lowerMerkleFD.PRead(ctx, usermem.BytesIOSequence(childrenNames), int64(off), vfs.ReadOptions{}); err != nil {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to read root children map: %v", err))
+ }
+
+ if err := json.Unmarshal(childrenNames, &d.childrenNames); err != nil {
+ return nil, nil, alertIntegrityViolation(fmt.Sprintf("Failed to deserialize childrenNames: %v", err))
+ }
+
+ if err := fs.verifyStatAndChildren(ctx, d, stat); err != nil {
return nil, nil, err
}
}
@@ -352,6 +423,11 @@ type dentry struct {
dirMu sync.Mutex `state:"nosave"`
children map[string]*dentry
+ // childrenNames stores the names of all children of the dentry. Verity
+ // uses it to check whether a child is expected; it is populated when
+ // verity is enabled or when the map is loaded from the Merkle tree file.
+ childrenNames map[string]struct{}
+
// lowerVD is the VirtualDentry in the underlying file system.
lowerVD vfs.VirtualDentry
@@ -603,7 +679,7 @@ func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linu
return linux.Statx{}, err
}
if fd.d.verityEnabled() {
- if err := fd.d.fs.verifyStat(ctx, fd.d, stat); err != nil {
+ if err := fd.d.fs.verifyStatAndChildren(ctx, fd.d, stat); err != nil {
return linux.Statx{}, err
}
}
@@ -664,6 +740,7 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,
params := &merkletree.GenerateParams{
TreeReader: &merkleReader,
TreeWriter: &merkleWriter,
+ Children: fd.d.childrenNames,
//TODO(b/156980949): Support passing other hash algorithms.
HashAlgorithms: fd.d.fs.alg.toLinuxHashAlg(),
}
@@ -716,9 +793,45 @@ func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64,
return hash, uint64(params.Size), err
}
+// recordChildren writes the names of fd's children into the corresponding
+// Merkle tree file, and saves the offset/size of the map into xattrs.
+//
+// Preconditions: fd.d.isDir() == true
+func (fd *fileDescription) recordChildren(ctx context.Context) error {
+ // Record the children names in the Merkle tree file.
+ childrenNames, err := json.Marshal(fd.d.childrenNames)
+ if err != nil {
+ return err
+ }
+
+ stat, err := fd.merkleWriter.Stat(ctx, vfs.StatOptions{})
+ if err != nil {
+ return err
+ }
+
+ if err := fd.merkleWriter.SetXattr(ctx, &vfs.SetXattrOptions{
+ Name: childrenOffsetXattr,
+ Value: strconv.Itoa(int(stat.Size)),
+ }); err != nil {
+ return err
+ }
+ if err := fd.merkleWriter.SetXattr(ctx, &vfs.SetXattrOptions{
+ Name: childrenSizeXattr,
+ Value: strconv.Itoa(len(childrenNames)),
+ }); err != nil {
+ return err
+ }
+
+ if _, err = fd.merkleWriter.Write(ctx, usermem.BytesIOSequence(childrenNames), vfs.WriteOptions{}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// enableVerity enables verity features on fd by generating a Merkle tree file
// and storing its hash in its parent directory's Merkle tree.
-func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (uintptr, error) {
+func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {
if !fd.d.fs.allowRuntimeEnable {
return 0, syserror.EPERM
}
@@ -761,6 +874,9 @@ func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (ui
}); err != nil {
return 0, err
}
+
+ // Add the current child's name to parent's childrenNames.
+ fd.d.parent.childrenNames[fd.d.name] = struct{}{}
}
// Record the size of the data being hashed for fd.
@@ -770,12 +886,18 @@ func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (ui
}); err != nil {
return 0, err
}
+
+ if fd.d.isDir() {
+ if err := fd.recordChildren(ctx); err != nil {
+ return 0, err
+ }
+ }
fd.d.hash = append(fd.d.hash, hash...)
return 0, nil
}
// measureVerity returns the hash of fd, saved in verityDigest.
-func (fd *fileDescription) measureVerity(ctx context.Context, uio usermem.IO, verityDigest usermem.Addr) (uintptr, error) {
+func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest usermem.Addr) (uintptr, error) {
t := kernel.TaskFromContext(ctx)
if t == nil {
return 0, syserror.EINVAL
@@ -815,7 +937,7 @@ func (fd *fileDescription) measureVerity(ctx context.Context, uio usermem.IO, ve
return 0, err
}
-func (fd *fileDescription) verityFlags(ctx context.Context, uio usermem.IO, flags usermem.Addr) (uintptr, error) {
+func (fd *fileDescription) verityFlags(ctx context.Context, flags usermem.Addr) (uintptr, error) {
f := int32(0)
// All enabled files should store a hash. This flag is not settable via
@@ -836,11 +958,11 @@ func (fd *fileDescription) verityFlags(ctx context.Context, uio usermem.IO, flag
func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {
switch cmd := args[1].Uint(); cmd {
case linux.FS_IOC_ENABLE_VERITY:
- return fd.enableVerity(ctx, uio)
+ return fd.enableVerity(ctx)
case linux.FS_IOC_MEASURE_VERITY:
- return fd.measureVerity(ctx, uio, args[2].Pointer())
+ return fd.measureVerity(ctx, args[2].Pointer())
case linux.FS_IOC_GETFLAGS:
- return fd.verityFlags(ctx, uio, args[2].Pointer())
+ return fd.verityFlags(ctx, args[2].Pointer())
default:
// TODO(b/169682228): Investigate which ioctl commands should
// be allowed.
@@ -902,14 +1024,15 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
}
n, err := merkletree.Verify(&merkletree.VerifyParams{
- Out: dst.Writer(ctx),
- File: &dataReader,
- Tree: &merkleReader,
- Size: int64(size),
- Name: fd.d.name,
- Mode: fd.d.mode,
- UID: fd.d.uid,
- GID: fd.d.gid,
+ Out: dst.Writer(ctx),
+ File: &dataReader,
+ Tree: &merkleReader,
+ Size: int64(size),
+ Name: fd.d.name,
+ Mode: fd.d.mode,
+ UID: fd.d.uid,
+ GID: fd.d.gid,
+ Children: fd.d.childrenNames,
//TODO(b/156980949): Support passing other hash algorithms.
HashAlgorithms: fd.d.fs.alg.toLinuxHashAlg(),
ReadOffset: offset,
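
recordChildren, added in verity.go above, is the writer side of the same layout: when verity is enabled on a directory, the children map is JSON-encoded, appended at the current end of the Merkle tree file, and its offset and length are recorded in the childrenOffset/childrenSize xattrs. The pattern, sketched against an ordinary file with the xattr writes replaced by return values; names are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// appendChildren writes the serialized children names at the end of the
// Merkle file and returns the (offset, size) pair that the patch stores in
// the user.merkle.childrenOffset / user.merkle.childrenSize xattrs.
func appendChildren(merklePath string, children map[string]struct{}) (offset int64, size int, err error) {
	blob, err := json.Marshal(children)
	if err != nil {
		return 0, 0, err
	}
	f, err := os.OpenFile(merklePath, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return 0, 0, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return 0, 0, err
	}
	offset = st.Size() // current end of the Merkle tree data
	if _, err := f.WriteAt(blob, offset); err != nil {
		return 0, 0, err
	}
	return offset, len(blob), nil
}

func main() {
	off, size, err := appendChildren("merkle.tmp", map[string]struct{}{"a.txt": {}, "b.txt": {}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("children blob at offset %d, %d bytes\n", off, size)
}
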
diff --git a/pkg/sentry/fsimpl/verity/verity_test.go b/pkg/sentry/fsimpl/verity/verity_test.go
index b2da9dd96..7196e74eb 100644
--- a/pkg/sentry/fsimpl/verity/verity_test.go
+++ b/pkg/sentry/fsimpl/verity/verity_test.go
@@ -18,6 +18,7 @@ import (
"fmt"
"io"
"math/rand"
+ "strconv"
"testing"
"time"
@@ -307,6 +308,56 @@ func TestReopenUnmodifiedFileSucceeds(t *testing.T) {
}
}
+// TestOpenNonexistentFile ensures that opening a nonexistent file does not
+// trigger verification failure, even if the parent directory is verified.
+func TestOpenNonexistentFile(t *testing.T) {
+ vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
+ if err != nil {
+ t.Fatalf("newVerityRoot: %v", err)
+ }
+
+ filename := "verity-test-file"
+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
+ if err != nil {
+ t.Fatalf("newFileFD: %v", err)
+ }
+
+ // Enable verity on the file.
+ var args arch.SyscallArguments
+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("Ioctl: %v", err)
+ }
+
+ // Enable verity on the parent directory.
+ parentFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: root,
+ Start: root,
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ })
+ if err != nil {
+ t.Fatalf("OpenAt: %v", err)
+ }
+
+ if _, err := parentFD.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("Ioctl: %v", err)
+ }
+
+ // Ensure that opening a nonexistent file in the parent directory fails
+ // with ENOENT rather than a verification failure.
+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: root,
+ Start: root,
+ Path: fspath.Parse(filename + "abc"),
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ Mode: linux.ModeRegular,
+ }); err != syserror.ENOENT {
+ t.Errorf("OpenAt unexpected error: %v", err)
+ }
+}
+
// TestPReadModifiedFileFails ensures that read from a modified verity file
// fails.
func TestPReadModifiedFileFails(t *testing.T) {
@@ -439,10 +490,10 @@ func TestModifiedMerkleFails(t *testing.T) {
// Flip a random bit in the Merkle tree file.
stat, err := lowerMerkleFD.Stat(ctx, vfs.StatOptions{})
if err != nil {
- t.Fatalf("stat: %v", err)
+ t.Errorf("lowerMerkleFD.Stat: %v", err)
}
- merkleSize := int(stat.Size)
- if err := corruptRandomBit(ctx, lowerMerkleFD, merkleSize); err != nil {
+
+ if err := corruptRandomBit(ctx, lowerMerkleFD, int(stat.Size)); err != nil {
t.Fatalf("corruptRandomBit: %v", err)
}
@@ -510,11 +561,17 @@ func TestModifiedParentMerkleFails(t *testing.T) {
// This parent directory contains only one child, so any random
// modification in the parent Merkle tree should cause verification
// failure when opening the child file.
- stat, err := parentLowerMerkleFD.Stat(ctx, vfs.StatOptions{})
+ sizeString, err := parentLowerMerkleFD.GetXattr(ctx, &vfs.GetXattrOptions{
+ Name: childrenOffsetXattr,
+ Size: sizeOfStringInt32,
+ })
+ if err != nil {
+ t.Fatalf("parentLowerMerkleFD.GetXattr: %v", err)
+ }
+ parentMerkleSize, err := strconv.Atoi(sizeString)
if err != nil {
- t.Fatalf("stat: %v", err)
+ t.Fatalf("Failed convert size to int: %v", err)
}
- parentMerkleSize := int(stat.Size)
if err := corruptRandomBit(ctx, parentLowerMerkleFD, parentMerkleSize); err != nil {
t.Fatalf("corruptRandomBit: %v", err)
}
@@ -602,124 +659,165 @@ func TestModifiedStatFails(t *testing.T) {
}
}
-// TestOpenDeletedOrRenamedFileFails ensures that opening a deleted/renamed
-// verity enabled file or the corresponding Merkle tree file fails with the
-// verify error.
+// TestOpenDeletedFileFails ensures that opening a deleted verity enabled file
+// and/or the corresponding Merkle tree file fails with the verity error.
func TestOpenDeletedFileFails(t *testing.T) {
testCases := []struct {
- // Tests removing files is remove is true. Otherwise tests
- // renaming files.
- remove bool
- // The original file is removed/renamed if changeFile is true.
+ // The original file is removed if changeFile is true.
changeFile bool
- // The Merkle tree file is removed/renamed if changeMerkleFile
- // is true.
+ // The Merkle tree file is removed if changeMerkleFile is true.
changeMerkleFile bool
}{
{
- remove: true,
changeFile: true,
changeMerkleFile: false,
},
{
- remove: true,
changeFile: false,
changeMerkleFile: true,
},
{
- remove: false,
changeFile: true,
- changeMerkleFile: false,
+ changeMerkleFile: true,
},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("changeFile:%t, changeMerkleFile:%t", tc.changeFile, tc.changeMerkleFile), func(t *testing.T) {
+ vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
+ if err != nil {
+ t.Fatalf("newVerityRoot: %v", err)
+ }
+
+ filename := "verity-test-file"
+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
+ if err != nil {
+ t.Fatalf("newFileFD: %v", err)
+ }
+
+ // Enable verity on the file.
+ var args arch.SyscallArguments
+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("Ioctl: %v", err)
+ }
+
+ rootLowerVD := root.Dentry().Impl().(*dentry).lowerVD
+ if tc.changeFile {
+ if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
+ Path: fspath.Parse(filename),
+ }); err != nil {
+ t.Fatalf("UnlinkAt: %v", err)
+ }
+ }
+ if tc.changeMerkleFile {
+ if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
+ Path: fspath.Parse(merklePrefix + filename),
+ }); err != nil {
+ t.Fatalf("UnlinkAt: %v", err)
+ }
+ }
+
+ // Ensure reopening the verity enabled file fails.
+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: root,
+ Start: root,
+ Path: fspath.Parse(filename),
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ Mode: linux.ModeRegular,
+ }); err != syserror.EIO {
+ t.Errorf("got OpenAt error: %v, expected EIO", err)
+ }
+ })
+ }
+}
+
+// TestOpenRenamedFileFails ensures that opening a renamed verity enabled file
+// and/or the corresponding Merkle tree file fails with the verity error.
+func TestOpenRenamedFileFails(t *testing.T) {
+ testCases := []struct {
+ // The original file is renamed if changeFile is true.
+ changeFile bool
+ // The Merkle tree file is renamed if changeMerkleFile is true.
+ changeMerkleFile bool
+ }{
{
- remove: false,
changeFile: true,
changeMerkleFile: false,
},
+ {
+ changeFile: false,
+ changeMerkleFile: true,
+ },
+ {
+ changeFile: true,
+ changeMerkleFile: true,
+ },
}
for _, tc := range testCases {
- t.Run(fmt.Sprintf("remove:%t", tc.remove), func(t *testing.T) {
- for _, alg := range hashAlgs {
- vfsObj, root, ctx, err := newVerityRoot(t, alg)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
+ t.Run(fmt.Sprintf("changeFile:%t, changeMerkleFile:%t", tc.changeFile, tc.changeMerkleFile), func(t *testing.T) {
+ vfsObj, root, ctx, err := newVerityRoot(t, SHA256)
+ if err != nil {
+ t.Fatalf("newVerityRoot: %v", err)
+ }
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
+ filename := "verity-test-file"
+ fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
+ if err != nil {
+ t.Fatalf("newFileFD: %v", err)
+ }
- rootLowerVD := root.Dentry().Impl().(*dentry).lowerVD
- if tc.remove {
- if tc.changeFile {
- if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(filename),
- }); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + filename),
- }); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- } else {
- newFilename := "renamed-test-file"
- if tc.changeFile {
- if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(filename),
- }, &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(newFilename),
- }, &vfs.RenameOptions{}); err != nil {
- t.Fatalf("RenameAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + filename),
- }, &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + newFilename),
- }, &vfs.RenameOptions{}); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- }
+ // Enable verity on the file.
+ var args arch.SyscallArguments
+ args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
+ if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
+ t.Fatalf("Ioctl: %v", err)
+ }
- // Ensure reopening the verity enabled file fails.
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
+ rootLowerVD := root.Dentry().Impl().(*dentry).lowerVD
+ newFilename := "renamed-test-file"
+ if tc.changeFile {
+ if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- Mode: linux.ModeRegular,
- }); err != syserror.EIO {
- t.Errorf("got OpenAt error: %v, expected EIO", err)
+ }, &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
+ Path: fspath.Parse(newFilename),
+ }, &vfs.RenameOptions{}); err != nil {
+ t.Fatalf("RenameAt: %v", err)
}
}
+ if tc.changeMerkleFile {
+ if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
+ Path: fspath.Parse(merklePrefix + filename),
+ }, &vfs.PathOperation{
+ Root: rootLowerVD,
+ Start: rootLowerVD,
+ Path: fspath.Parse(merklePrefix + newFilename),
+ }, &vfs.RenameOptions{}); err != nil {
+ t.Fatalf("UnlinkAt: %v", err)
+ }
+ }
+
+ // Ensure reopening the verity enabled file fails.
+ if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
+ Root: root,
+ Start: root,
+ Path: fspath.Parse(filename),
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDONLY,
+ Mode: linux.ModeRegular,
+ }); err != syserror.EIO {
+ t.Errorf("got OpenAt error: %v, expected EIO", err)
+ }
})
}
}