Diffstat (limited to 'pkg/sentry/fs/ext')
-rw-r--r--  pkg/sentry/fs/ext/BUILD                 1
-rw-r--r--  pkg/sentry/fs/ext/block_map_file.go   178
-rw-r--r--  pkg/sentry/fs/ext/block_map_test.go   157
-rw-r--r--  pkg/sentry/fs/ext/ext_test.go          12
-rw-r--r--  pkg/sentry/fs/ext/extent_test.go       54
5 files changed, 333 insertions(+), 69 deletions(-)
diff --git a/pkg/sentry/fs/ext/BUILD b/pkg/sentry/fs/ext/BUILD
index 8158aa522..e3d617576 100644
--- a/pkg/sentry/fs/ext/BUILD
+++ b/pkg/sentry/fs/ext/BUILD
@@ -54,6 +54,7 @@ go_test(
name = "ext_test",
size = "small",
srcs = [
+ "block_map_test.go",
"ext_test.go",
"extent_test.go",
],
diff --git a/pkg/sentry/fs/ext/block_map_file.go b/pkg/sentry/fs/ext/block_map_file.go
index 9aabbd145..e9b11d65d 100644
--- a/pkg/sentry/fs/ext/block_map_file.go
+++ b/pkg/sentry/fs/ext/block_map_file.go
@@ -16,9 +16,15 @@ package ext
import (
"io"
- "sync"
+ "math"
"gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/syserror"
+)
+
+const (
+ // numDirectBlks is the number of direct blocks in ext block map inodes.
+ numDirectBlks = 12
)
// blockMapFile is a type of regular file which uses direct/indirect block
@@ -26,14 +32,25 @@ import (
type blockMapFile struct {
regFile regularFile
- // mu serializes changes to fileToPhysBlks.
- mu sync.RWMutex
+ // directBlks are the direct block numbers. The physical blocks pointed to by
+ // these hold file data. Covers file blocks 0 to 11.
+ directBlks [numDirectBlks]uint32
+
+ // indirectBlk is the physical block which contains (blkSize/4) direct block
+ // numbers (as uint32 integers).
+ indirectBlk uint32
+
+ // doubleIndirectBlk is the physical block which contains (blkSize/4) indirect
+ // block numbers (as uint32 integers).
+ doubleIndirectBlk uint32
+
+ // tripleIndirectBlk is the physical block which contains (blkSize/4) doubly
+ // indirect block numbers (as uint32 integers).
+ tripleIndirectBlk uint32
- // fileToPhysBlks maps the file block numbers to the physical block numbers.
- // the physical block number for the (i)th file block is stored in the (i)th
- // index. This is initialized (at max) with the first 12 entries. The rest
- // have to be read in from disk when required. Protected by mu.
- fileToPhysBlks []uint32
+ // coverage[i] indicates the number of bytes of file data that a node at
+ // height i covers. Height 0 is a direct block.
+ coverage [4]uint64
}
// Compiles only if blockMapFile implements fileReader.
@@ -41,7 +58,12 @@ var _ fileReader = (*blockMapFile)(nil)
// Read implements fileReader.getFileReader.
func (f *blockMapFile) getFileReader(dev io.ReaderAt, blkSize uint64, offset uint64) io.Reader {
- panic("unimplemented")
+ return &blockMapReader{
+ dev: dev,
+ file: f,
+ fileOff: offset,
+ blkSize: blkSize,
+ }
}
// newBlockMapFile is the blockMapFile constructor. It initializes the file to
@@ -50,16 +72,138 @@ func newBlockMapFile(blkSize uint64, regFile regularFile) (*blockMapFile, error)
file := &blockMapFile{regFile: regFile}
file.regFile.impl = file
- toFill := uint64(12)
- blksUsed := regFile.blksUsed(blkSize)
- if blksUsed < toFill {
- toFill = blksUsed
+ for i := uint(0); i < 4; i++ {
+ file.coverage[i] = getCoverage(blkSize, i)
}
blkMap := regFile.inode.diskInode.Data()
- file.fileToPhysBlks = make([]uint32, toFill)
- for i := uint64(0); i < toFill; i++ {
- binary.Unmarshal(blkMap[i*4:(i+1)*4], binary.LittleEndian, &file.fileToPhysBlks[i])
- }
+ binary.Unmarshal(blkMap[:numDirectBlks*4], binary.LittleEndian, &file.directBlks)
+ binary.Unmarshal(blkMap[numDirectBlks*4:(numDirectBlks+1)*4], binary.LittleEndian, &file.indirectBlk)
+ binary.Unmarshal(blkMap[(numDirectBlks+1)*4:(numDirectBlks+2)*4], binary.LittleEndian, &file.doubleIndirectBlk)
+ binary.Unmarshal(blkMap[(numDirectBlks+2)*4:(numDirectBlks+3)*4], binary.LittleEndian, &file.tripleIndirectBlk)
return file, nil
}
+
+// blockMapReader implements io.Reader which reads file data by traversing the
+// block map structure on disk as needed.
+type blockMapReader struct {
+ dev io.ReaderAt
+ file *blockMapFile
+ fileOff uint64
+ blkSize uint64
+}
+
+// Compiles only if blockMapReader implements io.Reader.
+var _ io.Reader = (*blockMapReader)(nil)
+
+// Read implements io.Reader.Read.
+func (r *blockMapReader) Read(dst []byte) (int, error) {
+ if len(dst) == 0 {
+ return 0, nil
+ }
+
+ if r.fileOff >= r.file.regFile.inode.diskInode.Size() {
+ return 0, io.EOF
+ }
+
+ // dirBlksEnd is the file offset until which direct blocks cover file data.
+ // Direct blocks cover 0 <= file offset < dirBlksEnd.
+ dirBlksEnd := numDirectBlks * r.file.coverage[0]
+
+ // indirBlkEnd is the file offset until which the indirect block covers file
+ // data. The indirect block covers dirBlksEnd <= file offset < indirBlkEnd.
+ indirBlkEnd := dirBlksEnd + r.file.coverage[1]
+
+ // doubIndirBlkEnd is the file offset until which the double indirect block
+ // covers file data. The double indirect block covers the range
+ // indirBlkEnd <= file offset < doubIndirBlkEnd.
+ doubIndirBlkEnd := indirBlkEnd + r.file.coverage[2]
+
+ read := 0
+ toRead := len(dst)
+ for read < toRead {
+ var err error
+ var curR int
+
+ // Figure out which block to delegate the read to.
+ switch {
+ case r.fileOff < dirBlksEnd:
+ // Direct block.
+ curR, err = r.read(r.file.directBlks[r.fileOff/r.blkSize], r.fileOff%r.blkSize, 0, dst[read:])
+ case r.fileOff < indirBlkEnd:
+ // Indirect block.
+ curR, err = r.read(r.file.indirectBlk, r.fileOff-dirBlksEnd, 1, dst[read:])
+ case r.fileOff < doubIndirBlkEnd:
+ // Doubly indirect block.
+ curR, err = r.read(r.file.doubleIndirectBlk, r.fileOff-indirBlkEnd, 2, dst[read:])
+ default:
+ // Triply indirect block.
+ curR, err = r.read(r.file.tripleIndirectBlk, r.fileOff-doubIndirBlkEnd, 3, dst[read:])
+ }
+
+ read += curR
+ if err != nil {
+ return read, err
+ }
+ }
+
+ return read, nil
+}
+
+// read is the recursive step of the Read function. It relies on knowing the
+// current node's location on disk (curPhyBlk) and its height in the block map
+// tree. A height of 0 means that the current node holds file data. relFileOff
+// is the offset from which to start reading under the current node; it is
+// relative to the current node.
+func (r *blockMapReader) read(curPhyBlk uint32, relFileOff uint64, height uint, dst []byte) (int, error) {
+ curPhyBlkOff := int64(curPhyBlk) * int64(r.blkSize)
+ if height == 0 {
+ toRead := int(r.blkSize - relFileOff)
+ if len(dst) < toRead {
+ toRead = len(dst)
+ }
+
+ n, _ := r.dev.ReadAt(dst[:toRead], curPhyBlkOff+int64(relFileOff))
+ r.fileOff += uint64(n)
+ if n < toRead {
+ return n, syserror.EIO
+ }
+ return n, nil
+ }
+
+ childCov := r.file.coverage[height-1]
+ startIdx := relFileOff / childCov
+ endIdx := r.blkSize / 4 // This is exclusive.
+ wantEndIdx := (relFileOff + uint64(len(dst))) / childCov
+ wantEndIdx++ // Make this exclusive.
+ if wantEndIdx < endIdx {
+ endIdx = wantEndIdx
+ }
+
+ read := 0
+ curChildOff := relFileOff % childCov
+ for i := startIdx; i < endIdx; i++ {
+ var childPhyBlk uint32
+ if err := readFromDisk(r.dev, curPhyBlkOff+int64(i*4), &childPhyBlk); err != nil {
+ return read, err
+ }
+
+ n, err := r.read(childPhyBlk, curChildOff, height-1, dst[read:])
+ read += n
+ if err != nil {
+ return read, err
+ }
+
+ curChildOff = 0
+ }
+
+ return read, nil
+}
+
+// getCoverage returns the number of bytes a node at the given height covers.
+// Height 0 is the file data block itself. Height 1 is the indirect block.
+//
+// Formula: blkSize * ((blkSize / 4)^height)
+func getCoverage(blkSize uint64, height uint) uint64 {
+ return blkSize * uint64(math.Pow(float64(blkSize/4), float64(height)))
+}
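
Aside (not part of the change): a minimal, standalone sketch of how the coverage values above partition a file into direct, indirect, doubly indirect and triply indirect ranges. The block size, offset and helper names are illustrative assumptions, and coverage is computed here with integer multiplication rather than math.Pow.

package main

import "fmt"

// coverage returns the number of bytes a node at the given height covers,
// mirroring the formula blkSize * ((blkSize / 4)^height).
func coverage(blkSize uint64, height uint) uint64 {
	cov := blkSize
	entriesPerBlk := blkSize / 4 // each entry is a 4-byte block number
	for i := uint(0); i < height; i++ {
		cov *= entriesPerBlk
	}
	return cov
}

func main() {
	const blkSize = 4096 // assumed block size for illustration
	dirEnd := 12 * coverage(blkSize, 0)
	indirEnd := dirEnd + coverage(blkSize, 1)
	doubEnd := indirEnd + coverage(blkSize, 2)
	tripEnd := doubEnd + coverage(blkSize, 3)
	fmt.Println(dirEnd, indirEnd, doubEnd, tripEnd)

	// Tier selection for a file offset, in the spirit of blockMapReader.Read.
	off := uint64(5 << 20) // 5 MiB falls in the doubly indirect range here
	switch {
	case off < dirEnd:
		fmt.Println("direct block")
	case off < indirEnd:
		fmt.Println("indirect block")
	case off < doubEnd:
		fmt.Println("doubly indirect block")
	default:
		fmt.Println("triply indirect block")
	}
}

With a 4 KiB block size this prints boundaries at 48 KiB for the direct blocks and roughly 4 MiB for the indirect block, matching the dirBlksEnd/indirBlkEnd/doubIndirBlkEnd cutoffs computed in Read.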
diff --git a/pkg/sentry/fs/ext/block_map_test.go b/pkg/sentry/fs/ext/block_map_test.go
new file mode 100644
index 000000000..b92ae80c8
--- /dev/null
+++ b/pkg/sentry/fs/ext/block_map_test.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "gvisor.dev/gvisor/pkg/binary"
+ "gvisor.dev/gvisor/pkg/sentry/fs/ext/disklayout"
+)
+
+// These consts are for mocking the block map tree.
+const (
+ mockBMBlkSize = uint32(16)
+ mockBMDiskSize = 2500
+)
+
+// TestBlockMapReader stress tests block map reader functionality. It reads the
+// file from every possible offset in the block map structure.
+func TestBlockMapReader(t *testing.T) {
+ dev, mockBMFile, want := blockMapSetUp(t)
+ n := len(want)
+
+ for from := 0; from < n; from++ {
+ fileReader := mockBMFile.getFileReader(dev, uint64(mockBMBlkSize), uint64(from))
+ got := make([]byte, n-from)
+
+ if read, err := io.ReadFull(fileReader, got); err != nil {
+ t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
+ }
+
+ if diff := cmp.Diff(got, want[from:]); diff != "" {
+ t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
+ }
+ }
+}
+
+// blkNumGen is a number generator which gives block numbers for building the
+// block map file on disk. It gives unique numbers in a random order, which
+// helps create an extremely fragmented filesystem.
+type blkNumGen struct {
+ nums []uint32
+}
+
+// newBlkNumGen is the blkNumGen constructor.
+func newBlkNumGen() *blkNumGen {
+ blkNums := &blkNumGen{}
+ lim := mockBMDiskSize / mockBMBlkSize
+ blkNums.nums = make([]uint32, lim)
+ for i := uint32(0); i < lim; i++ {
+ blkNums.nums[i] = i
+ }
+
+ rand.Shuffle(int(lim), func(i, j int) {
+ blkNums.nums[i], blkNums.nums[j] = blkNums.nums[j], blkNums.nums[i]
+ })
+ return blkNums
+}
+
+// next returns the next random block number.
+func (n *blkNumGen) next() uint32 {
+ ret := n.nums[0]
+ n.nums = n.nums[1:]
+ return ret
+}
+
+// blockMapSetUp creates a mock disk and a block map file. It initializes the
+// block map file with 12 direct blocks, 1 indirect block, 1 doubly indirect
+// block and 1 triply indirect block (i.e. it fills the block map to capacity).
+// It initializes the disk to reflect the inode, and also returns the file data
+// that the inode covers and that is written to disk.
+func blockMapSetUp(t *testing.T) (io.ReaderAt, *blockMapFile, []byte) {
+ mockDisk := make([]byte, mockBMDiskSize)
+ regFile := regularFile{
+ inode: inode{
+ diskInode: &disklayout.InodeNew{
+ InodeOld: disklayout.InodeOld{
+ SizeLo: getMockBMFileFize(),
+ },
+ },
+ },
+ }
+
+ var fileData []byte
+ blkNums := newBlkNumGen()
+ var data []byte
+
+ // Write the direct blocks.
+ for i := 0; i < numDirectBlks; i++ {
+ curBlkNum := blkNums.next()
+ data = binary.Marshal(data, binary.LittleEndian, curBlkNum)
+ fileData = append(fileData, writeFileDataToBlock(mockDisk, curBlkNum, 0, blkNums)...)
+ }
+
+ // Write to the indirect block.
+ indirectBlk := blkNums.next()
+ data = binary.Marshal(data, binary.LittleEndian, indirectBlk)
+ fileData = append(fileData, writeFileDataToBlock(mockDisk, indirectBlk, 1, blkNums)...)
+
+ // Write to the doubly indirect block.
+ doublyIndirectBlk := blkNums.next()
+ data = binary.Marshal(data, binary.LittleEndian, doublyIndirectBlk)
+ fileData = append(fileData, writeFileDataToBlock(mockDisk, doublyIndirectBlk, 2, blkNums)...)
+
+ // Write to the triply indirect block.
+ triplyIndirectBlk := blkNums.next()
+ data = binary.Marshal(data, binary.LittleEndian, triplyIndirectBlk)
+ fileData = append(fileData, writeFileDataToBlock(mockDisk, triplyIndirectBlk, 3, blkNums)...)
+
+ copy(regFile.inode.diskInode.Data(), data)
+
+ mockFile, err := newBlockMapFile(uint64(mockBMBlkSize), regFile)
+ if err != nil {
+ t.Fatalf("newBlockMapFile failed: %v", err)
+ }
+ return bytes.NewReader(mockDisk), mockFile, fileData
+}
+
+// writeFileDataToBlock writes random file data to the data blocks rooted at the
+// given block (recursing through indirect blocks) and returns the data written.
+func writeFileDataToBlock(disk []byte, blkNum uint32, height uint, blkNums *blkNumGen) []byte {
+ if height == 0 {
+ start := blkNum * mockBMBlkSize
+ end := start + mockBMBlkSize
+ rand.Read(disk[start:end])
+ return disk[start:end]
+ }
+
+ var fileData []byte
+ for off := blkNum * mockBMBlkSize; off < (blkNum+1)*mockBMBlkSize; off += 4 {
+ curBlkNum := blkNums.next()
+ copy(disk[off:off+4], binary.Marshal(nil, binary.LittleEndian, curBlkNum))
+ fileData = append(fileData, writeFileDataToBlock(disk, curBlkNum, height-1, blkNums)...)
+ }
+ return fileData
+}
+
+// getMockBMFileFize gets the size of the mock block map file which is used for
+// testing.
+func getMockBMFileFize() uint32 {
+ return uint32(numDirectBlks*getCoverage(uint64(mockBMBlkSize), 0) + getCoverage(uint64(mockBMBlkSize), 1) + getCoverage(uint64(mockBMBlkSize), 2) + getCoverage(uint64(mockBMBlkSize), 3))
+}
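
Aside (not part of the test): the arithmetic behind the mock constants above. With mockBMBlkSize = 16, each indirect level stores 16/4 = 4 block numbers, so a fully populated block map covers 1536 bytes of file data and occupies 123 blocks (1968 bytes) of the 2500-byte mock disk. A minimal sketch that reproduces those numbers:

package main

import "fmt"

func main() {
	const blkSize = 16          // mockBMBlkSize
	const entries = blkSize / 4 // block numbers per indirect block

	// File data covered by 12 direct blocks plus one indirect, one doubly
	// indirect and one triply indirect block.
	data := 12*blkSize +
		entries*blkSize +
		entries*entries*blkSize +
		entries*entries*entries*blkSize
	fmt.Println(data) // 1536, the value returned by getMockBMFileFize

	// Data blocks (96) plus metadata blocks: 1 indirect, 1+4 for the doubly
	// indirect tree, 1+4+16 for the triply indirect tree.
	blocks := (12 + entries + entries*entries + entries*entries*entries) + (1 + 5 + 21)
	fmt.Println(blocks, blocks*blkSize) // 123 blocks, 1968 bytes (< 2500)
}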
diff --git a/pkg/sentry/fs/ext/ext_test.go b/pkg/sentry/fs/ext/ext_test.go
index 18764e92a..6396886cc 100644
--- a/pkg/sentry/fs/ext/ext_test.go
+++ b/pkg/sentry/fs/ext/ext_test.go
@@ -41,18 +41,6 @@ var (
ext4ImagePath = path.Join(assetsDir, "tiny.ext4")
)
-func beginning(_ uint64) uint64 {
- return 0
-}
-
-func middle(i uint64) uint64 {
- return i / 2
-}
-
-func end(i uint64) uint64 {
- return i
-}
-
// setUp opens imagePath as an ext Filesystem and returns all necessary
// elements required to run tests. If error is non-nil, it also returns a tear
// down function which must be called after the test is run for clean up.
diff --git a/pkg/sentry/fs/ext/extent_test.go b/pkg/sentry/fs/ext/extent_test.go
index dff401114..9b3f5469e 100644
--- a/pkg/sentry/fs/ext/extent_test.go
+++ b/pkg/sentry/fs/ext/extent_test.go
@@ -142,48 +142,22 @@ var (
}
)
-// TestExtentReader tests extentReader functionality. We should be able to use
-// the file reader like any other io.Reader.
+// TestExtentReader stress tests extentReader functionality. It reads the file
+// from every possible offset in the extent tree.
func TestExtentReader(t *testing.T) {
- type extentReaderTest struct {
- name string
- from func(uint64) uint64
- to func(uint64) uint64
- }
-
- tests := []extentReaderTest{
- {
- name: "read first half",
- from: beginning,
- to: middle,
- },
- {
- name: "read entire file",
- from: beginning,
- to: end,
- },
- {
- name: "read second half",
- from: middle,
- to: end,
- },
- }
-
dev, mockExtentFile, want := extentTreeSetUp(t, node0)
- size := mockExtentFile.regFile.inode.diskInode.Size()
+ n := len(want)
- for _, test := range tests {
- from := test.from(size)
- to := test.to(size)
- fileReader := mockExtentFile.getFileReader(dev, mockExtentBlkSize, from)
+ for from := 0; from < n; from++ {
+ fileReader := mockExtentFile.getFileReader(dev, mockExtentBlkSize, uint64(from))
+ got := make([]byte, n-from)
- got := make([]byte, to-from)
- if _, err := io.ReadFull(fileReader, got); err != nil {
- t.Errorf("file read failed: %v", err)
+ if read, err := io.ReadFull(fileReader, got); err != nil {
+ t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
}
- if diff := cmp.Diff(got, want[from:to]); diff != "" {
- t.Errorf("file data mismatch (-want +got):\n%s", diff)
+ if diff := cmp.Diff(got, want[from:]); diff != "" {
+ t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
}
}
}
@@ -239,7 +213,7 @@ func writeTree(in *inode, disk []byte, root *disklayout.ExtentNode, mockExtentBl
var fileData []byte
for _, ep := range root.Entries {
if root.Header.Height == 0 {
- fileData = append(fileData, writeRandomFileData(disk, ep.Entry.(*disklayout.Extent))...)
+ fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
} else {
fileData = append(fileData, writeTreeToDisk(disk, ep)...)
}
@@ -260,7 +234,7 @@ func writeTreeToDisk(disk []byte, curNode disklayout.ExtentEntryPair) []byte {
var fileData []byte
for _, ep := range curNode.Node.Entries {
if curNode.Node.Header.Height == 0 {
- fileData = append(fileData, writeRandomFileData(disk, ep.Entry.(*disklayout.Extent))...)
+ fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
} else {
fileData = append(fileData, writeTreeToDisk(disk, ep)...)
}
@@ -268,9 +242,9 @@ func writeTreeToDisk(disk []byte, curNode disklayout.ExtentEntryPair) []byte {
return fileData
}
-// writeRandomFileData writes random bytes to the blocks on disk that the
+// writeFileDataToExtent writes random bytes to the blocks on disk that the
// passed extent points to.
-func writeRandomFileData(disk []byte, ex *disklayout.Extent) []byte {
+func writeFileDataToExtent(disk []byte, ex *disklayout.Extent) []byte {
phyExStartBlk := ex.PhysicalBlock()
phyExStartOff := phyExStartBlk * mockExtentBlkSize
phyExEndOff := phyExStartOff + uint64(ex.Length)*mockExtentBlkSize
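
Aside (not part of the change): the rewritten tests for both readers share the same read-from-every-offset pattern. A minimal, standalone sketch of that pattern against an arbitrary reader factory; checkAllOffsets and its arguments are hypothetical names.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// checkAllOffsets reads want[from:] through a fresh reader for every offset
// and reports the first mismatch.
func checkAllOffsets(newReader func(from int) io.Reader, want []byte) error {
	for from := 0; from < len(want); from++ {
		got := make([]byte, len(want)-from)
		if _, err := io.ReadFull(newReader(from), got); err != nil {
			return fmt.Errorf("read from offset %d: %v", from, err)
		}
		if !bytes.Equal(got, want[from:]) {
			return fmt.Errorf("data mismatch at offset %d", from)
		}
	}
	return nil
}

func main() {
	want := []byte("example file contents")
	err := checkAllOffsets(func(from int) io.Reader {
		return bytes.NewReader(want[from:])
	}, want)
	fmt.Println(err) // <nil>
}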