author     Jamie Liu <jamieliu@google.com>     2019-07-18 15:09:14 -0700
committer  gVisor bot <gvisor-bot@google.com>  2019-07-18 15:10:29 -0700
commit     163ab5e9bab4f14923433967656d20f169d0f904 (patch)
tree       5e51b1573e48fe87fe0e277a32f13c78b0c2f058 /pkg/sentry/vfs/mount_test.go
parent     6f7e2bb388cb29a355dece8921671c0085f53ea9 (diff)
Sentry virtual filesystem, v2
Major differences from the current ("v1") sentry VFS:
- Path resolution is Filesystem-driven (FilesystemImpl methods call
vfs.ResolvingPath methods) rather than VFS-driven (fs package owns a
Dirent tree and calls fs.InodeOperations methods to populate it). This
drastically improves performance, primarily by reducing overhead from
inefficient synchronization and indirection. It also makes it possible
to implement remote filesystem protocols that translate FS system calls
into single RPCs, rather than having to make (at least) one RPC per path
component, significantly reducing the latency of remote filesystems
(especially during cold starts and for uncacheable shared filesystems).
- Mounts are correctly represented: crossing a mount point is a separate
lookup keyed on contextual state (the current mount) rather than a direct
replacement in a fs.Dirent tree (see the sketch after this list). This
makes it possible to support (non-recursive) bind mounts and mount
namespaces.
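To make the second point concrete, here is a minimal sketch of
filesystem-driven resolution with a separate mount check. Every type and
helper below (mountKey, walkOne, the VFS map) is a simplified stand-in
invented for illustration; this is not the real vfs.ResolvingPath or
mountTable API:

// Hypothetical sketch; not the real gVisor API.
package main

import (
	"fmt"
	"strings"
)

// Dentry and Mount are simplified stand-ins for vfs.Dentry and vfs.Mount.
type Dentry struct {
	name     string
	children map[string]*Dentry
}

type Mount struct {
	root *Dentry
}

// mountKey mirrors the idea behind the mount lookup: a mount is found by
// (parent mount, mount point), i.e. by contextual state, not by a node
// swapped into the dentry tree.
type mountKey struct {
	parent *Mount
	point  *Dentry
}

type VFS struct {
	mounts map[mountKey]*Mount
}

// walkOne advances resolution by one path component. The filesystem itself
// resolves the child (filesystem-driven resolution); the VFS is consulted
// only for the separate mount-boundary check.
func (vfs *VFS) walkOne(mnt *Mount, d *Dentry, component string) (*Mount, *Dentry, error) {
	child, ok := d.children[component]
	if !ok {
		return nil, nil, fmt.Errorf("%s: no such file or directory", component)
	}
	if m, ok := vfs.mounts[mountKey{mnt, child}]; ok {
		return m, m.root, nil // crossed into the mount rooted at child
	}
	return mnt, child, nil
}

func main() {
	// A root filesystem with directory /mnt, and a second filesystem
	// mounted on it.
	point := &Dentry{name: "mnt"}
	root := &Dentry{name: "/", children: map[string]*Dentry{"mnt": point}}
	rootMount := &Mount{root: root}
	inner := &Dentry{name: "inner-root"}
	vfs := &VFS{mounts: map[mountKey]*Mount{
		{parent: rootMount, point: point}: {root: inner},
	}}

	mnt, d := rootMount, root
	for _, c := range strings.Split("mnt", "/") {
		var err error
		if mnt, d, err = vfs.walkOne(mnt, d, c); err != nil {
			panic(err)
		}
	}
	fmt.Println(d.name) // inner-root: the walk crossed the mount boundary
}

Because the lookup is keyed on the (mount, dentry) pair rather than mutating
the dentry tree, the same dentry can be a mount point under one mount (or
namespace) and an ordinary directory under another, which is exactly what
bind mounts and mount namespaces require.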
Included in this CL is fsimpl/memfs, an incomplete in-memory filesystem
that exists primarily to demonstrate intended filesystem implementation
patterns and for benchmarking:
BenchmarkVFS1TmpfsStat/1-6 3000000 497 ns/op
BenchmarkVFS1TmpfsStat/2-6 2000000 676 ns/op
BenchmarkVFS1TmpfsStat/3-6 2000000 904 ns/op
BenchmarkVFS1TmpfsStat/8-6 1000000 1944 ns/op
BenchmarkVFS1TmpfsStat/64-6 100000 14067 ns/op
BenchmarkVFS1TmpfsStat/100-6 50000 21700 ns/op
BenchmarkVFS2MemfsStat/1-6 10000000 197 ns/op
BenchmarkVFS2MemfsStat/2-6 5000000 233 ns/op
BenchmarkVFS2MemfsStat/3-6 5000000 268 ns/op
BenchmarkVFS2MemfsStat/8-6 3000000 477 ns/op
BenchmarkVFS2MemfsStat/64-6 500000 2592 ns/op
BenchmarkVFS2MemfsStat/100-6 300000 4045 ns/op
BenchmarkVFS1TmpfsMountStat/1-6 2000000 679 ns/op
BenchmarkVFS1TmpfsMountStat/2-6 2000000 912 ns/op
BenchmarkVFS1TmpfsMountStat/3-6 1000000 1113 ns/op
BenchmarkVFS1TmpfsMountStat/8-6 1000000 2118 ns/op
BenchmarkVFS1TmpfsMountStat/64-6 100000 14251 ns/op
BenchmarkVFS1TmpfsMountStat/100-6 100000 22397 ns/op
BenchmarkVFS2MemfsMountStat/1-6 5000000 317 ns/op
BenchmarkVFS2MemfsMountStat/2-6 5000000 361 ns/op
BenchmarkVFS2MemfsMountStat/3-6 5000000 387 ns/op
BenchmarkVFS2MemfsMountStat/8-6 3000000 582 ns/op
BenchmarkVFS2MemfsMountStat/64-6 500000 2699 ns/op
BenchmarkVFS2MemfsMountStat/100-6 300000 4133 ns/op
From this we can infer that, on this machine (a rough arithmetic check
follows the list):
- Constant cost for tmpfs stat() is ~160ns in VFS2 and ~280ns in VFS1.
- Per-path-component cost is ~35ns in VFS2 and ~215ns in VFS1, a
difference of about 6x.
- The cost of crossing a mount boundary is about 80ns in VFS2
(MemfsMountStat/1 does approximately the same amount of work as
MemfsStat/2, except that it also crosses a mount boundary). This is an
inescapable cost of the separate mount lookup needed to support bind
mounts and mount namespaces.
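As a sanity check of those figures, using two-point slopes from the numbers
above (not a proper regression, so expect some benchmark noise):
- Per-component cost: VFS2 (4045 - 197) / 99 ≈ 39 ns; VFS1 (21700 - 497) / 99 ≈ 214 ns.
- Constant cost: VFS2 197 - 39 ≈ 158 ns; VFS1 497 - 214 ≈ 283 ns.
- Mount-crossing cost: MemfsMountStat/1 (317 ns) - MemfsStat/2 (233 ns) ≈ 84 ns.
All three are consistent with the estimates above.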
PiperOrigin-RevId: 258853946
Diffstat (limited to 'pkg/sentry/vfs/mount_test.go')
-rw-r--r--  pkg/sentry/vfs/mount_test.go  465
1 file changed, 465 insertions(+), 0 deletions(-)
diff --git a/pkg/sentry/vfs/mount_test.go b/pkg/sentry/vfs/mount_test.go
new file mode 100644
index 000000000..f394d7483
--- /dev/null
+++ b/pkg/sentry/vfs/mount_test.go
@@ -0,0 +1,465 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vfs
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+	"testing"
+)
+
+func TestMountTableLookupEmpty(t *testing.T) {
+	var mt mountTable
+	mt.Init()
+
+	parent := &Mount{}
+	point := &Dentry{}
+	if m := mt.Lookup(parent, point); m != nil {
+		t.Errorf("empty mountTable lookup: got %p, wanted nil", m)
+	}
+}
+
+func TestMountTableInsertLookup(t *testing.T) {
+	var mt mountTable
+	mt.Init()
+
+	mount := &Mount{}
+	mount.storeKey(&Mount{}, &Dentry{})
+	mt.Insert(mount)
+
+	if m := mt.Lookup(mount.parent(), mount.point()); m != mount {
+		t.Errorf("mountTable positive lookup: got %p, wanted %p", m, mount)
+	}
+
+	otherParent := &Mount{}
+	if m := mt.Lookup(otherParent, mount.point()); m != nil {
+		t.Errorf("mountTable lookup with wrong mount parent: got %p, wanted nil", m)
+	}
+	otherPoint := &Dentry{}
+	if m := mt.Lookup(mount.parent(), otherPoint); m != nil {
+		t.Errorf("mountTable lookup with wrong mount point: got %p, wanted nil", m)
+	}
+}
+
+// TODO: concurrent lookup/insertion/removal
+
+// must be powers of 2
+var benchNumMounts = []int{1 << 2, 1 << 5, 1 << 8}
+
+// For all of the following:
+//
+// - BenchmarkMountTableFoo tests usage pattern "Foo" for mountTable.
+//
+// - BenchmarkMountMapFoo tests usage pattern "Foo" for a
+// sync.RWMutex-protected map. (Mutator benchmarks do not use a RWMutex, since
+// mountTable also requires external synchronization between mutators.)
+//
+// - BenchmarkMountSyncMapFoo tests usage pattern "Foo" for a sync.Map.
+//
+// ParallelLookup is by far the most common and performance-sensitive operation
+// for this application. NegativeLookup is also important, but less so (only
+// relevant with multiple mount namespaces and significant differences in
+// mounts between them). Insertion and removal are benchmarked for
+// completeness.
+const enableComparativeBenchmarks = false
+
+func newBenchMount() *Mount {
+	mount := &Mount{}
+	mount.storeKey(&Mount{}, &Dentry{})
+	return mount
+}
+
+func vdkey(mnt *Mount) VirtualDentry {
+	parent, point := mnt.loadKey()
+	return VirtualDentry{
+		mount:  parent,
+		dentry: point,
+	}
+}
+
+func BenchmarkMountTableParallelLookup(b *testing.B) {
+	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
+		for _, numMounts := range benchNumMounts {
+			desc := fmt.Sprintf("%dx%d", numG, numMounts)
+			b.Run(desc, func(b *testing.B) {
+				var mt mountTable
+				mt.Init()
+				keys := make([]VirtualDentry, 0, numMounts)
+				for i := 0; i < numMounts; i++ {
+					mount := newBenchMount()
+					mt.Insert(mount)
+					keys = append(keys, vdkey(mount))
+				}
+
+				var ready sync.WaitGroup
+				begin := make(chan struct{})
+				var end sync.WaitGroup
+				for g := 0; g < numG; g++ {
+					ready.Add(1)
+					end.Add(1)
+					go func() {
+						defer end.Done()
+						ready.Done()
+						<-begin
+						for i := 0; i < b.N; i++ {
+							k := keys[i&(numMounts-1)]
+							m := mt.Lookup(k.mount, k.dentry)
+							if m == nil {
+								b.Fatalf("lookup failed")
+							}
+							if parent := m.parent(); parent != k.mount {
+								b.Fatalf("lookup returned mount with parent %p, wanted %p", parent, k.mount)
+							}
+							if point := m.point(); point != k.dentry {
+								b.Fatalf("lookup returned mount with point %p, wanted %p", point, k.dentry)
+							}
+						}
+					}()
+				}
+
+				ready.Wait()
+				b.ResetTimer()
+				close(begin)
+				end.Wait()
+			})
+		}
+	}
+}
+
+func BenchmarkMountMapParallelLookup(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
+		for _, numMounts := range benchNumMounts {
+			desc := fmt.Sprintf("%dx%d", numG, numMounts)
+			b.Run(desc, func(b *testing.B) {
+				var mu sync.RWMutex
+				ms := make(map[VirtualDentry]*Mount)
+				keys := make([]VirtualDentry, 0, numMounts)
+				for i := 0; i < numMounts; i++ {
+					mount := newBenchMount()
+					key := vdkey(mount)
+					ms[key] = mount
+					keys = append(keys, key)
+				}
+
+				var ready sync.WaitGroup
+				begin := make(chan struct{})
+				var end sync.WaitGroup
+				for g := 0; g < numG; g++ {
+					ready.Add(1)
+					end.Add(1)
+					go func() {
+						defer end.Done()
+						ready.Done()
+						<-begin
+						for i := 0; i < b.N; i++ {
+							k := keys[i&(numMounts-1)]
+							mu.RLock()
+							m := ms[k]
+							mu.RUnlock()
+							if m == nil {
+								b.Fatalf("lookup failed")
+							}
+							if parent := m.parent(); parent != k.mount {
+								b.Fatalf("lookup returned mount with parent %p, wanted %p", parent, k.mount)
+							}
+							if point := m.point(); point != k.dentry {
+								b.Fatalf("lookup returned mount with point %p, wanted %p", point, k.dentry)
+							}
+						}
+					}()
+				}
+
+				ready.Wait()
+				b.ResetTimer()
+				close(begin)
+				end.Wait()
+			})
+		}
+	}
+}
+
+func BenchmarkMountSyncMapParallelLookup(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
+		for _, numMounts := range benchNumMounts {
+			desc := fmt.Sprintf("%dx%d", numG, numMounts)
+			b.Run(desc, func(b *testing.B) {
+				var ms sync.Map
+				keys := make([]VirtualDentry, 0, numMounts)
+				for i := 0; i < numMounts; i++ {
+					mount := newBenchMount()
+					key := vdkey(mount)
+					ms.Store(key, mount)
+					keys = append(keys, key)
+				}
+
+				var ready sync.WaitGroup
+				begin := make(chan struct{})
+				var end sync.WaitGroup
+				for g := 0; g < numG; g++ {
+					ready.Add(1)
+					end.Add(1)
+					go func() {
+						defer end.Done()
+						ready.Done()
+						<-begin
+						for i := 0; i < b.N; i++ {
+							k := keys[i&(numMounts-1)]
+							mi, ok := ms.Load(k)
+							if !ok {
+								b.Fatalf("lookup failed")
+							}
+							m := mi.(*Mount)
+							if parent := m.parent(); parent != k.mount {
+								b.Fatalf("lookup returned mount with parent %p, wanted %p", parent, k.mount)
+							}
+							if point := m.point(); point != k.dentry {
+								b.Fatalf("lookup returned mount with point %p, wanted %p", point, k.dentry)
+							}
+						}
+					}()
+				}
+
+				ready.Wait()
+				b.ResetTimer()
+				close(begin)
+				end.Wait()
+			})
+		}
+	}
+}
+
+func BenchmarkMountTableNegativeLookup(b *testing.B) {
+	for _, numMounts := range benchNumMounts {
+		desc := fmt.Sprintf("%d", numMounts)
+		b.Run(desc, func(b *testing.B) {
+			var mt mountTable
+			mt.Init()
+			for i := 0; i < numMounts; i++ {
+				mt.Insert(newBenchMount())
+			}
+			negkeys := make([]VirtualDentry, 0, numMounts)
+			for i := 0; i < numMounts; i++ {
+				negkeys = append(negkeys, VirtualDentry{
+					mount:  &Mount{},
+					dentry: &Dentry{},
+				})
+			}
+
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				k := negkeys[i&(numMounts-1)]
+				m := mt.Lookup(k.mount, k.dentry)
+				if m != nil {
+					b.Fatalf("lookup got %p, wanted nil", m)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkMountMapNegativeLookup(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	for _, numMounts := range benchNumMounts {
+		desc := fmt.Sprintf("%d", numMounts)
+		b.Run(desc, func(b *testing.B) {
+			var mu sync.RWMutex
+			ms := make(map[VirtualDentry]*Mount)
+			for i := 0; i < numMounts; i++ {
+				mount := newBenchMount()
+				ms[vdkey(mount)] = mount
+			}
+			negkeys := make([]VirtualDentry, 0, numMounts)
+			for i := 0; i < numMounts; i++ {
+				negkeys = append(negkeys, VirtualDentry{
+					mount:  &Mount{},
+					dentry: &Dentry{},
+				})
+			}
+
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				k := negkeys[i&(numMounts-1)]
+				mu.RLock()
+				m := ms[k]
+				mu.RUnlock()
+				if m != nil {
+					b.Fatalf("lookup got %p, wanted nil", m)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkMountSyncMapNegativeLookup(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	for _, numMounts := range benchNumMounts {
+		desc := fmt.Sprintf("%d", numMounts)
+		b.Run(desc, func(b *testing.B) {
+			var ms sync.Map
+			for i := 0; i < numMounts; i++ {
+				mount := newBenchMount()
+				ms.Store(vdkey(mount), mount)
+			}
+			negkeys := make([]VirtualDentry, 0, numMounts)
+			for i := 0; i < numMounts; i++ {
+				negkeys = append(negkeys, VirtualDentry{
+					mount:  &Mount{},
+					dentry: &Dentry{},
+				})
+			}
+
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				k := negkeys[i&(numMounts-1)]
+				m, _ := ms.Load(k)
+				if m != nil {
+					b.Fatalf("lookup got %p, wanted nil", m)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkMountTableInsert(b *testing.B) {
+	// Preallocate Mounts so that allocation time isn't included in the
+	// benchmark.
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+
+	var mt mountTable
+	mt.Init()
+	b.ResetTimer()
+	for i := range mounts {
+		mt.Insert(mounts[i])
+	}
+}
+
+func BenchmarkMountMapInsert(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	// Preallocate Mounts so that allocation time isn't included in the
+	// benchmark.
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+
+	ms := make(map[VirtualDentry]*Mount)
+	b.ResetTimer()
+	for i := range mounts {
+		mount := mounts[i]
+		ms[vdkey(mount)] = mount
+	}
+}
+
+func BenchmarkMountSyncMapInsert(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	// Preallocate Mounts so that allocation time isn't included in the
+	// benchmark.
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+
+	var ms sync.Map
+	b.ResetTimer()
+	for i := range mounts {
+		mount := mounts[i]
+		ms.Store(vdkey(mount), mount)
+	}
+}
+
+func BenchmarkMountTableRemove(b *testing.B) {
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+	var mt mountTable
+	mt.Init()
+	for i := range mounts {
+		mt.Insert(mounts[i])
+	}
+
+	b.ResetTimer()
+	for i := range mounts {
+		mt.Remove(mounts[i])
+	}
+}
+
+func BenchmarkMountMapRemove(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+	ms := make(map[VirtualDentry]*Mount)
+	for i := range mounts {
+		mount := mounts[i]
+		ms[vdkey(mount)] = mount
+	}
+
+	b.ResetTimer()
+	for i := range mounts {
+		mount := mounts[i]
+		delete(ms, vdkey(mount))
+	}
+}
+
+func BenchmarkMountSyncMapRemove(b *testing.B) {
+	if !enableComparativeBenchmarks {
+		b.Skipf("comparative benchmarks are disabled")
+	}
+
+	mounts := make([]*Mount, 0, b.N)
+	for i := 0; i < b.N; i++ {
+		mounts = append(mounts, newBenchMount())
+	}
+	var ms sync.Map
+	for i := range mounts {
+		mount := mounts[i]
+		ms.Store(vdkey(mount), mount)
+	}
+
+	b.ResetTimer()
+	for i := range mounts {
+		mount := mounts[i]
+		ms.Delete(vdkey(mount))
+	}
+}
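For reference, a typical way to run these benchmarks with the plain Go
toolchain would be the following (gVisor's own build uses Bazel, so the
exact invocation may differ; note also that the Map/SyncMap comparison
benchmarks are skipped unless enableComparativeBenchmarks is changed to
true in the file):

	go test -run=NONE -bench=Mount ./pkg/sentry/vfs/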