Diffstat (limited to 'pkg/sentry/fsimpl')
-rw-r--r--  pkg/sentry/fsimpl/devpts/BUILD  |  63
-rw-r--r--  pkg/sentry/fsimpl/devpts/devpts_state_autogen.go  |  474
-rw-r--r--  pkg/sentry/fsimpl/devpts/devpts_test.go  |  56
-rw-r--r--  pkg/sentry/fsimpl/devpts/root_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/devtmpfs/BUILD  |  37
-rw-r--r--  pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go  |  39
-rw-r--r--  pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go  |  230
-rw-r--r--  pkg/sentry/fsimpl/eventfd/BUILD  |  33
-rw-r--r--  pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go  |  55
-rw-r--r--  pkg/sentry/fsimpl/eventfd/eventfd_test.go  |  97
-rw-r--r--  pkg/sentry/fsimpl/ext/BUILD  |  104
-rw-r--r--  pkg/sentry/fsimpl/ext/README.md  |  117
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/README.md  |  36
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/bigfile.txt  |  41
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/file.txt  |  1
l---------  pkg/sentry/fsimpl/ext/assets/symlink.txt  |  1
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/tiny.ext2  |  bin 65536 -> 0 bytes
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/tiny.ext3  |  bin 65536 -> 0 bytes
-rw-r--r--  pkg/sentry/fsimpl/ext/assets/tiny.ext4  |  bin 65536 -> 0 bytes
-rw-r--r--  pkg/sentry/fsimpl/ext/benchmark/BUILD  |  17
-rw-r--r--  pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go  |  211
-rwxr-xr-x  pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh  |  72
-rw-r--r--  pkg/sentry/fsimpl/ext/block_map_file.go  |  203
-rw-r--r--  pkg/sentry/fsimpl/ext/block_map_test.go  |  160
-rw-r--r--  pkg/sentry/fsimpl/ext/dentry.go  |  82
-rw-r--r--  pkg/sentry/fsimpl/ext/directory.go  |  323
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/BUILD  |  48
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/block_group.go  |  143
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/block_group_32.go  |  74
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/block_group_64.go  |  95
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/block_group_test.go  |  28
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/dirent.go  |  75
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/dirent_new.go  |  63
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/dirent_old.go  |  51
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/dirent_test.go  |  28
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/disklayout.go  |  48
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/extent.go  |  155
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/extent_test.go  |  30
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/inode.go  |  277
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/inode_new.go  |  98
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/inode_old.go  |  119
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/inode_test.go  |  224
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/superblock.go  |  477
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/superblock_32.go  |  78
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/superblock_64.go  |  97
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/superblock_old.go  |  107
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/superblock_test.go  |  30
-rw-r--r--  pkg/sentry/fsimpl/ext/disklayout/test_utils.go  |  30
-rw-r--r--  pkg/sentry/fsimpl/ext/ext.go  |  159
-rw-r--r--  pkg/sentry/fsimpl/ext/ext_test.go  |  926
-rw-r--r--  pkg/sentry/fsimpl/ext/extent_file.go  |  239
-rw-r--r--  pkg/sentry/fsimpl/ext/extent_test.go  |  266
-rw-r--r--  pkg/sentry/fsimpl/ext/file_description.go  |  65
-rw-r--r--  pkg/sentry/fsimpl/ext/filesystem.go  |  550
-rw-r--r--  pkg/sentry/fsimpl/ext/inode.go  |  244
-rw-r--r--  pkg/sentry/fsimpl/ext/regular_file.go  |  166
-rw-r--r--  pkg/sentry/fsimpl/ext/symlink.go  |  115
-rw-r--r--  pkg/sentry/fsimpl/ext/utils.go  |  94
-rw-r--r--  pkg/sentry/fsimpl/fuse/BUILD  |  87
-rw-r--r--  pkg/sentry/fsimpl/fuse/connection_test.go  |  117
-rw-r--r--  pkg/sentry/fsimpl/fuse/dev_test.go  |  323
-rw-r--r--  pkg/sentry/fsimpl/fuse/fuse_state_autogen.go  |  525
-rw-r--r--  pkg/sentry/fsimpl/fuse/inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/fuse/request_list.go  |  193
-rw-r--r--  pkg/sentry/fsimpl/fuse/utils_test.go  |  132
-rw-r--r--  pkg/sentry/fsimpl/gofer/BUILD  |  94
-rw-r--r--  pkg/sentry/fsimpl/gofer/dentry_list.go  |  193
-rw-r--r--  pkg/sentry/fsimpl/gofer/fstree.go  |  55
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer_state_autogen.go  |  580
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer_test.go  |  68
-rw-r--r--  pkg/sentry/fsimpl/host/BUILD  |  79
-rw-r--r--  pkg/sentry/fsimpl/host/connected_endpoint_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/host/host_state_autogen.go  |  274
-rw-r--r--  pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go  |  3
-rw-r--r--  pkg/sentry/fsimpl/host/inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/kernfs/BUILD  |  148
-rw-r--r--  pkg/sentry/fsimpl/kernfs/dentry_list.go  |  193
-rw-r--r--  pkg/sentry/fsimpl/kernfs/fstree.go  |  55
-rw-r--r--  pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go  |  925
-rw-r--r--  pkg/sentry/fsimpl/kernfs/kernfs_test.go  |  343
-rw-r--r--  pkg/sentry/fsimpl/kernfs/slot_list.go  |  193
-rw-r--r--  pkg/sentry/fsimpl/kernfs/static_directory_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/overlay/BUILD  |  47
-rw-r--r--  pkg/sentry/fsimpl/overlay/fstree.go  |  55
-rw-r--r--  pkg/sentry/fsimpl/overlay/overlay_state_autogen.go  |  311
-rw-r--r--  pkg/sentry/fsimpl/pipefs/BUILD  |  21
-rw-r--r--  pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go  |  105
-rw-r--r--  pkg/sentry/fsimpl/proc/BUILD  |  131
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/proc/proc_state_autogen.go  |  2067
-rw-r--r--  pkg/sentry/fsimpl/proc/subtasks_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/proc/task_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_sys_test.go  |  149
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_test.go  |  510
-rw-r--r--  pkg/sentry/fsimpl/signalfd/BUILD  |  19
-rw-r--r--  pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go  |  49
-rw-r--r--  pkg/sentry/fsimpl/sockfs/BUILD  |  18
-rw-r--r--  pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go  |  90
-rw-r--r--  pkg/sentry/fsimpl/sys/BUILD  |  55
-rw-r--r--  pkg/sentry/fsimpl/sys/dir_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/sys/sys_state_autogen.go  |  247
-rw-r--r--  pkg/sentry/fsimpl/sys/sys_test.go  |  89
-rw-r--r--  pkg/sentry/fsimpl/testutil/BUILD  |  37
-rw-r--r--  pkg/sentry/fsimpl/testutil/kernel.go  |  185
-rw-r--r--  pkg/sentry/fsimpl/testutil/testutil.go  |  286
-rw-r--r--  pkg/sentry/fsimpl/timerfd/BUILD  |  17
-rw-r--r--  pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go  |  52
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/BUILD  |  127
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/benchmark_test.go  |  488
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/dentry_list.go  |  193
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/fstree.go  |  55
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/inode_refs.go  |  128
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/pipe_test.go  |  239
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/regular_file_test.go  |  349
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/stat_test.go  |  236
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go  |  556
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/tmpfs_test.go  |  157
-rw-r--r--  pkg/sentry/fsimpl/verity/BUILD  |  51
-rw-r--r--  pkg/sentry/fsimpl/verity/filesystem.go  |  1046
-rw-r--r--  pkg/sentry/fsimpl/verity/save_restore.go  |  27
-rw-r--r--  pkg/sentry/fsimpl/verity/verity.go  |  906
-rw-r--r--  pkg/sentry/fsimpl/verity/verity_test.go  |  700
125 files changed, 9201 insertions, 14364 deletions
diff --git a/pkg/sentry/fsimpl/devpts/BUILD b/pkg/sentry/fsimpl/devpts/BUILD
deleted file mode 100644
index 6af3c3781..000000000
--- a/pkg/sentry/fsimpl/devpts/BUILD
+++ /dev/null
@@ -1,63 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "root_inode_refs",
- out = "root_inode_refs.go",
- package = "devpts",
- prefix = "rootInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "rootInode",
- },
-)
-
-go_library(
- name = "devpts",
- srcs = [
- "devpts.go",
- "line_discipline.go",
- "master.go",
- "queue.go",
- "replica.go",
- "root_inode_refs.go",
- "terminal.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "devpts_test",
- size = "small",
- srcs = ["devpts_test.go"],
- library = ":devpts",
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/contexttest",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go b/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go
new file mode 100644
index 000000000..6383dd133
--- /dev/null
+++ b/pkg/sentry/fsimpl/devpts/devpts_state_autogen.go
@@ -0,0 +1,474 @@
+// automatically generated by stateify.
+
+package devpts
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{
+ "initErr",
+ "fs",
+ "root",
+ }
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+ stateSinkObject.Save(0, &fstype.initErr)
+ stateSinkObject.Save(1, &fstype.fs)
+ stateSinkObject.Save(2, &fstype.root)
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fstype.initErr)
+ stateSourceObject.Load(1, &fstype.fs)
+ stateSourceObject.Load(2, &fstype.root)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *rootInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.rootInode"
+}
+
+func (i *rootInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "rootInodeRefs",
+ "locks",
+ "master",
+ "replicas",
+ "nextIdx",
+ }
+}
+
+func (i *rootInode) beforeSave() {}
+
+func (i *rootInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.rootInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.master)
+ stateSinkObject.Save(10, &i.replicas)
+ stateSinkObject.Save(11, &i.nextIdx)
+}
+
+func (i *rootInode) afterLoad() {}
+
+func (i *rootInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.rootInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.master)
+ stateSourceObject.Load(10, &i.replicas)
+ stateSourceObject.Load(11, &i.nextIdx)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (l *lineDiscipline) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.lineDiscipline"
+}
+
+func (l *lineDiscipline) StateFields() []string {
+ return []string{
+ "size",
+ "inQueue",
+ "outQueue",
+ "termios",
+ "column",
+ "masterWaiter",
+ "replicaWaiter",
+ }
+}
+
+func (l *lineDiscipline) beforeSave() {}
+
+func (l *lineDiscipline) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.size)
+ stateSinkObject.Save(1, &l.inQueue)
+ stateSinkObject.Save(2, &l.outQueue)
+ stateSinkObject.Save(3, &l.termios)
+ stateSinkObject.Save(4, &l.column)
+ stateSinkObject.Save(5, &l.masterWaiter)
+ stateSinkObject.Save(6, &l.replicaWaiter)
+}
+
+func (l *lineDiscipline) afterLoad() {}
+
+func (l *lineDiscipline) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.size)
+ stateSourceObject.Load(1, &l.inQueue)
+ stateSourceObject.Load(2, &l.outQueue)
+ stateSourceObject.Load(3, &l.termios)
+ stateSourceObject.Load(4, &l.column)
+ stateSourceObject.Load(5, &l.masterWaiter)
+ stateSourceObject.Load(6, &l.replicaWaiter)
+}
+
+func (o *outputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.outputQueueTransformer"
+}
+
+func (o *outputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (o *outputQueueTransformer) beforeSave() {}
+
+func (o *outputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+}
+
+func (o *outputQueueTransformer) afterLoad() {}
+
+func (o *outputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *inputQueueTransformer) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.inputQueueTransformer"
+}
+
+func (i *inputQueueTransformer) StateFields() []string {
+ return []string{}
+}
+
+func (i *inputQueueTransformer) beforeSave() {}
+
+func (i *inputQueueTransformer) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *inputQueueTransformer) afterLoad() {}
+
+func (i *inputQueueTransformer) StateLoad(stateSourceObject state.Source) {
+}
+
+func (mi *masterInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.masterInode"
+}
+
+func (mi *masterInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "root",
+ }
+}
+
+func (mi *masterInode) beforeSave() {}
+
+func (mi *masterInode) StateSave(stateSinkObject state.Sink) {
+ mi.beforeSave()
+ stateSinkObject.Save(0, &mi.implStatFS)
+ stateSinkObject.Save(1, &mi.InodeAttrs)
+ stateSinkObject.Save(2, &mi.InodeNoopRefCount)
+ stateSinkObject.Save(3, &mi.InodeNotDirectory)
+ stateSinkObject.Save(4, &mi.InodeNotSymlink)
+ stateSinkObject.Save(5, &mi.locks)
+ stateSinkObject.Save(6, &mi.root)
+}
+
+func (mi *masterInode) afterLoad() {}
+
+func (mi *masterInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mi.implStatFS)
+ stateSourceObject.Load(1, &mi.InodeAttrs)
+ stateSourceObject.Load(2, &mi.InodeNoopRefCount)
+ stateSourceObject.Load(3, &mi.InodeNotDirectory)
+ stateSourceObject.Load(4, &mi.InodeNotSymlink)
+ stateSourceObject.Load(5, &mi.locks)
+ stateSourceObject.Load(6, &mi.root)
+}
+
+func (mfd *masterFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.masterFileDescription"
+}
+
+func (mfd *masterFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "t",
+ }
+}
+
+func (mfd *masterFileDescription) beforeSave() {}
+
+func (mfd *masterFileDescription) StateSave(stateSinkObject state.Sink) {
+ mfd.beforeSave()
+ stateSinkObject.Save(0, &mfd.vfsfd)
+ stateSinkObject.Save(1, &mfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &mfd.LockFD)
+ stateSinkObject.Save(3, &mfd.inode)
+ stateSinkObject.Save(4, &mfd.t)
+}
+
+func (mfd *masterFileDescription) afterLoad() {}
+
+func (mfd *masterFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mfd.vfsfd)
+ stateSourceObject.Load(1, &mfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &mfd.LockFD)
+ stateSourceObject.Load(3, &mfd.inode)
+ stateSourceObject.Load(4, &mfd.t)
+}
+
+func (q *queue) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.queue"
+}
+
+func (q *queue) StateFields() []string {
+ return []string{
+ "readBuf",
+ "waitBuf",
+ "waitBufLen",
+ "readable",
+ "transformer",
+ }
+}
+
+func (q *queue) beforeSave() {}
+
+func (q *queue) StateSave(stateSinkObject state.Sink) {
+ q.beforeSave()
+ stateSinkObject.Save(0, &q.readBuf)
+ stateSinkObject.Save(1, &q.waitBuf)
+ stateSinkObject.Save(2, &q.waitBufLen)
+ stateSinkObject.Save(3, &q.readable)
+ stateSinkObject.Save(4, &q.transformer)
+}
+
+func (q *queue) afterLoad() {}
+
+func (q *queue) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &q.readBuf)
+ stateSourceObject.Load(1, &q.waitBuf)
+ stateSourceObject.Load(2, &q.waitBufLen)
+ stateSourceObject.Load(3, &q.readable)
+ stateSourceObject.Load(4, &q.transformer)
+}
+
+func (ri *replicaInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.replicaInode"
+}
+
+func (ri *replicaInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "root",
+ "t",
+ }
+}
+
+func (ri *replicaInode) beforeSave() {}
+
+func (ri *replicaInode) StateSave(stateSinkObject state.Sink) {
+ ri.beforeSave()
+ stateSinkObject.Save(0, &ri.implStatFS)
+ stateSinkObject.Save(1, &ri.InodeAttrs)
+ stateSinkObject.Save(2, &ri.InodeNoopRefCount)
+ stateSinkObject.Save(3, &ri.InodeNotDirectory)
+ stateSinkObject.Save(4, &ri.InodeNotSymlink)
+ stateSinkObject.Save(5, &ri.locks)
+ stateSinkObject.Save(6, &ri.root)
+ stateSinkObject.Save(7, &ri.t)
+}
+
+func (ri *replicaInode) afterLoad() {}
+
+func (ri *replicaInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ri.implStatFS)
+ stateSourceObject.Load(1, &ri.InodeAttrs)
+ stateSourceObject.Load(2, &ri.InodeNoopRefCount)
+ stateSourceObject.Load(3, &ri.InodeNotDirectory)
+ stateSourceObject.Load(4, &ri.InodeNotSymlink)
+ stateSourceObject.Load(5, &ri.locks)
+ stateSourceObject.Load(6, &ri.root)
+ stateSourceObject.Load(7, &ri.t)
+}
+
+func (rfd *replicaFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.replicaFileDescription"
+}
+
+func (rfd *replicaFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ }
+}
+
+func (rfd *replicaFileDescription) beforeSave() {}
+
+func (rfd *replicaFileDescription) StateSave(stateSinkObject state.Sink) {
+ rfd.beforeSave()
+ stateSinkObject.Save(0, &rfd.vfsfd)
+ stateSinkObject.Save(1, &rfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &rfd.LockFD)
+ stateSinkObject.Save(3, &rfd.inode)
+}
+
+func (rfd *replicaFileDescription) afterLoad() {}
+
+func (rfd *replicaFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rfd.vfsfd)
+ stateSourceObject.Load(1, &rfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &rfd.LockFD)
+ stateSourceObject.Load(3, &rfd.inode)
+}
+
+func (r *rootInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.rootInodeRefs"
+}
+
+func (r *rootInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *rootInodeRefs) beforeSave() {}
+
+func (r *rootInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *rootInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (tm *Terminal) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devpts.Terminal"
+}
+
+func (tm *Terminal) StateFields() []string {
+ return []string{
+ "n",
+ "ld",
+ "masterKTTY",
+ "replicaKTTY",
+ }
+}
+
+func (tm *Terminal) beforeSave() {}
+
+func (tm *Terminal) StateSave(stateSinkObject state.Sink) {
+ tm.beforeSave()
+ stateSinkObject.Save(0, &tm.n)
+ stateSinkObject.Save(1, &tm.ld)
+ stateSinkObject.Save(2, &tm.masterKTTY)
+ stateSinkObject.Save(3, &tm.replicaKTTY)
+}
+
+func (tm *Terminal) afterLoad() {}
+
+func (tm *Terminal) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tm.n)
+ stateSourceObject.Load(1, &tm.ld)
+ stateSourceObject.Load(2, &tm.masterKTTY)
+ stateSourceObject.Load(3, &tm.replicaKTTY)
+}
+
+func init() {
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*rootInode)(nil))
+ state.Register((*implStatFS)(nil))
+ state.Register((*lineDiscipline)(nil))
+ state.Register((*outputQueueTransformer)(nil))
+ state.Register((*inputQueueTransformer)(nil))
+ state.Register((*masterInode)(nil))
+ state.Register((*masterFileDescription)(nil))
+ state.Register((*queue)(nil))
+ state.Register((*replicaInode)(nil))
+ state.Register((*replicaFileDescription)(nil))
+ state.Register((*rootInodeRefs)(nil))
+ state.Register((*Terminal)(nil))
+}
diff --git a/pkg/sentry/fsimpl/devpts/devpts_test.go b/pkg/sentry/fsimpl/devpts/devpts_test.go
deleted file mode 100644
index 448390cfe..000000000
--- a/pkg/sentry/fsimpl/devpts/devpts_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package devpts
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func TestSimpleMasterToReplica(t *testing.T) {
- ld := newLineDiscipline(linux.DefaultReplicaTermios)
- ctx := contexttest.Context(t)
- inBytes := []byte("hello, tty\n")
- src := usermem.BytesIOSequence(inBytes)
- outBytes := make([]byte, 32)
- dst := usermem.BytesIOSequence(outBytes)
-
- // Write to the input queue.
- nw, err := ld.inputQueueWrite(ctx, src)
- if err != nil {
- t.Fatalf("error writing to input queue: %v", err)
- }
- if nw != int64(len(inBytes)) {
- t.Fatalf("wrote wrong length: got %d, want %d", nw, len(inBytes))
- }
-
- // Read from the input queue.
- nr, err := ld.inputQueueRead(ctx, dst)
- if err != nil {
- t.Fatalf("error reading from input queue: %v", err)
- }
- if nr != int64(len(inBytes)) {
- t.Fatalf("read wrong length: got %d, want %d", nr, len(inBytes))
- }
-
- outStr := string(outBytes[:nr])
- inStr := string(inBytes)
- if outStr != inStr {
- t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
- }
-}
diff --git a/pkg/sentry/fsimpl/devpts/root_inode_refs.go b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
new file mode 100644
index 000000000..cbafd113a
--- /dev/null
+++ b/pkg/sentry/fsimpl/devpts/root_inode_refs.go
@@ -0,0 +1,128 @@
+package devpts
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const rootInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var rootInodeobj *rootInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type rootInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *rootInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", rootInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *rootInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *rootInodeRefs) LogRefs() bool {
+ return rootInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *rootInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *rootInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *rootInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *rootInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *rootInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *rootInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
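The generated refs type above relies on the speculative-reference scheme described in its comments: real references (minus one) live in the low 32 bits of a single int64, and TryIncRef parks a temporary reference in the high 32 bits so it can detect an already-released object without a CompareAndSwap loop. The following is a minimal, self-contained sketch of that pattern for illustration only; it is not the gVisor code, and the type and function names are invented for the example.

```go
// Illustrative reimplementation of the speculative-reference counting pattern
// used by the generated *Refs types (not the gVisor implementation).
package main

import (
	"fmt"
	"sync/atomic"
)

type refs struct {
	// refCount holds (real references - 1) in its low 32 bits and
	// speculative references in its high 32 bits, so the zero value
	// represents a single live reference.
	refCount int64
}

func (r *refs) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
		panic("IncRef on released object")
	}
}

func (r *refs) TryIncRef() bool {
	const speculativeRef = 1 << 32
	// Take a speculative reference first. If the low 32 bits are negative,
	// the object has already been released; undo and fail.
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

func (r *refs) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < -1:
		panic("DecRef on released object")
	case v == -1:
		// The count reaches -1 when the last real reference is dropped.
		if destroy != nil {
			destroy()
		}
	}
}

func main() {
	r := &refs{} // starts with one implicit reference
	fmt.Println("TryIncRef:", r.TryIncRef())
	r.DecRef(nil)
	r.DecRef(func() { fmt.Println("destroyed") })
	fmt.Println("TryIncRef after destroy:", r.TryIncRef())
}
```

Because TryIncRef never spins, a failed attempt costs only two atomic adds, which is why the generated code prefers this layout over a plain compare-and-swap loop.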
diff --git a/pkg/sentry/fsimpl/devtmpfs/BUILD b/pkg/sentry/fsimpl/devtmpfs/BUILD
deleted file mode 100644
index e49a04c1b..000000000
--- a/pkg/sentry/fsimpl/devtmpfs/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "devtmpfs",
- srcs = [
- "devtmpfs.go",
- "save_restore.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "devtmpfs_test",
- size = "small",
- srcs = ["devtmpfs_test.go"],
- library = ":devtmpfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go
new file mode 100644
index 000000000..87f77d00c
--- /dev/null
+++ b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_state_autogen.go
@@ -0,0 +1,39 @@
+// automatically generated by stateify.
+
+package devtmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fst *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/devtmpfs.FilesystemType"
+}
+
+func (fst *FilesystemType) StateFields() []string {
+ return []string{
+ "initErr",
+ "fs",
+ "root",
+ }
+}
+
+func (fst *FilesystemType) beforeSave() {}
+
+func (fst *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fst.beforeSave()
+ stateSinkObject.Save(0, &fst.initErr)
+ stateSinkObject.Save(1, &fst.fs)
+ stateSinkObject.Save(2, &fst.root)
+}
+
+func (fst *FilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fst.initErr)
+ stateSourceObject.Load(1, &fst.fs)
+ stateSourceObject.Load(2, &fst.root)
+ stateSourceObject.AfterLoad(fst.afterLoad)
+}
+
+func init() {
+ state.Register((*FilesystemType)(nil))
+}
diff --git a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go b/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go
deleted file mode 100644
index e058eda7a..000000000
--- a/pkg/sentry/fsimpl/devtmpfs/devtmpfs_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package devtmpfs
-
-import (
- "path"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-const devPath = "/dev"
-
-func setupDevtmpfs(t *testing.T) (context.Context, *auth.Credentials, *vfs.VirtualFilesystem, vfs.VirtualDentry, func()) {
- t.Helper()
-
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- // Register tmpfs just so that we can have a root filesystem that isn't
- // devtmpfs.
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- vfsObj.MustRegisterFilesystemType("devtmpfs", &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- // Create a test mount namespace with devtmpfs mounted at "/dev".
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "tmpfs" /* source */, "tmpfs" /* fsTypeName */, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- devpop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(devPath),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &devpop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- t.Fatalf("failed to create mount point: %v", err)
- }
- if _, err := vfsObj.MountAt(ctx, creds, "devtmpfs" /* source */, &devpop, "devtmpfs" /* fsTypeName */, &vfs.MountOptions{}); err != nil {
- t.Fatalf("failed to mount devtmpfs: %v", err)
- }
-
- return ctx, creds, vfsObj, root, func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- }
-}
-
-func TestUserspaceInit(t *testing.T) {
- ctx, creds, vfsObj, root, cleanup := setupDevtmpfs(t)
- defer cleanup()
-
- a, err := NewAccessor(ctx, vfsObj, creds, "devtmpfs")
- if err != nil {
- t.Fatalf("failed to create devtmpfs.Accessor: %v", err)
- }
- defer a.Release(ctx)
-
- // Create "userspace-initialized" files using a devtmpfs.Accessor.
- if err := a.UserspaceInit(ctx); err != nil {
- t.Fatalf("failed to userspace-initialize devtmpfs: %v", err)
- }
-
- // Created files should be visible in the test mount namespace.
- links := []struct {
- source string
- target string
- }{
- {
- source: "fd",
- target: "/proc/self/fd",
- },
- {
- source: "stdin",
- target: "/proc/self/fd/0",
- },
- {
- source: "stdout",
- target: "/proc/self/fd/1",
- },
- {
- source: "stderr",
- target: "/proc/self/fd/2",
- },
- {
- source: "ptmx",
- target: "pts/ptmx",
- },
- }
-
- for _, link := range links {
- abspath := path.Join(devPath, link.source)
- if gotTarget, err := vfsObj.ReadlinkAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }); err != nil || gotTarget != link.target {
- t.Errorf("readlink(%q): got (%q, %v), wanted (%q, nil)", abspath, gotTarget, err, link.target)
- }
- }
-
- dirs := []string{"shm", "pts"}
- for _, dir := range dirs {
- abspath := path.Join(devPath, dir)
- statx, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }, &vfs.StatOptions{
- Mask: linux.STATX_MODE,
- })
- if err != nil {
- t.Errorf("stat(%q): got error %v ", abspath, err)
- continue
- }
- if want := uint16(0755) | linux.S_IFDIR; statx.Mode != want {
- t.Errorf("stat(%q): got mode %x, want %x", abspath, statx.Mode, want)
- }
- }
-}
-
-func TestCreateDeviceFile(t *testing.T) {
- ctx, creds, vfsObj, root, cleanup := setupDevtmpfs(t)
- defer cleanup()
-
- a, err := NewAccessor(ctx, vfsObj, creds, "devtmpfs")
- if err != nil {
- t.Fatalf("failed to create devtmpfs.Accessor: %v", err)
- }
- defer a.Release(ctx)
-
- devFiles := []struct {
- path string
- kind vfs.DeviceKind
- major uint32
- minor uint32
- perms uint16
- }{
- {
- path: "dummy",
- kind: vfs.CharDevice,
- major: 12,
- minor: 34,
- perms: 0600,
- },
- {
- path: "foo/bar",
- kind: vfs.BlockDevice,
- major: 13,
- minor: 35,
- perms: 0660,
- },
- {
- path: "foo/baz",
- kind: vfs.CharDevice,
- major: 12,
- minor: 40,
- perms: 0666,
- },
- {
- path: "a/b/c/d/e",
- kind: vfs.BlockDevice,
- major: 12,
- minor: 34,
- perms: 0600,
- },
- }
-
- for _, f := range devFiles {
- if err := a.CreateDeviceFile(ctx, f.path, f.kind, f.major, f.minor, f.perms); err != nil {
- t.Fatalf("failed to create device file: %v", err)
- }
- // The device special file should be visible in the test mount namespace.
- abspath := path.Join(devPath, f.path)
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(abspath),
- }, &vfs.StatOptions{
- Mask: linux.STATX_TYPE | linux.STATX_MODE,
- })
- if err != nil {
- t.Fatalf("failed to stat device file at %q: %v", abspath, err)
- }
- if stat.RdevMajor != f.major {
- t.Errorf("major device number: got %v, wanted %v", stat.RdevMajor, f.major)
- }
- if stat.RdevMinor != f.minor {
- t.Errorf("minor device number: got %v, wanted %v", stat.RdevMinor, f.minor)
- }
- wantMode := f.perms
- switch f.kind {
- case vfs.CharDevice:
- wantMode |= linux.S_IFCHR
- case vfs.BlockDevice:
- wantMode |= linux.S_IFBLK
- }
- if stat.Mode != wantMode {
- t.Errorf("device file mode: got %v, wanted %v", stat.Mode, wantMode)
- }
- }
-}
diff --git a/pkg/sentry/fsimpl/eventfd/BUILD b/pkg/sentry/fsimpl/eventfd/BUILD
deleted file mode 100644
index ea167d38c..000000000
--- a/pkg/sentry/fsimpl/eventfd/BUILD
+++ /dev/null
@@ -1,33 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "eventfd",
- srcs = ["eventfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fdnotifier",
- "//pkg/log",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "eventfd_test",
- size = "small",
- srcs = ["eventfd_test.go"],
- library = ":eventfd",
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/vfs",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go b/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go
new file mode 100644
index 000000000..3af030742
--- /dev/null
+++ b/pkg/sentry/fsimpl/eventfd/eventfd_state_autogen.go
@@ -0,0 +1,55 @@
+// automatically generated by stateify.
+
+package eventfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (efd *EventFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/eventfd.EventFileDescription"
+}
+
+func (efd *EventFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "queue",
+ "val",
+ "semMode",
+ "hostfd",
+ }
+}
+
+func (efd *EventFileDescription) beforeSave() {}
+
+func (efd *EventFileDescription) StateSave(stateSinkObject state.Sink) {
+ efd.beforeSave()
+ stateSinkObject.Save(0, &efd.vfsfd)
+ stateSinkObject.Save(1, &efd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &efd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &efd.NoLockFD)
+ stateSinkObject.Save(4, &efd.queue)
+ stateSinkObject.Save(5, &efd.val)
+ stateSinkObject.Save(6, &efd.semMode)
+ stateSinkObject.Save(7, &efd.hostfd)
+}
+
+func (efd *EventFileDescription) afterLoad() {}
+
+func (efd *EventFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &efd.vfsfd)
+ stateSourceObject.Load(1, &efd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &efd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &efd.NoLockFD)
+ stateSourceObject.Load(4, &efd.queue)
+ stateSourceObject.Load(5, &efd.val)
+ stateSourceObject.Load(6, &efd.semMode)
+ stateSourceObject.Load(7, &efd.hostfd)
+}
+
+func init() {
+ state.Register((*EventFileDescription)(nil))
+}
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd_test.go b/pkg/sentry/fsimpl/eventfd/eventfd_test.go
deleted file mode 100644
index 49916fa81..000000000
--- a/pkg/sentry/fsimpl/eventfd/eventfd_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package eventfd
-
-import (
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-func TestEventFD(t *testing.T) {
- initVals := []uint64{
- 0,
- // Using a non-zero initial value verifies that writing to an
- // eventfd signals when the eventfd's counter was already
- // non-zero.
- 343,
- }
-
- for _, initVal := range initVals {
- ctx := contexttest.Context(t)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
-
- // Make a new eventfd that is writable.
- eventfd, err := New(ctx, vfsObj, initVal, false, linux.O_RDWR)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- defer eventfd.DecRef(ctx)
-
- // Register a callback for a write event.
- w, ch := waiter.NewChannelEntry(nil)
- eventfd.EventRegister(&w, waiter.EventIn)
- defer eventfd.EventUnregister(&w)
-
- data := []byte("00000124")
- // Create and submit a write request.
- n, err := eventfd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if n != 8 {
- t.Errorf("eventfd.write wrote %d bytes, not full int64", n)
- }
-
- // Check if the callback fired due to the write event.
- select {
- case <-ch:
- default:
- t.Errorf("Didn't get notified of EventIn after write")
- }
- }
-}
-
-func TestEventFDStat(t *testing.T) {
- ctx := contexttest.Context(t)
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
-
- // Make a new eventfd that is writable.
- eventfd, err := New(ctx, vfsObj, 0, false, linux.O_RDWR)
- if err != nil {
- t.Fatalf("New() failed: %v", err)
- }
- defer eventfd.DecRef(ctx)
-
- statx, err := eventfd.Stat(ctx, vfs.StatOptions{
- Mask: linux.STATX_BASIC_STATS,
- })
- if err != nil {
- t.Fatalf("eventfd.Stat failed: %v", err)
- }
- if statx.Size != 0 {
- t.Errorf("eventfd size should be 0")
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/BUILD b/pkg/sentry/fsimpl/ext/BUILD
deleted file mode 100644
index 7b1eec3da..000000000
--- a/pkg/sentry/fsimpl/ext/BUILD
+++ /dev/null
@@ -1,104 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "dirent_list",
- out = "dirent_list.go",
- package = "ext",
- prefix = "dirent",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dirent",
- "Linker": "*dirent",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "ext",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "ext",
- srcs = [
- "block_map_file.go",
- "dentry.go",
- "directory.go",
- "dirent_list.go",
- "ext.go",
- "extent_file.go",
- "file_description.go",
- "filesystem.go",
- "fstree.go",
- "inode.go",
- "regular_file.go",
- "symlink.go",
- "utils.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/binary",
- "//pkg/context",
- "//pkg/fd",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/syscalls/linux",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "ext_test",
- size = "small",
- srcs = [
- "block_map_test.go",
- "ext_test.go",
- "extent_test.go",
- ],
- data = [
- "//pkg/sentry/fsimpl/ext:assets/bigfile.txt",
- "//pkg/sentry/fsimpl/ext:assets/file.txt",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext2",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext3",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext4",
- ],
- library = ":ext",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/marshal/primitive",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/test/testutil",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/README.md b/pkg/sentry/fsimpl/ext/README.md
deleted file mode 100644
index af00cfda8..000000000
--- a/pkg/sentry/fsimpl/ext/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-## EXT(2/3/4) File System
-
-This is a filesystem driver which supports ext2, ext3 and ext4 filesystems.
-Linux has specialized drivers for each variant but none which supports all. This
-library takes advantage of ext's backward compatibility and understands the
-internal organization of on-disk structures to support all variants.
-
-This driver implementation diverges from the Linux implementations in being more
-forgiving about versioning. For instance, if a filesystem contains both extent
-based inodes and classical block map based inodes, this driver will not complain
-and will interpret both of them correctly, whereas in Linux this would be an issue. This
-blurs the line between the three ext fs variants.
-
-Ext2 is considered deprecated as of Red Hat Enterprise Linux 7, and ext3 has
-been superseded by ext4, which offers large performance gains. Thus it is recommended to
-upgrade older filesystem images to ext4 using e2fsprogs for better performance.
-
-### Read Only
-
-This driver currently only allows read only operations. A lot of the design
-decisions are based on this feature. There are plans to implement write (the
-process for which is documented in the future work section).
-
-### Performance
-
-One of the biggest wins about this driver is that it directly talks to the
-underlying block device (or whatever persistent storage is being used), instead
-of making expensive RPCs to a gofer.
-
-Another advantage is that ext fs supports fast concurrent reads. Currently the
-device is represented using an `io.ReaderAt`, which allows for concurrent reads.
-All reads are directly passed to the device driver which intelligently serves
-the read requests in the optimal order. There is no congestion due to locking
-while reading in the filesystem level.
-
-Reads are optimized further in the way file data is transferred over to user
-memory. Ext fs directly copies over file data from disk into user memory with no
-additional allocations on the way. We can only get faster by preloading file
-data into memory (see future work section).
-
-The internal structures used to represent files, inodes and file descriptors use
-a lot of inheritance. With the level of indirection that an interface adds with
-an internal pointer, it can quickly fragment a structure across memory. As this
-runs along side a full blown kernel (which is memory intensive), having a
-fragmented struct might hurt performance. Hence these internal structures,
-though interfaced, are tightly packed in memory using the same inheritance
-pattern that pkg/sentry/vfs uses. The pkg/sentry/fsimpl/ext/disklayout package
-makes an exception to this pattern for reasons documented in the package.
-
-### Security
-
-This driver also intends to help sandbox the container better by reducing the
-surface of the host kernel that the application touches. It prevents the
-application from exploiting vulnerabilities in the host filesystem driver. All
-`io.ReaderAt.ReadAt()` calls are translated to `pread(2)` which are directly
-passed to the device driver in the kernel. Hence this reduces the surface for
-attack.
-
-The application can not affect any host filesystems other than the one passed
-via block device by the user.
-
-### Future Work
-
-#### Write
-
-To support write operations we would need to modify the block device underneath.
-Currently, the driver does not modify the device at all, not even for updating
-the access times for reads. Modifying the filesystem incorrectly can corrupt it
-and render it unreadable for other correct ext(x) drivers. Hence caution must be
-maintained while modifying metadata structures.
-
-Ext4 specifically is built for performance and has added a lot of complexity as
-to how metadata structures are modified. For instance, files are organized
-via an extent tree, which must be kept balanced, and file data blocks must be placed in
-the same extent as much as possible to increase locality. Such properties must
-be maintained while modifying the tree.
-
-Ext filesystems boast a lot about locality, which plays a big role in them being
-performant. The block allocation algorithm in Linux does a good job in keeping
-related data together. This behavior must be maintained as much as possible,
-else we might end up degrading the filesystem performance over time.
-
-Ext4 also supports a wide variety of features which are specialized for varying
-use cases. Implementing all of them can get difficult very quickly.
-
-Ext(x) checksums all its metadata structures to check for corruption, so
-modification of any metadata struct must correspond with re-checksumming the
-struct. Linux filesystem drivers also order on-disk updates intelligently to not
-corrupt the filesystem and also remain performant. The in-memory metadata
-structures must be kept in sync with what is on disk.
-
-There is also replication of some important structures across the filesystem.
-All replicas must be updated when their original copy is updated. There is also
-provisioning for snapshotting which must be kept in mind, although it should not
-affect this implementation unless we allow users to create filesystem snapshots.
-
-Ext4 also introduced journaling (jbd2). The journal must be updated
-appropriately.
-
-#### Performance
-
-To improve performance we should implement a buffer cache, and optionally, read
-ahead for small files. While doing so we must also keep in mind the memory usage
-and have a reasonable cap on how much file data we want to hold in memory.
-
-#### Features
-
-Our current implementation will work with most ext4 filesystems for readonly
-purposes. However, the following features are not supported yet:
-
-- Journal
-- Snapshotting
-- Extended Attributes
-- Hash Tree Directories
-- Meta Block Groups
-- Multiple Mount Protection
-- Bigalloc
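The removed README notes that the ext driver hands the image to the filesystem as an `io.ReaderAt`, so concurrent reads become independent positional reads (`pread(2)` on a host file) with no shared file offset and no filesystem-level locking. Below is a minimal sketch of that access pattern against an ordinary image file; it is an illustration only, and the image path and block size are placeholders, not values from the driver.

```go
// Sketch: concurrent positional reads through io.ReaderAt. os.File implements
// io.ReaderAt via pread(2), so the goroutines below share no file offset.
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// readBlock reads one block-sized chunk at the given byte offset.
func readBlock(dev io.ReaderAt, off int64, blockSize int) ([]byte, error) {
	buf := make([]byte, blockSize)
	if _, err := dev.ReadAt(buf, off); err != nil && err != io.EOF {
		return nil, err
	}
	return buf, nil
}

func main() {
	// "tiny.ext4" is a placeholder image path for this example.
	f, err := os.Open("tiny.ext4")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	const blockSize = 1024
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(block int64) {
			defer wg.Done()
			if _, err := readBlock(f, block*blockSize, blockSize); err != nil {
				fmt.Println("read block", block, "failed:", err)
			}
		}(int64(i))
	}
	wg.Wait()
}
```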
diff --git a/pkg/sentry/fsimpl/ext/assets/README.md b/pkg/sentry/fsimpl/ext/assets/README.md
deleted file mode 100644
index 6f1e81b3a..000000000
--- a/pkg/sentry/fsimpl/ext/assets/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-### Tiny Ext(2/3/4) Images
-
-The images are 64KB in size, which supports 64 1KB blocks and 16 inodes. This is
-the smallest size mkfs.ext(2/3/4) works with.
-
-These images were generated using the following commands.
-
-```bash
-fallocate -l 64K tiny.ext$VERSION
-mkfs.ext$VERSION -j tiny.ext$VERSION
-```
-
-where `VERSION` is `2`, `3` or `4`.
-
-You can mount it using:
-
-```bash
-sudo mount -o loop tiny.ext$VERSION $MOUNTPOINT
-```
-
-`file.txt`, `bigfile.txt` and `symlink.txt` were added to this image by just
-mounting it and copying (while preserving links) those files to the mountpoint
-directory using:
-
-```bash
-sudo cp -P {file.txt,symlink.txt,bigfile.txt} $MOUNTPOINT
-```
-
-The files in this directory mirror the contents and organisation of the files
-stored in the image.
-
-You can umount the filesystem using:
-
-```bash
-sudo umount $MOUNTPOINT
-```
diff --git a/pkg/sentry/fsimpl/ext/assets/bigfile.txt b/pkg/sentry/fsimpl/ext/assets/bigfile.txt
deleted file mode 100644
index 3857cf516..000000000
--- a/pkg/sentry/fsimpl/ext/assets/bigfile.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus faucibus eleifend orci, ut ornare nibh faucibus eu. Cras at condimentum massa. Nullam luctus, elit non porttitor congue, sapien diam feugiat sapien, sed eleifend nulla mauris non arcu. Sed lacinia mauris magna, eu mollis libero varius sit amet. Donec mollis, quam convallis commodo posuere, dolor nisi placerat nisi, in faucibus augue mi eu lorem. In pharetra consectetur faucibus. Ut euismod ex efficitur egestas tincidunt. Maecenas condimentum ut ante in rutrum. Vivamus sed arcu tempor, faucibus turpis et, lacinia diam.
-
-Sed in lacus vel nisl interdum bibendum in sed justo. Nunc tellus risus, molestie vitae arcu sed, molestie tempus ligula. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc risus neque, volutpat et ante non, ullamcorper condimentum ante. Aliquam sed metus in urna condimentum convallis. Vivamus ut libero mauris. Proin mollis posuere consequat. Vestibulum placerat mollis est et pulvinar.
-
-Donec rutrum odio ac diam pharetra, id fermentum magna cursus. Pellentesque in dapibus elit, et condimentum orci. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Suspendisse euismod dapibus est, id vestibulum mauris. Nulla facilisi. Nulla cursus gravida nisi. Phasellus vestibulum rutrum lectus, a dignissim mauris hendrerit vitae. In at elementum mauris. Integer vel efficitur velit. Nullam fringilla sapien mi, quis luctus neque efficitur ac. Aenean nec quam dapibus nunc commodo pharetra. Proin sapien mi, fermentum aliquet vulputate non, aliquet porttitor diam. Quisque lacinia, urna et finibus fermentum, nunc lacus vehicula ex, sed congue metus lectus ac quam. Aliquam erat volutpat. Suspendisse sodales, dolor ut tincidunt finibus, augue erat varius tellus, a interdum erat sem at nunc. Vestibulum cursus iaculis sapien, vitae feugiat dui auctor quis.
-
-Pellentesque nec maximus nulla, eu blandit diam. Maecenas quis arcu ornare, congue ante at, vehicula ipsum. Praesent feugiat mauris rutrum sem fermentum, nec luctus ipsum placerat. Pellentesque placerat ipsum at dignissim fringilla. Vivamus et posuere sem, eget hendrerit felis. Aenean vulputate, augue vel mollis feugiat, justo ipsum mollis dolor, eu mollis elit neque ut ipsum. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Fusce bibendum sem quam, vulputate laoreet mi dapibus imperdiet. Sed a purus non nibh pretium aliquet. Integer eget luctus augue, vitae tincidunt magna. Ut eros enim, egestas eu nulla et, lobortis egestas arcu. Cras id ipsum ac justo lacinia rutrum. Vivamus lectus leo, ultricies sed justo at, pellentesque feugiat magna. Ut sollicitudin neque elit, vel ornare mauris commodo id.
-
-Duis dapibus orci et sapien finibus finibus. Mauris eleifend, lacus at vestibulum maximus, quam ligula pharetra erat, sit amet dapibus neque elit vitae neque. In bibendum sollicitudin erat, eget ultricies tortor malesuada at. Sed sit amet orci turpis. Donec feugiat ligula nibh, molestie tincidunt lectus elementum id. Donec volutpat maximus nibh, in vulputate felis posuere eu. Cras tincidunt ullamcorper lacus. Phasellus porta lorem auctor, congue magna a, commodo elit.
-
-Etiam auctor mi quis elit sodales, eu pulvinar arcu condimentum. Aenean imperdiet risus et dapibus tincidunt. Nullam tincidunt dictum dui, sed commodo urna rutrum id. Ut mollis libero vel elit laoreet bibendum. Quisque arcu arcu, tincidunt at ultricies id, vulputate nec metus. In tristique posuere quam sit amet volutpat. Vivamus scelerisque et nunc at dapibus. Fusce finibus libero ut ligula pretium rhoncus. Mauris non elit in arcu finibus imperdiet. Pellentesque nec massa odio. Proin rutrum mauris non sagittis efficitur. Aliquam auctor quam at dignissim faucibus. Ut eget ligula in magna posuere ultricies vitae sit amet turpis. Duis maximus odio nulla. Donec gravida sem tristique tempus scelerisque.
-
-Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce pharetra magna vulputate aliquet tempus. Duis id hendrerit arcu. Quisque ut ex elit. Integer velit orci, venenatis ut sapien ac, placerat porttitor dui. Interdum et malesuada fames ac ante ipsum primis in faucibus. Nunc hendrerit cursus diam, hendrerit finibus ipsum scelerisque ut. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.
-
-Nulla non euismod neque. Phasellus vel sapien eu metus pulvinar rhoncus. Suspendisse eu mollis tellus, quis vestibulum tortor. Maecenas interdum dolor sed nulla fermentum maximus. Donec imperdiet ullamcorper condimentum. Nam quis nibh ante. Praesent quis tellus ut tortor pulvinar blandit sit amet ut sapien. Vestibulum est orci, pellentesque vitae tristique sit amet, tristique non felis.
-
-Vivamus sodales pellentesque varius. Sed vel tempus ligula. Nulla tristique nisl vel dui facilisis, ac sodales augue hendrerit. Proin augue nisi, vestibulum quis augue nec, sagittis tincidunt velit. Vestibulum euismod, nulla nec sodales faucibus, urna sapien vulputate magna, id varius metus sapien ut neque. Duis in mollis urna, in scelerisque enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc condimentum dictum turpis, et egestas neque dapibus eget. Quisque fringilla, dui eu venenatis eleifend, erat nibh lacinia urna, at lacinia lacus sapien eu dui. Duis eu erat ut mi lacinia convallis a sed ex.
-
-Fusce elit metus, tincidunt nec eleifend a, hendrerit nec ligula. Duis placerat finibus sollicitudin. In euismod porta tellus, in luctus justo bibendum bibendum. Maecenas at magna eleifend lectus tincidunt suscipit ut a ligula. Nulla tempor accumsan felis, fermentum dapibus est eleifend vitae. Mauris urna sem, fringilla at ultricies non, ultrices in arcu. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam vehicula nunc at laoreet imperdiet. Nunc tristique ut risus id aliquet. Integer eleifend massa orci.
-
-Vestibulum sed ante sollicitudin nisi fringilla bibendum nec vel quam. Sed pretium augue eu ligula congue pulvinar. Donec vitae magna tincidunt, pharetra lacus id, convallis nulla. Cras viverra nisl nisl, varius convallis leo vulputate nec. Morbi at consequat dui, sed aliquet metus. Sed suscipit fermentum mollis. Maecenas nec mi sodales, tincidunt purus in, tristique mauris. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec interdum mi in velit efficitur, quis ultrices ex imperdiet. Sed vestibulum, magna ut tristique pretium, mi ipsum placerat tellus, non tempor enim augue et ex. Pellentesque eget felis quis ante sodales viverra ac sed lacus. Donec suscipit tempus massa, eget laoreet massa molestie at.
-
-Aenean fringilla dui non aliquet consectetur. Fusce cursus quam nec orci hendrerit faucibus. Donec consequat suscipit enim, non volutpat lectus auctor interdum. Proin lorem purus, maximus vel orci vitae, suscipit egestas turpis. Donec risus urna, congue a sem eu, aliquet placerat odio. Morbi gravida tristique turpis, quis efficitur enim. Nunc interdum gravida ipsum vel facilisis. Nunc congue finibus sollicitudin. Quisque euismod aliquet lectus et tincidunt. Curabitur ultrices sem ut mi fringilla fermentum. Morbi pretium, nisi sit amet dapibus congue, dolor enim consectetur risus, a interdum ligula odio sed odio. Quisque facilisis, mi at suscipit gravida, nunc sapien cursus justo, ut luctus odio nulla quis leo. Integer condimentum lobortis mauris, non egestas tellus lobortis sit amet.
-
-In sollicitudin velit ac ante vehicula, vitae varius tortor mollis. In hac habitasse platea dictumst. Quisque et orci lorem. Integer malesuada fringilla luctus. Pellentesque malesuada, mi non lobortis porttitor, ante ligula vulputate ante, nec dictum risus eros sit amet sapien. Nulla aliquam lorem libero, ac varius nulla tristique eget. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Ut pellentesque mauris orci, vel consequat mi varius a. Ut sit amet elit vulputate, lacinia metus non, fermentum nisl. Pellentesque eu nisi sed quam egestas blandit. Duis sit amet lobortis dolor. Donec consectetur sem interdum, tristique elit sit amet, sodales lacus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Fusce id aliquam augue. Sed pretium congue risus vitae lacinia. Vestibulum non vulputate risus, ut malesuada justo.
-
-Sed odio elit, consectetur ac mauris quis, consequat commodo libero. Fusce sodales velit vulputate pulvinar fermentum. Donec iaculis nec nisl eget faucibus. Mauris at dictum velit. Donec fermentum lectus eu viverra volutpat. Aliquam consequat facilisis lorem, cursus consequat dui bibendum ullamcorper. Pellentesque nulla magna, imperdiet at magna et, cursus egestas enim. Nullam semper molestie lectus sit amet semper. Duis eget tincidunt est. Integer id neque risus. Integer ultricies hendrerit vestibulum. Donec blandit blandit sagittis. Nunc consectetur vitae nisi consectetur volutpat.
-
-Nulla id lorem fermentum, efficitur magna a, hendrerit dui. Vivamus sagittis orci gravida, bibendum quam eget, molestie est. Phasellus nec enim tincidunt, volutpat sapien non, laoreet diam. Nulla posuere enim nec porttitor lobortis. Donec auctor odio ut orci eleifend, ut eleifend purus convallis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut hendrerit, purus eget viverra tincidunt, sem magna imperdiet libero, et aliquam turpis neque vitae elit. Maecenas semper varius iaculis. Cras non lorem quis quam bibendum eleifend in et libero. Curabitur at purus mauris. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus porta diam sed elit eleifend gravida.
-
-Nulla facilisi. Ut ultricies diam vel diam consectetur, vel porta augue molestie. Fusce interdum sapien et metus facilisis pellentesque. Nulla convallis sem at nunc vehicula facilisis. Nam ac rutrum purus. Nunc bibendum, dolor sit amet tempus ullamcorper, lorem leo tempor sem, id fringilla nunc augue scelerisque augue. Nullam sit amet rutrum nisl. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec sed mauris gravida eros vehicula sagittis at eget orci. Cras elementum, eros at accumsan bibendum, libero neque blandit purus, vitae vestibulum libero massa ac nibh. Integer at placerat nulla. Mauris eu eleifend orci. Aliquam consequat ligula vitae erat porta lobortis. Duis fermentum elit ac aliquet ornare.
-
-Mauris eget cursus tellus, eget sodales purus. Aliquam malesuada, augue id vulputate finibus, nisi ex bibendum nisl, sit amet laoreet quam urna a dolor. Nullam ultricies, sapien eu laoreet consequat, erat eros dignissim diam, ultrices sodales lectus mauris et leo. Morbi lacinia eu ante at tempus. Sed iaculis finibus magna malesuada efficitur. Donec faucibus erat sit amet elementum feugiat. Praesent a placerat nisi. Etiam lacinia gravida diam, et sollicitudin sapien tincidunt ut.
-
-Maecenas felis quam, tincidunt vitae venenatis scelerisque, viverra vitae odio. Phasellus enim neque, ultricies suscipit malesuada sit amet, vehicula sit amet purus. Nulla placerat sit amet dui vel tincidunt. Nam quis neque vel magna commodo egestas. Vestibulum sagittis rutrum lorem ut congue. Maecenas vel ultrices tellus. Donec efficitur, urna ac consequat iaculis, lorem felis pharetra eros, eget faucibus orci lectus sit amet arcu.
-
-Ut a tempus nisi. Nulla facilisi. Praesent vulputate maximus mi et dapibus. Sed sit amet libero ac augue hendrerit efficitur in a sapien. Mauris placerat velit sit amet tellus sollicitudin faucibus. Donec egestas a magna ac suscipit. Duis enim sapien, mollis sed egestas et, vestibulum vel leo.
-
-Proin quis dapibus dui. Donec eu tincidunt nunc. Vivamus eget purus consectetur, maximus ante vitae, tincidunt elit. Aenean mattis dolor a gravida aliquam. Praesent quis tellus id sem maximus vulputate nec sed nulla. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur metus nulla, volutpat volutpat est eu, hendrerit congue erat. Aliquam sollicitudin augue ante. Sed sollicitudin, magna eu consequat elementum, mi augue ullamcorper felis, molestie imperdiet erat metus iaculis est. Proin ac tortor nisi. Pellentesque quis nisi risus. Integer enim sapien, tincidunt quis tortor id, accumsan venenatis mi. Nulla facilisi.
-
-Cras pretium sit amet quam congue maximus. Morbi lacus libero, imperdiet commodo massa sed, scelerisque placerat libero. Cras nisl nisi, consectetur sed bibendum eu, venenatis at enim. Proin sodales justo at quam aliquam, a consectetur mi ornare. Donec porta ac est sit amet efficitur. Suspendisse vestibulum tortor id neque imperdiet, id lacinia risus vehicula. Phasellus ac eleifend purus. Mauris vel gravida ante. Aliquam vitae lobortis risus. Sed vehicula consectetur tincidunt. Nam et justo vitae purus molestie consequat. Pellentesque ipsum ex, convallis quis blandit non, gravida et urna. Donec diam ligula amet.
diff --git a/pkg/sentry/fsimpl/ext/assets/file.txt b/pkg/sentry/fsimpl/ext/assets/file.txt
deleted file mode 100644
index 980a0d5f1..000000000
--- a/pkg/sentry/fsimpl/ext/assets/file.txt
+++ /dev/null
@@ -1 +0,0 @@
-Hello World!
diff --git a/pkg/sentry/fsimpl/ext/assets/symlink.txt b/pkg/sentry/fsimpl/ext/assets/symlink.txt
deleted file mode 120000
index 4c330738c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/symlink.txt
+++ /dev/null
@@ -1 +0,0 @@
-file.txt \ No newline at end of file
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext2 b/pkg/sentry/fsimpl/ext/assets/tiny.ext2
deleted file mode 100644
index 381ade9bf..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext2
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext3 b/pkg/sentry/fsimpl/ext/assets/tiny.ext3
deleted file mode 100644
index 0e97a324c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext3
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext4 b/pkg/sentry/fsimpl/ext/assets/tiny.ext4
deleted file mode 100644
index a6859736d..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext4
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/benchmark/BUILD b/pkg/sentry/fsimpl/ext/benchmark/BUILD
deleted file mode 100644
index 6c5a559fd..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "benchmark_test",
- size = "small",
- srcs = ["benchmark_test.go"],
- deps = [
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go b/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
deleted file mode 100644
index 2ee7cc7ac..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// These benchmarks emulate the memfs benchmarks. Before they are run, ext4
-// images must be created at /tmp/image-{depth}.ext4, for all the depths
-// tested below, using the `make_deep_ext4.sh` script.
-//
-// The benchmark itself cannot run the script because the script requires
-// sudo privileges to create the file system images.
-package benchmark_test
-
-import (
- "fmt"
- "os"
- "runtime"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-var depths = []int{1, 2, 3, 8, 64, 100}
-
-const filename = "file.txt"
-
-// setUp opens imagePath as an ext Filesystem and returns all elements
-// required to run the tests. If the returned error is nil, it also returns a
-// tear-down function which must be called after the test is run to clean up.
-func setUp(b *testing.B, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- f, err := os.Open(imagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, nil, nil, nil, err
- }
- vfsObj.MustRegisterFilesystemType("extfs", ext.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, imagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// mount mounts extfs at the given path operation. It returns a tear-down
-// function which must be called after the test is run to clean up.
-func mount(b *testing.B, imagePath string, vfsfs *vfs.VirtualFilesystem, pop *vfs.PathOperation) func() {
- b.Helper()
-
- f, err := os.Open(imagePath)
- if err != nil {
- b.Fatalf("could not open image at %s: %v", imagePath, err)
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- if _, err := vfsfs.MountAt(ctx, creds, imagePath, pop, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- }); err != nil {
-		b.Fatalf("failed to mount extfs submount: %v", err)
- }
- return func() {
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
-}
-
-// BenchmarkVFS2Ext4fsStat emulates BenchmarkVFS2MemfsStat.
-func BenchmarkVFS2Ext4fsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", depth))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
-
-// BenchmarkVFS2ExtfsMountStat emulates BenchmarkVFS2MemfsMountStat.
-func BenchmarkVFS2ExtfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- // Create root extfs with depth 1 so we can mount extfs again at /1/.
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", 1))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- mountPointName := "/1/"
- pop := vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(mountPointName),
- }
-
- // Save the mount point for later use.
- mountPoint, err := vfsfs.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
-
- // Create extfs submount.
- mountTearDown := mount(b, fmt.Sprintf("/tmp/image-%d.ext4", depth), vfsfs, &pop)
- defer mountTearDown()
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteString(mountPointName)
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check. touch(1) always creates files of size 0 (empty).
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
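For reference, the stat benchmarks above walk a path of the form /1/2/.../depth/file.txt built with a strings.Builder before the timed loop. A standalone sketch of that construction; buildDeepPath is an illustrative helper, not part of the deleted test:

package main

import (
	"fmt"
	"strings"
)

// buildDeepPath mirrors the path construction used by the benchmarks above:
// a leading '/', one numbered directory per level, then the file name.
func buildDeepPath(depth int, filename string) string {
	var b strings.Builder
	b.WriteByte('/')
	for i := 1; i <= depth; i++ {
		fmt.Fprintf(&b, "%d/", i)
	}
	b.WriteString(filename)
	return b.String()
}

func main() {
	// For depth 3 this prints "/1/2/3/file.txt", matching the layout that
	// make_deep_ext4.sh writes into /tmp/image-3.ext4.
	fmt.Println(buildDeepPath(3, "file.txt"))
}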
diff --git a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh b/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
deleted file mode 100755
index d0910da1f..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script creates an ext4 image with $1 depth of directories and a file in
-# the innermost directory. The created file is at path /1/2/.../depth/file.txt.
-# The ext4 image is written to $2. The image is temporarily mounted at
-# /tmp/mountpoint. This script must be run with sudo privileges.
-
-# Usage:
-# sudo bash make_deep_ext4.sh {depth} {output path}
-
-# Check positional arguments.
-if [ "$#" -ne 2 ]; then
- echo "Usage: sudo bash make_deep_ext4.sh {depth} {output path}"
- exit 1
-fi
-
-# Make sure depth is a non-negative number.
-if ! [[ "$1" =~ ^[0-9]+$ ]]; then
- echo "Depth must be a non-negative number."
- exit 1
-fi
-
-# Create a 1 MB filesystem image at the requested output path.
-rm -f $2
-fallocate -l 1M $2
-if [ $? -ne 0 ]; then
- echo "fallocate failed"
- exit $?
-fi
-
-# Convert that blank into an ext4 image.
-mkfs.ext4 -j $2
-if [ $? -ne 0 ]; then
- echo "mkfs.ext4 failed"
- exit $?
-fi
-
-# Mount the image.
-MOUNTPOINT=/tmp/mountpoint
-mkdir -p $MOUNTPOINT
-mount -o loop $2 $MOUNTPOINT
-if [ $? -ne 0 ]; then
- echo "mount failed"
- exit $?
-fi
-
-# Create nested directories and the file.
-if [ "$1" -eq 0 ]; then
- FILEPATH=$MOUNTPOINT/file.txt
-else
- FILEPATH=$MOUNTPOINT/$(seq -s '/' 1 $1)/file.txt
-fi
-mkdir -p $(dirname $FILEPATH) || exit
-touch $FILEPATH
-
-# Clean up.
-umount $MOUNTPOINT
-rm -rf $MOUNTPOINT
diff --git a/pkg/sentry/fsimpl/ext/block_map_file.go b/pkg/sentry/fsimpl/ext/block_map_file.go
deleted file mode 100644
index 1165234f9..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_file.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "math"
-
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-const (
- // numDirectBlks is the number of direct blocks in ext block map inodes.
- numDirectBlks = 12
-)
-
-// blockMapFile is a type of regular file which uses direct/indirect block
-// addressing to store file data. This was deprecated in ext4.
-type blockMapFile struct {
- regFile regularFile
-
-	// directBlks are the direct block numbers. The physical blocks pointed to
-	// by these hold file data. Contains file blocks 0 to 11.
- directBlks [numDirectBlks]primitive.Uint32
-
- // indirectBlk is the physical block which contains (blkSize/4) direct block
- // numbers (as uint32 integers).
- indirectBlk primitive.Uint32
-
- // doubleIndirectBlk is the physical block which contains (blkSize/4) indirect
- // block numbers (as uint32 integers).
- doubleIndirectBlk primitive.Uint32
-
- // tripleIndirectBlk is the physical block which contains (blkSize/4) doubly
- // indirect block numbers (as uint32 integers).
- tripleIndirectBlk primitive.Uint32
-
- // coverage at (i)th index indicates the amount of file data a node at
- // height (i) covers. Height 0 is the direct block.
- coverage [4]uint64
-}
-
-// Compiles only if blockMapFile implements io.ReaderAt.
-var _ io.ReaderAt = (*blockMapFile)(nil)
-
-// newBlockMapFile is the blockMapFile constructor. It initializes the
-// file-to-physical-blocks map with (at most) the first 12 (direct) blocks.
-func newBlockMapFile(args inodeArgs) (*blockMapFile, error) {
- file := &blockMapFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
-
- for i := uint(0); i < 4; i++ {
- file.coverage[i] = getCoverage(file.regFile.inode.blkSize, i)
- }
-
- blkMap := file.regFile.inode.diskInode.Data()
- for i := 0; i < numDirectBlks; i++ {
- file.directBlks[i].UnmarshalBytes(blkMap[i*4 : (i+1)*4])
- }
- file.indirectBlk.UnmarshalBytes(blkMap[numDirectBlks*4 : (numDirectBlks+1)*4])
- file.doubleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+1)*4 : (numDirectBlks+2)*4])
- file.tripleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+2)*4 : (numDirectBlks+3)*4])
- return file, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *blockMapFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, syserror.EINVAL
- }
-
- offset := uint64(off)
- size := f.regFile.inode.diskInode.Size()
- if offset >= size {
- return 0, io.EOF
- }
-
- // dirBlksEnd is the file offset until which direct blocks cover file data.
- // Direct blocks cover 0 <= file offset < dirBlksEnd.
- dirBlksEnd := numDirectBlks * f.coverage[0]
-
- // indirBlkEnd is the file offset until which the indirect block covers file
- // data. The indirect block covers dirBlksEnd <= file offset < indirBlkEnd.
- indirBlkEnd := dirBlksEnd + f.coverage[1]
-
- // doubIndirBlkEnd is the file offset until which the double indirect block
- // covers file data. The double indirect block covers the range
- // indirBlkEnd <= file offset < doubIndirBlkEnd.
- doubIndirBlkEnd := indirBlkEnd + f.coverage[2]
-
- read := 0
- toRead := len(dst)
- if uint64(toRead)+offset > size {
- toRead = int(size - offset)
- }
- for read < toRead {
- var err error
- var curR int
-
- // Figure out which block to delegate the read to.
- switch {
- case offset < dirBlksEnd:
- // Direct block.
- curR, err = f.read(uint32(f.directBlks[offset/f.regFile.inode.blkSize]), offset%f.regFile.inode.blkSize, 0, dst[read:])
- case offset < indirBlkEnd:
- // Indirect block.
- curR, err = f.read(uint32(f.indirectBlk), offset-dirBlksEnd, 1, dst[read:])
- case offset < doubIndirBlkEnd:
- // Doubly indirect block.
- curR, err = f.read(uint32(f.doubleIndirectBlk), offset-indirBlkEnd, 2, dst[read:])
- default:
- // Triply indirect block.
- curR, err = f.read(uint32(f.tripleIndirectBlk), offset-doubIndirBlkEnd, 3, dst[read:])
- }
-
- read += curR
- offset += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- if read < len(dst) {
- return read, io.EOF
- }
- return read, nil
-}
-
-// read is the recursive step of the ReadAt function. It relies on knowing the
-// current node's location on disk (curPhyBlk) and its height in the block map
-// tree. A height of 0 indicates that the current node actually holds file
-// data. relFileOff is the offset from which we need to start reading
-// under the current node. It is completely relative to the current node.
-func (f *blockMapFile) read(curPhyBlk uint32, relFileOff uint64, height uint, dst []byte) (int, error) {
- curPhyBlkOff := int64(curPhyBlk) * int64(f.regFile.inode.blkSize)
- if height == 0 {
- toRead := int(f.regFile.inode.blkSize - relFileOff)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], curPhyBlkOff+int64(relFileOff))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
- }
-
- childCov := f.coverage[height-1]
- startIdx := relFileOff / childCov
- endIdx := f.regFile.inode.blkSize / 4 // This is exclusive.
- wantEndIdx := (relFileOff + uint64(len(dst))) / childCov
- wantEndIdx++ // Make this exclusive.
- if wantEndIdx < endIdx {
- endIdx = wantEndIdx
- }
-
- read := 0
- curChildOff := relFileOff % childCov
- for i := startIdx; i < endIdx; i++ {
- var childPhyBlk primitive.Uint32
- err := readFromDisk(f.regFile.inode.fs.dev, curPhyBlkOff+int64(i*4), &childPhyBlk)
- if err != nil {
- return read, err
- }
-
- n, err := f.read(uint32(childPhyBlk), curChildOff, height-1, dst[read:])
- read += n
- if err != nil {
- return read, err
- }
-
- curChildOff = 0
- }
-
- return read, nil
-}
-
-// getCoverage returns the number of bytes a node at the given height covers.
-// Height 0 is the file data block itself. Height 1 is the indirect block.
-//
-// Formula: blkSize * ((blkSize / 4)^height)
-func getCoverage(blkSize uint64, height uint) uint64 {
- return blkSize * uint64(math.Pow(float64(blkSize/4), float64(height)))
-}
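To make the coverage arithmetic above concrete, here is a standalone sketch assuming a 4 KiB block size; the coverage helper simply mirrors the formula, and the constants are illustrative rather than taken from any particular image:

package main

import "fmt"

// coverage mirrors the formula above: a node at the given height covers
// blkSize * (blkSize/4)^height bytes.
func coverage(blkSize uint64, height uint) uint64 {
	out := blkSize
	for i := uint(0); i < height; i++ {
		out *= blkSize / 4
	}
	return out
}

func main() {
	const blkSize = 4096 // a common ext block size, used only for illustration

	// Direct blocks cover the first 48 KiB of the file; the single indirect
	// block extends that by ~4 MiB; the double indirect block by another ~4 GiB.
	dirBlksEnd := 12 * coverage(blkSize, 0)
	doubIndirBlkEnd := dirBlksEnd + coverage(blkSize, 1) + coverage(blkSize, 2)

	fmt.Println(dirBlksEnd, dirBlksEnd+coverage(blkSize, 1), doubIndirBlkEnd)
	// Prints: 49152 4243456 4299210752
}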
diff --git a/pkg/sentry/fsimpl/ext/block_map_test.go b/pkg/sentry/fsimpl/ext/block_map_test.go
deleted file mode 100644
index ed98b482e..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-// These consts are for mocking the block map tree.
-const (
- mockBMBlkSize = uint32(16)
- mockBMDiskSize = 2500
-)
-
-// TestBlockMapReader stress tests block map reader functionality. It reads
-// from every possible start offset in the block map structure to the end of
-// the file.
-func TestBlockMapReader(t *testing.T) {
- mockBMFile, want := blockMapSetUp(t)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockBMFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// blkNumGen is a number generator which gives block numbers for building the
-// block map file on disk. It gives unique numbers in a random order, which
-// helps create an extremely fragmented filesystem.
-type blkNumGen struct {
- nums []uint32
-}
-
-// newBlkNumGen is the blkNumGen constructor.
-func newBlkNumGen() *blkNumGen {
- blkNums := &blkNumGen{}
- lim := mockBMDiskSize / mockBMBlkSize
- blkNums.nums = make([]uint32, lim)
- for i := uint32(0); i < lim; i++ {
- blkNums.nums[i] = i
- }
-
- rand.Shuffle(int(lim), func(i, j int) {
- blkNums.nums[i], blkNums.nums[j] = blkNums.nums[j], blkNums.nums[i]
- })
- return blkNums
-}
-
-// next returns the next random block number.
-func (n *blkNumGen) next() uint32 {
- ret := n.nums[0]
- n.nums = n.nums[1:]
- return ret
-}
-
-// blockMapSetUp creates a mock disk and a block map file. It initializes the
-// block map file with 12 direct blocks, 1 indirect block, 1 double indirect
-// block and 1 triple indirect block (basically filled to the brim). It
-// initializes the disk to reflect the inode. It also returns the file data that
-// the inode covers and that is written to disk.
-func blockMapSetUp(t *testing.T) (*blockMapFile, []byte) {
- mockDisk := make([]byte, mockBMDiskSize)
- var fileData []byte
- blkNums := newBlkNumGen()
- off := 0
- data := make([]byte, (numDirectBlks+3)*(*primitive.Uint32)(nil).SizeBytes())
-
- // Write the direct blocks.
- for i := 0; i < numDirectBlks; i++ {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(data[off:])
- off += curBlkNum.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(curBlkNum), 0, blkNums)...)
- }
-
- // Write to indirect block.
- indirectBlk := primitive.Uint32(blkNums.next())
- indirectBlk.MarshalBytes(data[off:])
- off += indirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(indirectBlk), 1, blkNums)...)
-
- // Write to double indirect block.
- doublyIndirectBlk := primitive.Uint32(blkNums.next())
- doublyIndirectBlk.MarshalBytes(data[off:])
- off += doublyIndirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(doublyIndirectBlk), 2, blkNums)...)
-
- // Write to triple indirect block.
- triplyIndirectBlk := primitive.Uint32(blkNums.next())
- triplyIndirectBlk.MarshalBytes(data[off:])
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(triplyIndirectBlk), 3, blkNums)...)
-
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
- SizeLo: getMockBMFileFize(),
- },
- },
- blkSize: uint64(mockBMBlkSize),
- }
- copy(args.diskInode.Data(), data)
-
- mockFile, err := newBlockMapFile(args)
- if err != nil {
- t.Fatalf("newBlockMapFile failed: %v", err)
- }
- return mockFile, fileData
-}
-
-// writeFileDataToBlock writes random bytes to the block on disk.
-func writeFileDataToBlock(disk []byte, blkNum uint32, height uint, blkNums *blkNumGen) []byte {
- if height == 0 {
- start := blkNum * mockBMBlkSize
- end := start + mockBMBlkSize
- rand.Read(disk[start:end])
- return disk[start:end]
- }
-
- var fileData []byte
- for off := blkNum * mockBMBlkSize; off < (blkNum+1)*mockBMBlkSize; off += 4 {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(disk[off : off+4])
- fileData = append(fileData, writeFileDataToBlock(disk, uint32(curBlkNum), height-1, blkNums)...)
- }
- return fileData
-}
-
-// getMockBMFileFize gets the size of the mock block map file which is used for
-// testing.
-func getMockBMFileFize() uint32 {
- return uint32(numDirectBlks*getCoverage(uint64(mockBMBlkSize), 0) + getCoverage(uint64(mockBMBlkSize), 1) + getCoverage(uint64(mockBMBlkSize), 2) + getCoverage(uint64(mockBMBlkSize), 3))
-}
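A quick back-of-the-envelope check of the mock sizes used above, as a standalone sketch whose names are local to this example: with a 16-byte block and 4 block pointers per block, the fully populated block map covers 1536 bytes of file data and consumes 123 of the 156 block numbers available on the 2500-byte mock disk.

package main

import "fmt"

func main() {
	const (
		blkSize    = 16   // mockBMBlkSize in the test above
		diskSize   = 2500 // mockBMDiskSize in the test above
		ptrsPerBlk = blkSize / 4
	)

	// File data covered: 12 direct blocks plus one full indirect, double
	// indirect and triple indirect tree.
	fileSize := 12*blkSize +
		blkSize*ptrsPerBlk +
		blkSize*ptrsPerBlk*ptrsPerBlk +
		blkSize*ptrsPerBlk*ptrsPerBlk*ptrsPerBlk

	// Blocks consumed on the mock disk: data blocks plus the intermediate
	// pointer blocks at each height.
	blocksUsed := 12 + (1 + 4) + (1 + 4*(1+4)) + (1 + 4*(1+4*(1+4)))

	fmt.Println(fileSize, blocksUsed, diskSize/blkSize) // 1536 123 156
}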
diff --git a/pkg/sentry/fsimpl/ext/dentry.go b/pkg/sentry/fsimpl/ext/dentry.go
deleted file mode 100644
index 9bfed883a..000000000
--- a/pkg/sentry/fsimpl/ext/dentry.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// dentry implements vfs.DentryImpl.
-//
-// +stateify savable
-type dentry struct {
- vfsd vfs.Dentry
-
- // Protected by filesystem.mu.
- parent *dentry
- name string
-
- // inode is the inode represented by this dentry. Multiple Dentries may
- // share a single non-directory Inode (with hard links). inode is
- // immutable.
- inode *inode
-}
-
-// Compiles only if dentry implements vfs.DentryImpl.
-var _ vfs.DentryImpl = (*dentry)(nil)
-
-// newDentry is the dentry constructor.
-func newDentry(in *inode) *dentry {
- d := &dentry{
- inode: in,
- }
- d.vfsd.Init(d)
- return d
-}
-
-// IncRef implements vfs.DentryImpl.IncRef.
-func (d *dentry) IncRef() {
- d.inode.incRef()
-}
-
-// TryIncRef implements vfs.DentryImpl.TryIncRef.
-func (d *dentry) TryIncRef() bool {
- return d.inode.tryIncRef()
-}
-
-// DecRef implements vfs.DentryImpl.DecRef.
-func (d *dentry) DecRef(ctx context.Context) {
- // FIXME(b/134676337): filesystem.mu may not be locked as required by
- // inode.decRef().
- d.inode.decRef()
-}
-
-// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {}
-
-// Watches implements vfs.DentryImpl.Watches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) Watches() *vfs.Watches {
- return nil
-}
-
-// OnZeroWatches implements vfs.Dentry.OnZeroWatches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) OnZeroWatches(context.Context) {}
diff --git a/pkg/sentry/fsimpl/ext/directory.go b/pkg/sentry/fsimpl/ext/directory.go
deleted file mode 100644
index 0ad79b381..000000000
--- a/pkg/sentry/fsimpl/ext/directory.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// directory represents a directory inode. It holds the childList in memory.
-//
-// +stateify savable
-type directory struct {
- inode inode
-
-	// childCache maps filenames to dentries for those children whose dentries
-	// have been instantiated. childCache is protected by filesystem.mu.
- childCache map[string]*dentry
-
- // mu serializes the changes to childList.
- // Lock Order (outermost locks must be taken first):
- // directory.mu
- // filesystem.mu
- mu sync.Mutex `state:"nosave"`
-
- // childList is a list containing (1) child dirents and (2) fake dirents
- // (with diskDirent == nil) that represent the iteration position of
- // directoryFDs. childList is used to support directoryFD.IterDirents()
- // efficiently. childList is protected by mu.
- childList direntList
-
- // childMap maps the child's filename to the dirent structure stored in
- // childList. This adds some data replication but helps in faster path
- // traversal. For consistency, key == childMap[key].diskDirent.FileName().
- // Immutable.
- childMap map[string]*dirent
-}
-
-// newDirectory is the directory constructor.
-func newDirectory(args inodeArgs, newDirent bool) (*directory, error) {
- file := &directory{
- childCache: make(map[string]*dentry),
- childMap: make(map[string]*dirent),
- }
- file.inode.init(args, file)
-
- // Initialize childList by reading dirents from the underlying file.
- if args.diskInode.Flags().Index {
- // TODO(b/134676337): Support hash tree directories. Currently only the '.'
- // and '..' entries are read in.
-
- // Users cannot navigate this hash tree directory yet.
- log.Warningf("hash tree directory being used which is unsupported")
- return file, nil
- }
-
- // The dirents are organized in a linear array in the file data.
- // Extract the file data and decode the dirents.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- // buf is used as scratch space for reading in dirents from disk and
- // unmarshalling them into dirent structs.
- buf := make([]byte, disklayout.DirentSize)
- size := args.diskInode.Size()
- for off, inc := uint64(0), uint64(0); off < size; off += inc {
- toRead := size - off
- if toRead > disklayout.DirentSize {
- toRead = disklayout.DirentSize
- }
- if n, err := regFile.impl.ReadAt(buf[:toRead], int64(off)); uint64(n) < toRead {
- return nil, err
- }
-
- var curDirent dirent
- if newDirent {
- curDirent.diskDirent = &disklayout.DirentNew{}
- } else {
- curDirent.diskDirent = &disklayout.DirentOld{}
- }
- curDirent.diskDirent.UnmarshalBytes(buf)
-
- if curDirent.diskDirent.Inode() != 0 && len(curDirent.diskDirent.FileName()) != 0 {
-			// An inode number and name length of 0 indicate an unused dirent.
- file.childList.PushBack(&curDirent)
- file.childMap[curDirent.diskDirent.FileName()] = &curDirent
- }
-
- // The next dirent is placed exactly after this dirent record on disk.
- inc = uint64(curDirent.diskDirent.RecordSize())
- }
-
- return file, nil
-}
-
-func (i *inode) isDir() bool {
- _, ok := i.impl.(*directory)
- return ok
-}
-
-// dirent is the directory.childList node.
-//
-// +stateify savable
-type dirent struct {
- diskDirent disklayout.Dirent
-
- // direntEntry links dirents into their parent directory.childList.
- direntEntry
-}
-
-// directoryFD represents a directory file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type directoryFD struct {
- fileDescription
- vfs.DirectoryFileDescriptionDefaultImpl
-
- // Protected by directory.mu.
- iter *dirent
- off int64
-}
-
-// Compiles only if directoryFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*directoryFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *directoryFD) Release(ctx context.Context) {
- if fd.iter == nil {
- return
- }
-
- dir := fd.inode().impl.(*directory)
- dir.mu.Lock()
- dir.childList.Remove(fd.iter)
- dir.mu.Unlock()
- fd.iter = nil
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- extfs := fd.filesystem()
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Ensure that fd.iter exists and is not linked into dir.childList.
- var child *dirent
- if fd.iter == nil {
- // Start iteration at the beginning of dir.
- child = dir.childList.Front()
- fd.iter = &dirent{}
- } else {
- // Continue iteration from where we left off.
- child = fd.iter.Next()
- dir.childList.Remove(fd.iter)
- }
- for ; child != nil; child = child.Next() {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- childType, ok := child.diskDirent.FileType()
- if !ok {
- // We will need to read the inode off disk. Do not increment
- // ref count here because this inode is not being added to the
- // dentry tree.
- extfs.mu.Lock()
- childInode, err := extfs.getOrCreateInodeLocked(child.diskDirent.Inode())
- extfs.mu.Unlock()
- if err != nil {
- // Usage of the file description after the error is
- // undefined. This implementation would continue reading
- // from the next dirent.
- fd.off++
- dir.childList.InsertAfter(child, fd.iter)
- return err
- }
- childType = fs.ToInodeType(childInode.diskInode.Mode().FileType())
- }
-
- if err := cb.Handle(vfs.Dirent{
- Name: child.diskDirent.FileName(),
- Type: fs.ToDirentType(childType),
- Ino: uint64(child.diskDirent.Inode()),
- NextOff: fd.off + 1,
- }); err != nil {
- dir.childList.InsertBefore(child, fd.iter)
- return err
- }
- fd.off++
- }
- }
- dir.childList.PushBack(fd.iter)
- return nil
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- if whence != linux.SEEK_SET && whence != linux.SEEK_CUR {
- return 0, syserror.EINVAL
- }
-
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Find resulting offset.
- if whence == linux.SEEK_CUR {
- offset += fd.off
- }
-
- if offset < 0 {
- // lseek(2) specifies that EINVAL should be returned if the resulting offset
- // is negative.
- return 0, syserror.EINVAL
- }
-
- n := int64(len(dir.childMap))
- realWantOff := offset
- if realWantOff > n {
- realWantOff = n
- }
- realCurOff := fd.off
- if realCurOff > n {
- realCurOff = n
- }
-
- // Ensure that fd.iter exists and is linked into dir.childList so we can
- // intelligently seek from the optimal position.
- if fd.iter == nil {
- fd.iter = &dirent{}
- dir.childList.PushFront(fd.iter)
- }
-
- // Guess that iterating from the current position is optimal.
- child := fd.iter
- diff := realWantOff - realCurOff // Shows direction and magnitude of travel.
-
- // See if starting from the beginning or end is better.
- abDiff := diff
- if diff < 0 {
- abDiff = -diff
- }
- if abDiff > realWantOff {
- // Starting from the beginning is best.
- child = dir.childList.Front()
- diff = realWantOff
- } else if abDiff > (n - realWantOff) {
- // Starting from the end is best.
- child = dir.childList.Back()
- // (n - 1) because the last non-nil dirent represents the (n-1)th offset.
- diff = realWantOff - (n - 1)
- }
-
- for child != nil {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- if diff == 0 {
- if child != fd.iter {
- dir.childList.Remove(fd.iter)
- dir.childList.InsertBefore(child, fd.iter)
- }
-
- fd.off = offset
- return offset, nil
- }
-
- if diff < 0 {
- diff++
- child = child.Prev()
- } else {
- diff--
- child = child.Next()
- }
- continue
- }
-
- if diff < 0 {
- child = child.Prev()
- } else {
- child = child.Next()
- }
- }
-
- // Reaching here indicates that the offset is beyond the end of the childList.
- dir.childList.Remove(fd.iter)
- dir.childList.PushBack(fd.iter)
- fd.off = offset
- return offset, nil
-}
-
-// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.
-func (fd *directoryFD) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, start, length uint64, whence int16, block fslock.Blocker) error {
- return fd.Locks().LockPOSIX(ctx, &fd.vfsfd, uid, t, start, length, whence, block)
-}
-
-// UnlockPOSIX implements vfs.FileDescriptionImpl.UnlockPOSIX.
-func (fd *directoryFD) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, start, length uint64, whence int16) error {
- return fd.Locks().UnlockPOSIX(ctx, &fd.vfsfd, uid, start, length, whence)
-}
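The fake-dirent cursor used by directoryFD above can be illustrated with a plain container/list: each open FD splices a marker element into the child list, iteration resumes after it and skips every other FD's marker. The entry type and directory names below are invented for this sketch and are not part of the deleted package.

package main

import (
	"container/list"
	"fmt"
)

// entry stands in for a dirent: an empty name marks a cursor element owned by
// one open directory FD rather than a real child.
type entry struct{ name string }

func main() {
	children := list.New()
	for _, n := range []string{".", "..", "a", "b"} {
		children.PushBack(&entry{name: n})
	}

	// One cursor per open FD, spliced into the same list as the real entries.
	cursor := children.PushFront(&entry{})

	for e := cursor.Next(); e != nil; e = e.Next() {
		if ent := e.Value.(*entry); ent.name != "" {
			fmt.Println(ent.name)
			// Keep the cursor just after the entry we emitted so a later
			// call can resume from here.
			children.MoveAfter(cursor, e)
		}
	}
}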
diff --git a/pkg/sentry/fsimpl/ext/disklayout/BUILD b/pkg/sentry/fsimpl/ext/disklayout/BUILD
deleted file mode 100644
index d98a05dd8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/BUILD
+++ /dev/null
@@ -1,48 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "disklayout",
- srcs = [
- "block_group.go",
- "block_group_32.go",
- "block_group_64.go",
- "dirent.go",
- "dirent_new.go",
- "dirent_old.go",
- "disklayout.go",
- "extent.go",
- "inode.go",
- "inode_new.go",
- "inode_old.go",
- "superblock.go",
- "superblock_32.go",
- "superblock_64.go",
- "superblock_old.go",
- "test_utils.go",
- ],
- marshal = True,
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/marshal",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- ],
-)
-
-go_test(
- name = "disklayout_test",
- size = "small",
- srcs = [
- "block_group_test.go",
- "dirent_test.go",
- "extent_test.go",
- "inode_test.go",
- "superblock_test.go",
- ],
- library = ":disklayout",
- deps = ["//pkg/sentry/kernel/time"],
-)
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group.go b/pkg/sentry/fsimpl/ext/disklayout/block_group.go
deleted file mode 100644
index 0d56ae9da..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// BlockGroup represents a Linux ext block group descriptor. An ext file system
-// is split into a series of block groups. This provides an access layer to
-// information needed to access and use a block group.
-//
-// Location:
-// - The block group descriptor table is always placed in the blocks
-// immediately after the block containing the superblock.
-// - The 1st block group descriptor in the original table is in the
-// (sb.FirstDataBlock() + 1)th block.
-// - See SuperBlock docs to see where the block group descriptor table is
-// replicated.
-// - sb.BgDescSize() must be used as the block group descriptor entry size
-// while reading the table from disk.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#block-group-descriptors.
-type BlockGroup interface {
- marshal.Marshallable
-
- // InodeTable returns the absolute block number of the block containing the
- // inode table. This points to an array of Inode structs. Inode tables are
- // statically allocated at mkfs time. The superblock records the number of
- // inodes per group (length of this table) and the size of each inode struct.
- InodeTable() uint64
-
- // BlockBitmap returns the absolute block number of the block containing the
- // block bitmap. This bitmap tracks the usage of data blocks within this block
- // group and has its own checksum.
- BlockBitmap() uint64
-
- // InodeBitmap returns the absolute block number of the block containing the
- // inode bitmap. This bitmap tracks the usage of this group's inode table
- // entries and has its own checksum.
- InodeBitmap() uint64
-
- // ExclusionBitmap returns the absolute block number of the snapshot exclusion
- // bitmap.
- ExclusionBitmap() uint64
-
- // FreeBlocksCount returns the number of free blocks in the group.
- FreeBlocksCount() uint32
-
- // FreeInodesCount returns the number of free inodes in the group.
- FreeInodesCount() uint32
-
- // DirectoryCount returns the number of inodes that represent directories
- // under this block group.
- DirectoryCount() uint32
-
- // UnusedInodeCount returns the number of unused inodes beyond the last used
- // inode in this group's inode table. As a result, we needn’t scan past the
- // (InodesPerGroup - UnusedInodeCount())th entry in the inode table.
- UnusedInodeCount() uint32
-
- // BlockBitmapChecksum returns the block bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- BlockBitmapChecksum() uint32
-
- // InodeBitmapChecksum returns the inode bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- InodeBitmapChecksum() uint32
-
- // Checksum returns this block group's checksum.
- //
- // If SbMetadataCsum feature is set:
- // - checksum is crc32c(FS UUID + group number + group descriptor
- // structure) & 0xFFFF.
- //
- // If SbGdtCsum feature is set:
- // - checksum is crc16(FS UUID + group number + group descriptor
- // structure).
- //
-	// SbMetadataCsum and SbGdtCsum should not both be set.
- // If they are, Linux warns and asks to run fsck.
- Checksum() uint16
-
- // Flags returns BGFlags which represents the block group flags.
- Flags() BGFlags
-}
-
-// These are the different block group flags.
-const (
- // BgInodeUninit indicates that inode table and bitmap are not initialized.
- BgInodeUninit uint16 = 0x1
-
- // BgBlockUninit indicates that block bitmap is not initialized.
- BgBlockUninit uint16 = 0x2
-
- // BgInodeZeroed indicates that inode table is zeroed.
- BgInodeZeroed uint16 = 0x4
-)
-
-// BGFlags represents all the different combinations of block group flags.
-type BGFlags struct {
- InodeUninit bool
- BlockUninit bool
- InodeZeroed bool
-}
-
-// ToInt converts a BGFlags struct back to its 16-bit representation.
-func (f BGFlags) ToInt() uint16 {
- var res uint16
-
- if f.InodeUninit {
- res |= BgInodeUninit
- }
- if f.BlockUninit {
- res |= BgBlockUninit
- }
- if f.InodeZeroed {
- res |= BgInodeZeroed
- }
-
- return res
-}
-
-// BGFlagsFromInt converts the 16-bit flag representation to a BGFlags struct.
-func BGFlagsFromInt(flags uint16) BGFlags {
- return BGFlags{
- InodeUninit: flags&BgInodeUninit > 0,
- BlockUninit: flags&BgBlockUninit > 0,
- InodeZeroed: flags&BgInodeZeroed > 0,
- }
-}
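A rough sketch of the SbMetadataCsum checksum rule described above (crc32c over FS UUID + group number + descriptor, truncated to 16 bits). The exact byte layout, endianness and the exclusion of the checksum field from its own computation are glossed over, and all values are illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	var (
		fsUUID   = make([]byte, 16) // an all-zero UUID, purely illustrative
		groupNum = uint32(3)
		desc     = make([]byte, 64) // a zeroed 64-byte descriptor, illustrative
	)

	// Concatenate UUID + group number + descriptor and take crc32c of it.
	buf := append([]byte{}, fsUUID...)
	buf = binary.LittleEndian.AppendUint32(buf, groupNum)
	buf = append(buf, desc...)

	castagnoli := crc32.MakeTable(crc32.Castagnoli)
	sum := crc32.Checksum(buf, castagnoli) & 0xFFFF // keep the low 16 bits
	fmt.Printf("%#04x\n", sum)
}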
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
deleted file mode 100644
index a35fa22a0..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup32Bit emulates the first half of struct ext4_group_desc in
-// fs/ext4/ext4.h. It is the block group descriptor struct for ext2, ext3 and
-// 32-bit ext4 filesystems. It implements BlockGroup interface.
-//
-// +marshal
-type BlockGroup32Bit struct {
- BlockBitmapLo uint32
- InodeBitmapLo uint32
- InodeTableLo uint32
- FreeBlocksCountLo uint16
- FreeInodesCountLo uint16
- UsedDirsCountLo uint16
- FlagsRaw uint16
- ExcludeBitmapLo uint32
- BlockBitmapChecksumLo uint16
- InodeBitmapChecksumLo uint16
- ItableUnusedLo uint16
- ChecksumRaw uint16
-}
-
-// Compiles only if BlockGroup32Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup32Bit)(nil)
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup32Bit) InodeTable() uint64 { return uint64(bg.InodeTableLo) }
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup32Bit) BlockBitmap() uint64 { return uint64(bg.BlockBitmapLo) }
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup32Bit) InodeBitmap() uint64 { return uint64(bg.InodeBitmapLo) }
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup32Bit) ExclusionBitmap() uint64 { return uint64(bg.ExcludeBitmapLo) }
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup32Bit) FreeBlocksCount() uint32 { return uint32(bg.FreeBlocksCountLo) }
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup32Bit) FreeInodesCount() uint32 { return uint32(bg.FreeInodesCountLo) }
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup32Bit) DirectoryCount() uint32 { return uint32(bg.UsedDirsCountLo) }
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup32Bit) UnusedInodeCount() uint32 { return uint32(bg.ItableUnusedLo) }
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup32Bit) BlockBitmapChecksum() uint32 { return uint32(bg.BlockBitmapChecksumLo) }
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup32Bit) InodeBitmapChecksum() uint32 { return uint32(bg.InodeBitmapChecksumLo) }
-
-// Checksum implements BlockGroup.Checksum.
-func (bg *BlockGroup32Bit) Checksum() uint16 { return bg.ChecksumRaw }
-
-// Flags implements BlockGroup.Flags.
-func (bg *BlockGroup32Bit) Flags() BGFlags { return BGFlagsFromInt(bg.FlagsRaw) }
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
deleted file mode 100644
index d54d1d345..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup64Bit emulates struct ext4_group_desc in fs/ext4/ext4.h.
-// It is the block group descriptor struct for 64-bit ext4 filesystems.
-// It implements BlockGroup interface. It is an extension of the 32-bit
-// version of BlockGroup.
-//
-// +marshal
-type BlockGroup64Bit struct {
-	// We embed the 32-bit struct here because the 64-bit version is just an
-	// extension of the 32-bit version.
- BlockGroup32Bit
-
- // 64-bit specific fields.
- BlockBitmapHi uint32
- InodeBitmapHi uint32
- InodeTableHi uint32
- FreeBlocksCountHi uint16
- FreeInodesCountHi uint16
- UsedDirsCountHi uint16
- ItableUnusedHi uint16
- ExcludeBitmapHi uint32
- BlockBitmapChecksumHi uint16
- InodeBitmapChecksumHi uint16
- _ uint32 // Padding to 64 bytes.
-}
-
-// Compiles only if BlockGroup64Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup64Bit)(nil)
-
-// Methods to override. Checksum() and Flags() are not overridden.
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup64Bit) InodeTable() uint64 {
- return (uint64(bg.InodeTableHi) << 32) | uint64(bg.InodeTableLo)
-}
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup64Bit) BlockBitmap() uint64 {
- return (uint64(bg.BlockBitmapHi) << 32) | uint64(bg.BlockBitmapLo)
-}
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup64Bit) InodeBitmap() uint64 {
- return (uint64(bg.InodeBitmapHi) << 32) | uint64(bg.InodeBitmapLo)
-}
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup64Bit) ExclusionBitmap() uint64 {
- return (uint64(bg.ExcludeBitmapHi) << 32) | uint64(bg.ExcludeBitmapLo)
-}
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup64Bit) FreeBlocksCount() uint32 {
- return (uint32(bg.FreeBlocksCountHi) << 16) | uint32(bg.FreeBlocksCountLo)
-}
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup64Bit) FreeInodesCount() uint32 {
- return (uint32(bg.FreeInodesCountHi) << 16) | uint32(bg.FreeInodesCountLo)
-}
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup64Bit) DirectoryCount() uint32 {
- return (uint32(bg.UsedDirsCountHi) << 16) | uint32(bg.UsedDirsCountLo)
-}
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup64Bit) UnusedInodeCount() uint32 {
- return (uint32(bg.ItableUnusedHi) << 16) | uint32(bg.ItableUnusedLo)
-}
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup64Bit) BlockBitmapChecksum() uint32 {
- return (uint32(bg.BlockBitmapChecksumHi) << 16) | uint32(bg.BlockBitmapChecksumLo)
-}
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup64Bit) InodeBitmapChecksum() uint32 {
- return (uint32(bg.InodeBitmapChecksumHi) << 16) | uint32(bg.InodeBitmapChecksumLo)
-}
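The Hi/Lo splicing above can be checked with made-up values; this standalone snippet only illustrates the bit arithmetic:

package main

import "fmt"

func main() {
	// 32-bit halves are widened and combined with a 32-bit shift.
	var lo uint32 = 0x89ABCDEF
	var hi uint32 = 0x00000012
	fmt.Printf("%#x\n", (uint64(hi)<<32)|uint64(lo)) // 0x1289abcdef

	// 16-bit counters are widened the same way with a 16-bit shift.
	var cntLo, cntHi uint16 = 0x1234, 0x0001
	fmt.Printf("%#x\n", (uint32(cntHi)<<16)|uint32(cntLo)) // 0x11234
}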
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
deleted file mode 100644
index e4ce484e4..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestBlockGroupSize tests that the block group descriptor structs are of the
-// correct size.
-func TestBlockGroupSize(t *testing.T) {
- var bgSmall BlockGroup32Bit
- assertSize(t, &bgSmall, 32)
- var bgBig BlockGroup64Bit
- assertSize(t, &bgBig, 64)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent.go b/pkg/sentry/fsimpl/ext/disklayout/dirent.go
deleted file mode 100644
index 568c8cb4c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-const (
- // MaxFileName is the maximum length of an ext fs file's name.
- MaxFileName = 255
-
- // DirentSize is the size of ext dirent structures.
- DirentSize = 263
-)
-
-var (
- // inodeTypeByFileType maps ext4 file types to vfs inode types.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#ftype.
- inodeTypeByFileType = map[uint8]fs.InodeType{
- 0: fs.Anonymous,
- 1: fs.RegularFile,
- 2: fs.Directory,
- 3: fs.CharacterDevice,
- 4: fs.BlockDevice,
- 5: fs.Pipe,
- 6: fs.Socket,
- 7: fs.Symlink,
- }
-)
-
-// The Dirent interface should be implemented by structs representing ext
-// directory entries. These are for the linear classical directories which
-// just store a list of dirent structs. A directory is a series of data blocks
-// where each data block contains a linear array of dirents. The last entry
-// of the block has a record size that takes it to the end of the block. The
-// end of the directory is when you read dirInode.Size() bytes from the blocks.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#linear-classic-directories.
-type Dirent interface {
- marshal.Marshallable
-
- // Inode returns the absolute inode number of the underlying inode.
- // Inode number 0 signifies an unused dirent.
- Inode() uint32
-
- // RecordSize returns the record length of this dirent on disk. The next
- // dirent in the dirent list should be read after these many bytes from
- // the current dirent. Must be a multiple of 4.
- RecordSize() uint16
-
-	// FileName returns the name of the file. Can be at most 255 bytes in length.
- FileName() string
-
- // FileType returns the inode type of the underlying inode. This is a
- // performance hack so that we do not have to read the underlying inode struct
- // to know the type of inode. This will only work when the SbDirentFileType
- // feature is set. If not, the second returned value will be false indicating
- // that user code has to use the inode mode to extract the file type.
- FileType() (fs.InodeType, bool)
-}
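The linear classic directory layout described in the Dirent comment above is easier to picture with a short walkthrough. The following standalone Go sketch is not part of the deleted package: the simplified on-disk layout it assumes mirrors ext4_dir_entry_2, and walkDirents is a hypothetical helper that advances by the record length until the directory size is exhausted.

package main

import (
	"encoding/binary"
	"fmt"
)

// walkDirents walks one directory's data laid out as ext4_dir_entry_2 records:
// read the fixed 8-byte header, slice out the name, then jump ahead by the
// on-disk record length. Inode number 0 marks an unused dirent.
func walkDirents(data []byte, dirSize uint64) {
	for off := uint64(0); off < dirSize; {
		inode := binary.LittleEndian.Uint32(data[off:])
		recLen := binary.LittleEndian.Uint16(data[off+4:])
		nameLen := uint64(data[off+6])
		// data[off+7] holds the file type when SbDirentFileType is set.
		name := string(data[off+8 : off+8+nameLen])
		if inode != 0 {
			fmt.Printf("inode=%d name=%q\n", inode, name)
		}
		off += uint64(recLen)
	}
}

func main() {
	// A single 32-byte "block" holding one dirent whose record length spans
	// the rest of the block, as the last entry of a block must.
	blk := make([]byte, 32)
	binary.LittleEndian.PutUint32(blk[0:], 12) // inode number
	binary.LittleEndian.PutUint16(blk[4:], 32) // record length
	blk[6] = 1                                 // name length
	blk[7] = 1                                 // file type: regular file
	blk[8] = 'a'
	walkDirents(blk, uint64(len(blk)))
}

Because the last record of each block is sized to reach the block's end, stepping by RecordSize also lands iteration exactly on block boundaries.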
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
deleted file mode 100644
index 51f9c2946..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
-
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-// DirentNew represents the ext4 directory entry struct. This emulates Linux's
-// ext4_dir_entry_2 struct. The FileName cannot be more than 255 bytes so we
-// only need 8 bits to store the NameLength. As a result, NameLength has been
-// shortened and the other 8 bits are used to encode the file type. Use the
-// FileTypeRaw field only if the SbDirentFileType feature is set.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentNew struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint8
- FileTypeRaw uint8
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentNew implements Dirent.
-var _ Dirent = (*DirentNew)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentNew) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentNew) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentNew) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentNew) FileType() (fs.InodeType, bool) {
- if inodeType, ok := inodeTypeByFileType[d.FileTypeRaw]; ok {
- return inodeType, true
- }
-
- panic(fmt.Sprintf("unknown file type %v", d.FileTypeRaw))
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
deleted file mode 100644
index d4b19e086..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/fs"
-
-// DirentOld represents the old directory entry struct which does not contain
-// the file type. This emulates Linux's ext4_dir_entry struct.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentOld struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint16
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentOld implements Dirent.
-var _ Dirent = (*DirentOld)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentOld) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentOld) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentOld) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentOld) FileType() (fs.InodeType, bool) {
- return fs.Anonymous, false
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
deleted file mode 100644
index 3486864dc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestDirentSize tests that the dirent structs are of the correct
-// size.
-func TestDirentSize(t *testing.T) {
- var dOld DirentOld
- assertSize(t, &dOld, DirentSize)
- var dNew DirentNew
- assertSize(t, &dNew, DirentSize)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go b/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
deleted file mode 100644
index 0834e9ba8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package disklayout provides Linux ext file system's disk level structures
-// which can be directly read into from the underlying device. Structs aim to
-// emulate structures exactly as they are laid out on disk.
-//
-// This library aims to be compatible with all ext(2/3/4) systems so it
-// provides a generic interface for all major structures and various
-// implementations (for different versions). The user code is responsible for
-// using appropriate implementations based on the underlying device.
-//
-// Interfacing all major structures here serves a few purposes:
-// - Abstracts away the complexity of the underlying structure from client
-//    code. The client only has to figure out versioning at setup and then
-//    can use these as black boxes and pass them higher up the stack.
-// - Having pointer receivers forces the user to use pointers to these
-//    heavy structs. This prevents the client code from unintentionally
-// copying these by value while passing the interface around.
-//  - Version-based implementation selection is resolved at setup, hence
-//    avoiding the per-call overhead of choosing an implementation.
-//  - All interface methods are pretty lightweight (they do not take any
-// parameters by design). Passing pointer arguments to interface methods
-// can lead to heap allocation as the compiler won't be able to perform
-// escape analysis on an unknown implementation at compile time.
-//
-// Notes:
-// - All structures on disk are in little-endian order. Only jbd2 (journal)
-// structures are in big-endian order.
-//  - All OS dependent fields in these structures will be interpreted using
-// the Linux version of that field.
-// - The suffix `Lo` in field names stands for lower bits of that field.
-// - The suffix `Hi` in field names stands for upper bits of that field.
-// - The suffix `Raw` has been added to indicate that the field is not split
-// into Lo and Hi fields and also to resolve name collision with the
-// respective interface.
-package disklayout
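Because the Lo/Hi and little-endian conventions noted in the package comment above recur in every struct in this package, a small hedged illustration may help. The snippet below is standalone Go with made-up values, not code from the package.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Reassembling a 48-bit block number stored as a 32-bit Lo field and a
	// 16-bit Hi field, the pattern used throughout this package.
	var blockLo uint32 = 0x89abcdef
	var blockHi uint16 = 0x0123
	block := (uint64(blockHi) << 32) | uint64(blockLo)
	fmt.Printf("block = %#x\n", block) // 0x12389abcdef

	// On-disk fields are little-endian, so raw bytes are decoded accordingly;
	// the ext superblock magic 0xef53 appears on disk as 0x53, 0xef.
	raw := []byte{0x53, 0xef}
	fmt.Printf("magic = %#x\n", binary.LittleEndian.Uint16(raw)) // 0xef53
}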
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent.go b/pkg/sentry/fsimpl/ext/disklayout/extent.go
deleted file mode 100644
index b13999bfc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/extent.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// Extents were introduced in ext4 and provide huge performance gains in terms
-// of data locality and reduced metadata block usage. Extents are organized in
-// extent trees. The root node is contained in inode.BlocksRaw.
-//
-// Terminology:
-// - Physical Block:
-// Filesystem data block which is addressed normally wrt the entire
-// filesystem (addressed with 48 bits).
-//
-// - File Block:
-//    Data block containing *only* file data and addressed wrt the file
-// with only 32 bits. The (i)th file block contains file data from
-// byte (i * sb.BlockSize()) to ((i+1) * sb.BlockSize()).
-
-const (
- // ExtentHeaderSize is the size of the header of an extent tree node.
- ExtentHeaderSize = 12
-
- // ExtentEntrySize is the size of an entry in an extent tree node.
- // This size is the same for both leaf and internal nodes.
- ExtentEntrySize = 12
-
- // ExtentMagic is the magic number which must be present in the header.
- ExtentMagic = 0xf30a
-)
-
-// ExtentEntryPair couples an in-memory ExtentNode with the ExtentEntry that
-// points to it. We want to cache these structs in memory to avoid repeated
-// disk reads.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentEntryPair struct {
- // Entry points to the child node on disk.
- Entry ExtentEntry
-	// Node points to the child node in memory. It is nil if the current node is a leaf.
- Node *ExtentNode
-}
-
-// ExtentNode represents an extent tree node. For internal nodes, all Entries
-// will be ExtentIdxs. For leaf nodes, they will all be Extents.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentNode struct {
- Header ExtentHeader
- Entries []ExtentEntryPair
-}
-
-// ExtentEntry represents an extent tree node entry. The entry can either be
-// an ExtentIdx or Extent itself. This exists to simplify navigation logic.
-type ExtentEntry interface {
- marshal.Marshallable
-
- // FileBlock returns the first file block number covered by this entry.
- FileBlock() uint32
-
- // PhysicalBlock returns the child physical block that this entry points to.
- PhysicalBlock() uint64
-}
-
-// ExtentHeader emulates the ext4_extent_header struct in ext4. Each extent
-// tree node begins with this and is followed by `NumEntries` number of:
-// - Extent if `Depth` == 0
-// - ExtentIdx otherwise
-//
-// +marshal
-type ExtentHeader struct {
-	// Magic is the extent magic number, which must be 0xf30a.
- Magic uint16
-
- // NumEntries indicates the number of valid entries following the header.
- NumEntries uint16
-
-	// MaxEntries is the maximum number of entries that could follow the
-	// header. Used while adding entries.
- MaxEntries uint16
-
- // Height represents the distance of this node from the farthest leaf. Please
- // note that Linux incorrectly calls this `Depth` (which means the distance
- // of the node from the root).
- Height uint16
- _ uint32
-}
-
-// ExtentIdx emulates the ext4_extent_idx struct in ext4. Only present in
-// internal nodes. Sorted in ascending order based on FirstFileBlock since
-// Linux does a binary search on this. This points to a block containing the
-// child node.
-//
-// +marshal
-type ExtentIdx struct {
- FirstFileBlock uint32
- ChildBlockLo uint32
- ChildBlockHi uint16
- _ uint16
-}
-
-// Compiles only if ExtentIdx implements ExtentEntry.
-var _ ExtentEntry = (*ExtentIdx)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (ei *ExtentIdx) FileBlock() uint32 {
- return ei.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the child block.
-func (ei *ExtentIdx) PhysicalBlock() uint64 {
- return (uint64(ei.ChildBlockHi) << 32) | uint64(ei.ChildBlockLo)
-}
-
-// Extent represents the ext4_extent struct in ext4. Only present in leaf
-// nodes. Sorted in ascending order based on FirstFileBlock since Linux does a
-// binary search on this. This points to an array of data blocks containing the
-// file data. It covers `Length` data blocks starting from `StartBlock`.
-//
-// +marshal
-type Extent struct {
- FirstFileBlock uint32
- Length uint16
- StartBlockHi uint16
- StartBlockLo uint32
-}
-
-// Compiles only if Extent implements ExtentEntry.
-var _ ExtentEntry = (*Extent)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (e *Extent) FileBlock() uint32 {
- return e.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the first data block this extent covers.
-func (e *Extent) PhysicalBlock() uint64 {
- return (uint64(e.StartBlockHi) << 32) | uint64(e.StartBlockLo)
-}
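The comments above note that entries within a node are sorted by their first file block and that Linux binary-searches them. A hypothetical helper along those lines, assumed to sit in package disklayout next to the types above and to import "sort", could look like this sketch.

// findEntry is a hypothetical sketch, not part of the deleted package. It
// returns the entry in a single extent tree node that covers (or, for internal
// nodes, leads towards) the given file block, exploiting the fact that entries
// are sorted by FileBlock.
func findEntry(node *ExtentNode, fileBlock uint32) ExtentEntry {
	entries := node.Entries
	// First index whose entry starts beyond fileBlock; the previous entry is
	// the candidate.
	i := sort.Search(len(entries), func(j int) bool {
		return entries[j].Entry.FileBlock() > fileBlock
	})
	if i == 0 {
		return nil // fileBlock lies before the first entry (a hole).
	}
	return entries[i-1].Entry
}

For an internal node the caller would follow the returned ExtentIdx's PhysicalBlock down one level; for a leaf it would check that the Extent's Length actually covers the requested block.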
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go b/pkg/sentry/fsimpl/ext/disklayout/extent_test.go
deleted file mode 100644
index c96002e19..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestExtentSize tests that the extent structs are of the correct
-// size.
-func TestExtentSize(t *testing.T) {
- var h ExtentHeader
- assertSize(t, &h, ExtentHeaderSize)
- var i ExtentIdx
- assertSize(t, &i, ExtentEntrySize)
- var e Extent
- assertSize(t, &e, ExtentEntrySize)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode.go b/pkg/sentry/fsimpl/ext/disklayout/inode.go
deleted file mode 100644
index ef25040a9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// Special inodes. See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#special-inodes.
-const (
- // RootDirInode is the inode number of the root directory inode.
- RootDirInode = 2
-)
-
-// The Inode interface must be implemented by structs representing ext inodes.
-// The inode stores all the metadata pertaining to the file (except for the
-// file name which is held by the directory entry). It does NOT expose all
-// fields and should be extended if need be.
-//
-// Some file systems (e.g. FAT) use the directory entry to store all this
-// information. Ext file systems do not, so that they can support hard links.
-// However, ext4 cheats a little bit and duplicates the file type in the
-// directory entry for performance gains.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#index-nodes.
-type Inode interface {
- marshal.Marshallable
-
-	// Mode returns the Linux file mode, which is mainly used to extract
- // information like:
- // - File permissions (read/write/execute by user/group/others).
- // - Sticky, set UID and GID bits.
- // - File type.
- //
- // Masks to extract this information are provided in pkg/abi/linux/file.go.
- Mode() linux.FileMode
-
- // UID returns the owner UID.
- UID() auth.KUID
-
- // GID returns the owner GID.
- GID() auth.KGID
-
- // Size returns the size of the file in bytes.
- Size() uint64
-
- // InodeSize returns the size of this inode struct in bytes.
- // In ext2 and ext3, the inode struct and inode disk record size was fixed at
- // 128 bytes. Ext4 makes it possible for the inode struct to be bigger.
- // However, accessing any field beyond the 128 bytes marker must be verified
- // using this method.
- InodeSize() uint16
-
- // AccessTime returns the last access time. Shows when the file was last read.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the extended attribute value checksum.
- AccessTime() time.Time
-
- // ChangeTime returns the last change time. Shows when the file meta data
- // (like permissions) was last changed.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the lower 32 bits of the attribute
- // value’s reference count.
- ChangeTime() time.Time
-
- // ModificationTime returns the last modification time. Shows when the file
- // content was last modified.
- //
- // If InExtendedAttr is set, then this should NOT be used because
- // the underlying field contains the number of the inode that owns the
- // extended attribute.
- ModificationTime() time.Time
-
- // DeletionTime returns the deletion time. Inodes are marked as deleted by
- // writing to the underlying field. FS tools can restore files until they are
- // actually overwritten.
- DeletionTime() time.Time
-
- // LinksCount returns the number of hard links to this inode.
- //
- // Normally there is an upper limit on the number of hard links:
- // - ext2/ext3 = 32,000
- // - ext4 = 65,000
- //
- // This implies that an ext4 directory cannot have more than 64,998
- // subdirectories because each subdirectory will have a hard link to the
-	// directory via the `..` entry. The directory has a hard link via its own
-	// `.` entry. And finally the inode is initialized with 1 hard link (itself).
- //
- // The underlying value is reset to 1 if all the following hold:
- // - Inode is a directory.
- // - SbDirNlink is enabled.
- // - Number of hard links is incremented past 64,999.
-	// A hard link value of 1 for a directory indicates that the number of hard
-	// links is unknown, because a directory has a minimum of 2 hard links (itself
- // and `.` entry).
- LinksCount() uint16
-
- // Flags returns InodeFlags which represents the inode flags.
- Flags() InodeFlags
-
- // Data returns the underlying inode.i_block array as a slice so it's
- // modifiable. This field is special and is used to store various kinds of
- // things depending on the filesystem version and inode type. The underlying
- // field name in Linux is a little misleading.
- // - In ext2/ext3, it contains the block map.
- // - In ext4, it contains the extent tree root node.
- // - For inline files, it contains the file contents.
- // - For symlinks, it contains the link path (if it fits here).
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#the-contents-of-inode-i-block.
- Data() []byte
-}
-
-// Inode flags. This is not comprehensive and flags which were not used in
-// the Linux kernel have been excluded.
-const (
- // InSync indicates that all writes to the file must be synchronous.
- InSync = 0x8
-
- // InImmutable indicates that this file is immutable.
- InImmutable = 0x10
-
- // InAppend indicates that this file can only be appended to.
- InAppend = 0x20
-
-	// InNoDump indicates that the dump(1) utility should not dump this file.
- InNoDump = 0x40
-
- // InNoAccessTime indicates that the access time of this inode must not be
- // updated.
- InNoAccessTime = 0x80
-
- // InIndex indicates that this directory has hashed indexes.
- InIndex = 0x1000
-
- // InJournalData indicates that file data must always be written through a
- // journal device.
- InJournalData = 0x4000
-
-	// InDirSync indicates that all the directory entry data must be written
- // synchronously.
- InDirSync = 0x10000
-
- // InTopDir indicates that this inode is at the top of the directory hierarchy.
- InTopDir = 0x20000
-
- // InHugeFile indicates that this is a huge file.
- InHugeFile = 0x40000
-
- // InExtents indicates that this inode uses extents.
- InExtents = 0x80000
-
- // InExtendedAttr indicates that this inode stores a large extended attribute
- // value in its data blocks.
- InExtendedAttr = 0x200000
-
- // InInline indicates that this inode has inline data.
- InInline = 0x10000000
-
- // InReserved indicates that this inode is reserved for the ext4 library.
- InReserved = 0x80000000
-)
-
-// InodeFlags represents all possible combinations of inode flags. It aims to
-// cover the bit masks and provide a more user-friendly interface.
-type InodeFlags struct {
- Sync bool
- Immutable bool
- Append bool
- NoDump bool
- NoAccessTime bool
- Index bool
- JournalData bool
- DirSync bool
- TopDir bool
- HugeFile bool
- Extents bool
- ExtendedAttr bool
- Inline bool
- Reserved bool
-}
-
-// ToInt converts inode flags back to their 32-bit representation.
-func (f InodeFlags) ToInt() uint32 {
- var res uint32
-
- if f.Sync {
- res |= InSync
- }
- if f.Immutable {
- res |= InImmutable
- }
- if f.Append {
- res |= InAppend
- }
- if f.NoDump {
- res |= InNoDump
- }
- if f.NoAccessTime {
- res |= InNoAccessTime
- }
- if f.Index {
- res |= InIndex
- }
- if f.JournalData {
- res |= InJournalData
- }
- if f.DirSync {
- res |= InDirSync
- }
- if f.TopDir {
- res |= InTopDir
- }
- if f.HugeFile {
- res |= InHugeFile
- }
- if f.Extents {
- res |= InExtents
- }
- if f.ExtendedAttr {
- res |= InExtendedAttr
- }
- if f.Inline {
- res |= InInline
- }
- if f.Reserved {
- res |= InReserved
- }
-
- return res
-}
-
-// InodeFlagsFromInt converts the integer representation of inode flags to
-// an InodeFlags struct.
-func InodeFlagsFromInt(f uint32) InodeFlags {
- return InodeFlags{
- Sync: f&InSync > 0,
- Immutable: f&InImmutable > 0,
- Append: f&InAppend > 0,
- NoDump: f&InNoDump > 0,
- NoAccessTime: f&InNoAccessTime > 0,
- Index: f&InIndex > 0,
- JournalData: f&InJournalData > 0,
- DirSync: f&InDirSync > 0,
- TopDir: f&InTopDir > 0,
- HugeFile: f&InHugeFile > 0,
- Extents: f&InExtents > 0,
- ExtendedAttr: f&InExtendedAttr > 0,
- Inline: f&InInline > 0,
- Reserved: f&InReserved > 0,
- }
-}
-
-// These masks define how users can view/modify inode flags. The rest of the
-// flags are for internal kernel usage only.
-const (
- InUserReadFlagMask = 0x4BDFFF
- InUserWriteFlagMask = 0x4B80FF
-)
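As a usage sketch for the flag helpers above (describeFlags is hypothetical and assumed to live alongside this package's code): decode the raw flag word, inspect a flag, and convert back; only the bits listed above survive the round trip.

// describeFlags is a hypothetical usage sketch of InodeFlagsFromInt/ToInt.
func describeFlags(raw uint32) string {
	flags := InodeFlagsFromInt(raw)
	// Round-tripping through ToInt keeps only the bits listed above; any
	// kernel-internal bits not modeled here are dropped.
	_ = flags.ToInt()
	if flags.Extents {
		// The inode's data map is an extent tree rather than a block map.
		return "extent-mapped inode"
	}
	return "block-mapped inode"
}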
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go b/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
deleted file mode 100644
index a4503f5cf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-
-// InodeNew represents ext4 inode structure which can be bigger than
-// OldInodeSize. The actual size of this struct should be determined using
-// inode.ExtraInodeSize. Accessing any field here should be verified with the
-// actual size. The extra space between the end of the inode struct and end of
-// the inode record can be used to store extended attr.
-//
-// If the TimeExtra fields are in scope, the lower 2 bits of those are used
-// to extend their counterpart to be 34 bits wide; the remaining (upper) 30
-// bits are used to provide nanosecond precision. Hence, these timestamps will
-// overflow in May 2446.
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-//
-// +marshal
-type InodeNew struct {
- InodeOld
-
- ExtraInodeSize uint16
- ChecksumHi uint16
- ChangeTimeExtra uint32
- ModificationTimeExtra uint32
- AccessTimeExtra uint32
- CreationTime uint32
- CreationTimeExtra uint32
- VersionHi uint32
- ProjectID uint32
-}
-
-// Compiles only if InodeNew implements Inode.
-var _ Inode = (*InodeNew)(nil)
-
-// fromExtraTime decodes the extra time and constructs the kernel time struct
-// with nanosecond precision.
-func fromExtraTime(lo int32, extra uint32) time.Time {
- // See description above InodeNew for format.
- seconds := (int64(extra&0x3) << 32) + int64(lo)
- nanoseconds := int64(extra >> 2)
- return time.FromUnix(seconds, nanoseconds)
-}
-
-// Only override methods which change due to ext4 specific fields.
-
-// Size implements Inode.Size.
-func (in *InodeNew) Size() uint64 {
- return (uint64(in.SizeHi) << 32) | uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeNew) InodeSize() uint16 {
- return OldInodeSize + in.ExtraInodeSize
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeNew) ChangeTime() time.Time {
- // Apply new timestamp logic if inode.ChangeTimeExtra is in scope.
- if in.ExtraInodeSize >= 8 {
- return fromExtraTime(in.ChangeTimeRaw, in.ChangeTimeExtra)
- }
-
- return in.InodeOld.ChangeTime()
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeNew) ModificationTime() time.Time {
- // Apply new timestamp logic if inode.ModificationTimeExtra is in scope.
- if in.ExtraInodeSize >= 12 {
- return fromExtraTime(in.ModificationTimeRaw, in.ModificationTimeExtra)
- }
-
- return in.InodeOld.ModificationTime()
-}
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeNew) AccessTime() time.Time {
- // Apply new timestamp logic if inode.AccessTimeExtra is in scope.
- if in.ExtraInodeSize >= 16 {
- return fromExtraTime(in.AccessTimeRaw, in.AccessTimeExtra)
- }
-
- return in.InodeOld.AccessTime()
-}
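To make the TimeExtra encoding concrete, here is a hypothetical inverse of fromExtraTime; the deleted package only ever decodes, so toExtraTime below is purely illustrative and packs a second count and nanoseconds into the (lo, extra) pair described in the struct comment: two epoch bits in the low end of extra, nanoseconds in the upper 30 bits.

// toExtraTime is a hypothetical sketch, not part of the deleted package. It is
// the inverse of fromExtraTime above: the low 32 bits of the second count go
// into lo (interpreted as signed), the two epoch bits that extend it to 34
// bits go into the bottom of extra, and nanoseconds fill the upper 30 bits.
func toExtraTime(seconds int64, nanoseconds int64) (lo int32, extra uint32) {
	lo = int32(seconds) // low 32 bits, sign-reinterpreted.
	// Epoch bits: how many times 2^32 must be added to the signed lo value
	// to recover the full second count.
	epoch := uint32((seconds+(1<<31))>>32) & 0x3
	extra = uint32(nanoseconds)<<2 | epoch
	return lo, extra
}

For example, seconds = 0x80000000 (the first post-2038 second) yields lo = -0x80000000 and epoch bits 0b01, matching the decoding table exercised in inode_test.go below.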
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go b/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
deleted file mode 100644
index e6b28babf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-const (
- // OldInodeSize is the inode size in ext2/ext3.
- OldInodeSize = 128
-)
-
-// InodeOld implements Inode interface. It emulates ext2/ext3 inode struct.
-// Inode struct size and record size are both 128 bytes for this.
-//
-// All fields representing time are in seconds since the epoch, which means
-// that they will overflow in January 2038.
-//
-// +marshal
-type InodeOld struct {
- ModeRaw uint16
- UIDLo uint16
- SizeLo uint32
-
- // The time fields are signed integers because they could be negative to
- // represent time before the epoch.
- AccessTimeRaw int32
- ChangeTimeRaw int32
- ModificationTimeRaw int32
- DeletionTimeRaw int32
-
- GIDLo uint16
- LinksCountRaw uint16
- BlocksCountLo uint32
- FlagsRaw uint32
- VersionLo uint32 // This is OS dependent.
- DataRaw [60]byte
- Generation uint32
- FileACLLo uint32
- SizeHi uint32
- ObsoFaddr uint32
-
- // OS dependent fields have been inlined here.
- BlocksCountHi uint16
- FileACLHi uint16
- UIDHi uint16
- GIDHi uint16
- ChecksumLo uint16
- _ uint16
-}
-
-// Compiles only if InodeOld implements Inode.
-var _ Inode = (*InodeOld)(nil)
-
-// Mode implements Inode.Mode.
-func (in *InodeOld) Mode() linux.FileMode { return linux.FileMode(in.ModeRaw) }
-
-// UID implements Inode.UID.
-func (in *InodeOld) UID() auth.KUID {
- return auth.KUID((uint32(in.UIDHi) << 16) | uint32(in.UIDLo))
-}
-
-// GID implements Inode.GID.
-func (in *InodeOld) GID() auth.KGID {
- return auth.KGID((uint32(in.GIDHi) << 16) | uint32(in.GIDLo))
-}
-
-// Size implements Inode.Size.
-func (in *InodeOld) Size() uint64 {
-	// In ext2/ext3, in.SizeHi did not exist; it was instead named in.DirACL.
- return uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeOld) InodeSize() uint16 { return OldInodeSize }
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeOld) AccessTime() time.Time {
- return time.FromUnix(int64(in.AccessTimeRaw), 0)
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeOld) ChangeTime() time.Time {
- return time.FromUnix(int64(in.ChangeTimeRaw), 0)
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeOld) ModificationTime() time.Time {
- return time.FromUnix(int64(in.ModificationTimeRaw), 0)
-}
-
-// DeletionTime implements Inode.DeletionTime.
-func (in *InodeOld) DeletionTime() time.Time {
- return time.FromUnix(int64(in.DeletionTimeRaw), 0)
-}
-
-// LinksCount implements Inode.LinksCount.
-func (in *InodeOld) LinksCount() uint16 { return in.LinksCountRaw }
-
-// Flags implements Inode.Flags.
-func (in *InodeOld) Flags() InodeFlags { return InodeFlagsFromInt(in.FlagsRaw) }
-
-// Data implements Inode.Data.
-func (in *InodeOld) Data() []byte { return in.DataRaw[:] }
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go b/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
deleted file mode 100644
index 90744e956..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// TestInodeSize tests that the inode structs are of the correct size.
-func TestInodeSize(t *testing.T) {
- var iOld InodeOld
- assertSize(t, &iOld, OldInodeSize)
-
- // This was updated from 156 bytes to 160 bytes in Oct 2015.
- var iNew InodeNew
- assertSize(t, &iNew, 160)
-}
-
-// TestTimestampSeconds tests that the seconds part of [a/c/m] timestamps in
-// ext4 inode structs are decoded correctly.
-//
-// These tests are derived from the table under https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-func TestTimestampSeconds(t *testing.T) {
- type timestampTest struct {
- // msbSet tells if the most significant bit of InodeOld.[X]TimeRaw is set.
- // If this is set then the 32-bit time is negative.
- msbSet bool
-
- // lowerBound tells if we should take the lowest possible value of
- // InodeOld.[X]TimeRaw while satisfying test.msbSet condition. If set to
- // false it tells to take the highest possible value.
- lowerBound bool
-
- // extraBits is InodeNew.[X]TimeExtra.
- extraBits uint32
-
- // want is the kernel time struct that is expected.
- want time.Time
- }
-
- tests := []timestampTest{
- // 1901-12-13
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(-0x80000000), 0),
- },
-
- // 1969-12-31
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(-1), 0),
- },
-
- // 1970-01-01
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(0), 0),
- },
-
- // 2038-01-19
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(0x7fffffff), 0),
- },
-
- // 2038-01-19
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x80000000), 0),
- },
-
- // 2106-02-07
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0xffffffff), 0),
- },
-
- // 2106-02-07
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x100000000), 0),
- },
-
- // 2174-02-25
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0x17fffffff), 0),
- },
-
- // 2174-02-25
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x180000000), 0),
- },
-
- // 2242-03-16
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x1ffffffff), 0),
- },
-
- // 2242-03-16
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x200000000), 0),
- },
-
- // 2310-04-04
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x27fffffff), 0),
- },
-
- // 2310-04-04
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x280000000), 0),
- },
-
- // 2378-04-22
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x2ffffffff), 0),
- },
-
- // 2378-04-22
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x300000000), 0),
- },
-
- // 2446-05-10
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x37fffffff), 0),
- },
- }
-
- lowerMSB0 := int32(0) // binary: 00000000 00000000 00000000 00000000
- upperMSB0 := int32(0x7fffffff) // binary: 01111111 11111111 11111111 11111111
- lowerMSB1 := int32(-0x80000000) // binary: 10000000 00000000 00000000 00000000
- upperMSB1 := int32(-1) // binary: 11111111 11111111 11111111 11111111
-
- get32BitTime := func(test timestampTest) int32 {
- if test.msbSet {
- if test.lowerBound {
- return lowerMSB1
- }
-
- return upperMSB1
- }
-
- if test.lowerBound {
- return lowerMSB0
- }
-
- return upperMSB0
- }
-
- getTestName := func(test timestampTest) string {
- return fmt.Sprintf(
- "Tests time decoding with epoch bits 0b%s and 32-bit raw time: MSB set=%t, lower bound=%t",
- strconv.FormatInt(int64(test.extraBits), 2),
- test.msbSet,
- test.lowerBound,
- )
- }
-
- for _, test := range tests {
- t.Run(getTestName(test), func(t *testing.T) {
- if got := fromExtraTime(get32BitTime(test), test.extraBits); got != test.want {
- t.Errorf("Expected: %v, Got: %v", test.want, got)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock.go b/pkg/sentry/fsimpl/ext/disklayout/superblock.go
deleted file mode 100644
index 70948ebe9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock.go
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-const (
- // SbOffset is the absolute offset at which the superblock is placed.
- SbOffset = 1024
-)
-
-// SuperBlock should be implemented by structs representing the ext superblock.
-// The superblock holds a lot of information about the enclosing filesystem.
-// This interface aims to provide access methods to important information held
-// by the superblock. It does NOT expose all fields of the superblock, only the
-// ones necessary. This can be expanded as needed.
-//
-// Location and replication:
-// - The superblock is located at offset 1024 in block group 0.
-// - Redundant copies of the superblock and group descriptors are kept in
-// all groups if SbSparse feature flag is NOT set. If it is set, the
-// replicas only exist in groups whose group number is either 0 or a
-// power of 3, 5, or 7.
-// - There is also a sparse superblock feature v2 in which there are just
-// two replicas saved in the block groups pointed by sb.s_backup_bgs.
-//
-// Replicas should eventually be updated if the superblock is updated.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#super-block.
-type SuperBlock interface {
- marshal.Marshallable
-
- // InodesCount returns the total number of inodes in this filesystem.
- InodesCount() uint32
-
- // BlocksCount returns the total number of data blocks in this filesystem.
- BlocksCount() uint64
-
- // FreeBlocksCount returns the number of free blocks in this filesystem.
- FreeBlocksCount() uint64
-
- // FreeInodesCount returns the number of free inodes in this filesystem.
- FreeInodesCount() uint32
-
- // MountCount returns the number of mounts since the last fsck.
- MountCount() uint16
-
- // MaxMountCount returns the number of mounts allowed beyond which a fsck is
- // needed.
- MaxMountCount() uint16
-
- // FirstDataBlock returns the absolute block number of the first data block,
- // which contains the super block itself.
- //
- // If the filesystem has 1kb data blocks then this should return 1. For all
- // other configurations, this typically returns 0.
- FirstDataBlock() uint32
-
- // BlockSize returns the size of one data block in this filesystem.
- // This can be calculated by 2^(10 + sb.s_log_block_size). This ensures that
- // the smallest block size is 1kb.
- BlockSize() uint64
-
- // BlocksPerGroup returns the number of data blocks in a block group.
- BlocksPerGroup() uint32
-
- // ClusterSize returns block cluster size (set during mkfs time by admin).
- // This can be calculated by 2^(10 + sb.s_log_cluster_size). This ensures that
- // the smallest cluster size is 1kb.
- //
- // sb.s_log_cluster_size must equal sb.s_log_block_size if bigalloc feature
- // is NOT set and consequently BlockSize() = ClusterSize() in that case.
- ClusterSize() uint64
-
- // ClustersPerGroup returns:
- // - number of clusters per group if bigalloc is enabled.
- // - BlocksPerGroup() otherwise.
- ClustersPerGroup() uint32
-
-	// InodeSize returns the size of the inode disk record in bytes. Use this
- // to iterate over inode arrays on disk.
- //
- // In ext2 and ext3:
- // - Each inode had a disk record of 128 bytes.
- // - The inode struct size was fixed at 128 bytes.
- //
-	// In ext4 it is possible to allocate larger on-disk inodes:
- // - Inode disk record size = sb.s_inode_size (function return value).
- // = 256 (default)
- // - Inode struct size = 128 + inode.i_extra_isize.
- // = 128 + 32 = 160 (default)
- InodeSize() uint16
-
- // InodesPerGroup returns the number of inodes in a block group.
- InodesPerGroup() uint32
-
- // BgDescSize returns the size of the block group descriptor struct.
- //
- // In ext2, ext3, ext4 (without 64-bit feature), the block group descriptor
- // is only 32 bytes long.
- // In ext4 with 64-bit feature, the block group descriptor expands to AT LEAST
- // 64 bytes. It might be bigger than that.
- BgDescSize() uint16
-
- // CompatibleFeatures returns the CompatFeatures struct which holds all the
- // compatible features this fs supports.
- CompatibleFeatures() CompatFeatures
-
-	// IncompatibleFeatures returns the IncompatFeatures struct which holds all the
- // incompatible features this fs supports.
- IncompatibleFeatures() IncompatFeatures
-
-	// ReadOnlyCompatibleFeatures returns the RoCompatFeatures struct which holds
-	// all the readonly compatible features this fs supports.
- ReadOnlyCompatibleFeatures() RoCompatFeatures
-
-	// Magic returns the magic signature, which must be 0xef53.
- Magic() uint16
-
- // Revision returns the superblock revision. Superblock struct fields from
- // offset 0x54 till 0x150 should only be used if superblock has DynamicRev.
- Revision() SbRevision
-}
-
-// SbRevision is the type for superblock revisions.
-type SbRevision uint32
-
-// Super block revisions.
-const (
- // OldRev is the good old (original) format.
- OldRev SbRevision = 0
-
- // DynamicRev is v2 format w/ dynamic inode sizes.
- DynamicRev SbRevision = 1
-)
-
-// Superblock compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirPrealloc indicates directory preallocation.
- SbDirPrealloc = 0x1
-
- // SbHasJournal indicates the presence of a journal. jbd2 should only work
- // with this being set.
- SbHasJournal = 0x4
-
- // SbExtAttr indicates extended attributes support.
- SbExtAttr = 0x8
-
- // SbResizeInode indicates that the fs has reserved GDT blocks (right after
- // group descriptors) for fs expansion.
- SbResizeInode = 0x10
-
- // SbDirIndex indicates that the fs has directory indices.
- SbDirIndex = 0x20
-
- // SbSparseV2 stands for Sparse superblock version 2.
- SbSparseV2 = 0x200
-)
-
-// CompatFeatures represents a superblock's compatible feature set. If the
-// kernel does not understand any of these features, it can still read/write
-// to this fs.
-type CompatFeatures struct {
- DirPrealloc bool
- HasJournal bool
- ExtAttr bool
- ResizeInode bool
- DirIndex bool
- SparseV2 bool
-}
-
-// ToInt converts superblock compatible features back to their 32-bit representation.
-func (f CompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirPrealloc {
- res |= SbDirPrealloc
- }
- if f.HasJournal {
- res |= SbHasJournal
- }
- if f.ExtAttr {
- res |= SbExtAttr
- }
- if f.ResizeInode {
- res |= SbResizeInode
- }
- if f.DirIndex {
- res |= SbDirIndex
- }
- if f.SparseV2 {
- res |= SbSparseV2
- }
-
- return res
-}
-
-// CompatFeaturesFromInt converts the integer representation of superblock
-// compatible features to CompatFeatures struct.
-func CompatFeaturesFromInt(f uint32) CompatFeatures {
- return CompatFeatures{
- DirPrealloc: f&SbDirPrealloc > 0,
- HasJournal: f&SbHasJournal > 0,
- ExtAttr: f&SbExtAttr > 0,
- ResizeInode: f&SbResizeInode > 0,
- DirIndex: f&SbDirIndex > 0,
- SparseV2: f&SbSparseV2 > 0,
- }
-}
-
-// Superblock incompatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirentFileType indicates that directory entries record the file type.
- // We should use struct DirentNew for dirents then.
- SbDirentFileType = 0x2
-
- // SbRecovery indicates that the filesystem needs recovery.
- SbRecovery = 0x4
-
- // SbJournalDev indicates that the filesystem has a separate journal device.
- SbJournalDev = 0x8
-
- // SbMetaBG indicates that the filesystem is using Meta block groups. Moves
- // the group descriptors from the congested first block group into the first
- // group of each metablock group to increase the maximum block groups limit
- // and hence support much larger filesystems.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#meta-block-groups.
- SbMetaBG = 0x10
-
- // SbExtents indicates that the filesystem uses extents. Must be set in ext4
- // filesystems.
- SbExtents = 0x40
-
- // SbIs64Bit indicates that this filesystem addresses blocks with 64-bits.
- // Hence can support 2^64 data blocks.
- SbIs64Bit = 0x80
-
- // SbMMP indicates that this filesystem has multiple mount protection.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#multiple-mount-protection.
- SbMMP = 0x100
-
- // SbFlexBg indicates that this filesystem has flexible block groups. Several
- // block groups are tied into one logical block group so that all the metadata
- // for the block groups (bitmaps and inode tables) are close together for
-	// faster loading. Consequently, large files will be contiguous on disk.
- // However, this does not affect the placement of redundant superblocks and
- // group descriptors.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#flexible-block-groups.
- SbFlexBg = 0x200
-
-	// SbLargeDir indicates that the large directory feature is enabled. A
-	// directory htree can then be 3 levels deep; otherwise, directory htrees
-	// are limited to 2 levels.
- SbLargeDir = 0x4000
-
- // SbInlineData allows inline data in inodes for really small files.
- SbInlineData = 0x8000
-
- // SbEncrypted indicates that this fs contains encrypted inodes.
- SbEncrypted = 0x10000
-)
-
-// IncompatFeatures represents a superblock's incompatible feature set. If the
-// kernel does not understand any of these features, it should refuse to mount.
-type IncompatFeatures struct {
- DirentFileType bool
- Recovery bool
- JournalDev bool
- MetaBG bool
- Extents bool
- Is64Bit bool
- MMP bool
- FlexBg bool
- LargeDir bool
- InlineData bool
- Encrypted bool
-}
-
-// ToInt converts superblock incompatible features back to their 32-bit representation.
-func (f IncompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirentFileType {
- res |= SbDirentFileType
- }
- if f.Recovery {
- res |= SbRecovery
- }
- if f.JournalDev {
- res |= SbJournalDev
- }
- if f.MetaBG {
- res |= SbMetaBG
- }
- if f.Extents {
- res |= SbExtents
- }
- if f.Is64Bit {
- res |= SbIs64Bit
- }
- if f.MMP {
- res |= SbMMP
- }
- if f.FlexBg {
- res |= SbFlexBg
- }
- if f.LargeDir {
- res |= SbLargeDir
- }
- if f.InlineData {
- res |= SbInlineData
- }
- if f.Encrypted {
- res |= SbEncrypted
- }
-
- return res
-}
-
-// IncompatFeaturesFromInt converts the integer representation of superblock
-// incompatible features to IncompatFeatures struct.
-func IncompatFeaturesFromInt(f uint32) IncompatFeatures {
- return IncompatFeatures{
- DirentFileType: f&SbDirentFileType > 0,
- Recovery: f&SbRecovery > 0,
- JournalDev: f&SbJournalDev > 0,
- MetaBG: f&SbMetaBG > 0,
- Extents: f&SbExtents > 0,
- Is64Bit: f&SbIs64Bit > 0,
- MMP: f&SbMMP > 0,
- FlexBg: f&SbFlexBg > 0,
- LargeDir: f&SbLargeDir > 0,
- InlineData: f&SbInlineData > 0,
- Encrypted: f&SbEncrypted > 0,
- }
-}
-
-// Superblock readonly compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbSparse indicates sparse superblocks. Only groups with number either 0 or
- // a power of 3, 5, or 7 will have redundant copies of the superblock and
-	// group descriptors.
- SbSparse = 0x1
-
- // SbLargeFile indicates that this fs has been used to store a file >= 2GiB.
- SbLargeFile = 0x2
-
- // SbHugeFile indicates that this fs contains files whose sizes are
-	// represented in units of logical blocks, not 512-byte sectors.
- SbHugeFile = 0x8
-
- // SbGdtCsum indicates that group descriptors have checksums.
- SbGdtCsum = 0x10
-
- // SbDirNlink indicates that the new subdirectory limit is 64,999. Ext3 has a
- // 32,000 subdirectory limit.
- SbDirNlink = 0x20
-
- // SbExtraIsize indicates that large inodes exist on this filesystem.
- SbExtraIsize = 0x40
-
- // SbHasSnapshot indicates the existence of a snapshot.
- SbHasSnapshot = 0x80
-
- // SbQuota enables usage tracking for all quota types.
- SbQuota = 0x100
-
- // SbBigalloc maps to the bigalloc feature. When set, the minimum allocation
- // unit becomes a cluster rather than a data block. Then block bitmaps track
- // clusters, not data blocks.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#bigalloc.
- SbBigalloc = 0x200
-
- // SbMetadataCsum indicates that the fs supports metadata checksumming.
- SbMetadataCsum = 0x400
-
- // SbReadOnly marks this filesystem as readonly. Should refuse to mount in
- // read/write mode.
- SbReadOnly = 0x1000
-)
-
-// RoCompatFeatures represents a superblock's readonly compatible feature set.
-// If the kernel does not understand any of these features, it can still mount
-// readonly. But if the user wants to mount read/write, the kernel should
-// refuse to mount.
-type RoCompatFeatures struct {
- Sparse bool
- LargeFile bool
- HugeFile bool
- GdtCsum bool
- DirNlink bool
- ExtraIsize bool
- HasSnapshot bool
- Quota bool
- Bigalloc bool
- MetadataCsum bool
- ReadOnly bool
-}
-
-// ToInt converts superblock readonly compatible features to their 32-bit representation.
-func (f RoCompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.Sparse {
- res |= SbSparse
- }
- if f.LargeFile {
- res |= SbLargeFile
- }
- if f.HugeFile {
- res |= SbHugeFile
- }
- if f.GdtCsum {
- res |= SbGdtCsum
- }
- if f.DirNlink {
- res |= SbDirNlink
- }
- if f.ExtraIsize {
- res |= SbExtraIsize
- }
- if f.HasSnapshot {
- res |= SbHasSnapshot
- }
- if f.Quota {
- res |= SbQuota
- }
- if f.Bigalloc {
- res |= SbBigalloc
- }
- if f.MetadataCsum {
- res |= SbMetadataCsum
- }
- if f.ReadOnly {
- res |= SbReadOnly
- }
-
- return res
-}
-
-// RoCompatFeaturesFromInt converts the integer representation of superblock
-// readonly compatible features to RoCompatFeatures struct.
-func RoCompatFeaturesFromInt(f uint32) RoCompatFeatures {
- return RoCompatFeatures{
- Sparse: f&SbSparse > 0,
- LargeFile: f&SbLargeFile > 0,
- HugeFile: f&SbHugeFile > 0,
- GdtCsum: f&SbGdtCsum > 0,
- DirNlink: f&SbDirNlink > 0,
- ExtraIsize: f&SbExtraIsize > 0,
- HasSnapshot: f&SbHasSnapshot > 0,
- Quota: f&SbQuota > 0,
- Bigalloc: f&SbBigalloc > 0,
- MetadataCsum: f&SbMetadataCsum > 0,
- ReadOnly: f&SbReadOnly > 0,
- }
-}
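The decoded feature structs above are what drive which on-disk layouts a reader parses. The sketch below is hypothetical (chooseLayouts and blockSizeFromLog are not part of the deleted package, and are assumed to sit alongside it) and shows that decision plus the 2^(10 + sb.s_log_block_size) computation mentioned in the BlockSize doc comment.

// chooseLayouts is a hypothetical sketch of how the decoded feature structs
// above might be consulted: the 64-bit feature selects the wide block group
// descriptor and superblock structs, and SbDirentFileType selects DirentNew.
func chooseLayouts(incompatRaw uint32) (is64Bit, direntsHaveType bool) {
	feats := IncompatFeaturesFromInt(incompatRaw)
	return feats.Is64Bit, feats.DirentFileType
}

// blockSizeFromLog is the 2^(10 + sb.s_log_block_size) computation described
// in the SuperBlock.BlockSize doc comment; the smallest block size is 1kb.
func blockSizeFromLog(logBlockSize uint32) uint64 {
	return uint64(1024) << logBlockSize
}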
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
deleted file mode 100644
index 4dc6080fb..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock32Bit implements SuperBlock and represents the 32-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. Should be used only if
-// RevLevel = DynamicRev and 64-bit feature is disabled.
-//
-// +marshal
-type SuperBlock32Bit struct {
- // We embed the old superblock struct here because the 32-bit version is just
- // an extension of the old version.
- SuperBlockOld
-
- FirstInode uint32
- InodeSizeRaw uint16
- BlockGroupNumber uint16
- FeatureCompat uint32
- FeatureIncompat uint32
- FeatureRoCompat uint32
- UUID [16]byte
- VolumeName [16]byte
- LastMounted [64]byte
- AlgoUsageBitmap uint32
- PreallocBlocks uint8
- PreallocDirBlocks uint8
- ReservedGdtBlocks uint16
- JournalUUID [16]byte
- JournalInum uint32
- JournalDev uint32
- LastOrphan uint32
- HashSeed [4]uint32
- DefaultHashVersion uint8
- JnlBackupType uint8
- BgDescSizeRaw uint16
- DefaultMountOpts uint32
- FirstMetaBg uint32
- MkfsTime uint32
- JnlBlocks [17]uint32
-}
-
-// Compiles only if SuperBlock32Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock32Bit)(nil)
-
-// Only override methods which change based on the additional fields above.
-// Not overriding SuperBlock.BgDescSize because it would still return 32 here.
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlock32Bit) InodeSize() uint16 {
- return sb.InodeSizeRaw
-}
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlock32Bit) CompatibleFeatures() CompatFeatures {
- return CompatFeaturesFromInt(sb.FeatureCompat)
-}
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlock32Bit) IncompatibleFeatures() IncompatFeatures {
- return IncompatFeaturesFromInt(sb.FeatureIncompat)
-}
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlock32Bit) ReadOnlyCompatibleFeatures() RoCompatFeatures {
- return RoCompatFeaturesFromInt(sb.FeatureRoCompat)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
deleted file mode 100644
index 2c9039327..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock64Bit implements SuperBlock and represents the 64-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. This adds up to exactly
-// 1024 bytes (smallest possible block size) and hence the superblock always
-// fits in no more than one data block. Should only be used when the 64-bit
-// feature is set.
-//
-// +marshal
-type SuperBlock64Bit struct {
- // We embed the 32-bit struct here because the 64-bit version is just an
- // extension of the 32-bit version.
- SuperBlock32Bit
-
- BlocksCountHi uint32
- ReservedBlocksCountHi uint32
- FreeBlocksCountHi uint32
- MinInodeSize uint16
- WantInodeSize uint16
- Flags uint32
- RaidStride uint16
- MmpInterval uint16
- MmpBlock uint64
- RaidStripeWidth uint32
- LogGroupsPerFlex uint8
- ChecksumType uint8
- _ uint16
- KbytesWritten uint64
- SnapshotInum uint32
- SnapshotID uint32
- SnapshotRsrvBlocksCount uint64
- SnapshotList uint32
- ErrorCount uint32
- FirstErrorTime uint32
- FirstErrorInode uint32
- FirstErrorBlock uint64
- FirstErrorFunction [32]byte
- FirstErrorLine uint32
- LastErrorTime uint32
- LastErrorInode uint32
- LastErrorLine uint32
- LastErrorBlock uint64
- LastErrorFunction [32]byte
- MountOpts [64]byte
- UserQuotaInum uint32
- GroupQuotaInum uint32
- OverheadBlocks uint32
- BackupBgs [2]uint32
- EncryptAlgos [4]uint8
- EncryptPwSalt [16]uint8
- LostFoundInode uint32
- ProjectQuotaInode uint32
- ChecksumSeed uint32
- WtimeHi uint8
- MtimeHi uint8
- MkfsTimeHi uint8
- LastCheckHi uint8
- FirstErrorTimeHi uint8
- LastErrorTimeHi uint8
- _ [2]uint8
- Encoding uint16
- EncodingFlags uint16
- _ [95]uint32
- Checksum uint32
-}
-
-// Compiles only if SuperBlock64Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock64Bit)(nil)
-
-// Only override methods which change based on the 64-bit feature.
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlock64Bit) BlocksCount() uint64 {
- return (uint64(sb.BlocksCountHi) << 32) | uint64(sb.BlocksCountLo)
-}
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlock64Bit) FreeBlocksCount() uint64 {
- return (uint64(sb.FreeBlocksCountHi) << 32) | uint64(sb.FreeBlocksCountLo)
-}
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlock64Bit) BgDescSize() uint16 { return sb.BgDescSizeRaw }
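
BlocksCount and FreeBlocksCount above recombine a value that the 64-bit layout splits across two 32-bit on-disk fields. A minimal, self-contained illustration of that hi/lo composition (the example value is made up):

```go
// Standalone illustration of the hi/lo composition used by
// SuperBlock64Bit.BlocksCount and FreeBlocksCount: a 64-bit count is split
// across two 32-bit on-disk fields and recombined with a shift and an OR.
package main

import "fmt"

func combine(hi, lo uint32) uint64 {
	return (uint64(hi) << 32) | uint64(lo)
}

func split(v uint64) (hi, lo uint32) {
	return uint32(v >> 32), uint32(v)
}

func main() {
	// Example: a filesystem with 0x1_0000_0040 blocks stores
	// BlocksCountLo = 0x40 and BlocksCountHi = 0x1.
	hi, lo := split(0x100000040)
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo)         // hi=0x1 lo=0x40
	fmt.Printf("combined=%#x\n", combine(hi, lo)) // combined=0x100000040
}
```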
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
deleted file mode 100644
index e4709f23c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlockOld implements SuperBlock and represents the old version of the
-// superblock struct. Should be used only if RevLevel = OldRev.
-//
-// +marshal
-type SuperBlockOld struct {
- InodesCountRaw uint32
- BlocksCountLo uint32
- ReservedBlocksCount uint32
- FreeBlocksCountLo uint32
- FreeInodesCountRaw uint32
- FirstDataBlockRaw uint32
- LogBlockSize uint32
- LogClusterSize uint32
- BlocksPerGroupRaw uint32
- ClustersPerGroupRaw uint32
- InodesPerGroupRaw uint32
- Mtime uint32
- Wtime uint32
- MountCountRaw uint16
- MaxMountCountRaw uint16
- MagicRaw uint16
- State uint16
- Errors uint16
- MinorRevLevel uint16
- LastCheck uint32
- CheckInterval uint32
- CreatorOS uint32
- RevLevel uint32
- DefResUID uint16
- DefResGID uint16
-}
-
-// Compiles only if SuperBlockOld implements SuperBlock.
-var _ SuperBlock = (*SuperBlockOld)(nil)
-
-// InodesCount implements SuperBlock.InodesCount.
-func (sb *SuperBlockOld) InodesCount() uint32 { return sb.InodesCountRaw }
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlockOld) BlocksCount() uint64 { return uint64(sb.BlocksCountLo) }
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlockOld) FreeBlocksCount() uint64 { return uint64(sb.FreeBlocksCountLo) }
-
-// FreeInodesCount implements SuperBlock.FreeInodesCount.
-func (sb *SuperBlockOld) FreeInodesCount() uint32 { return sb.FreeInodesCountRaw }
-
-// MountCount implements SuperBlock.MountCount.
-func (sb *SuperBlockOld) MountCount() uint16 { return sb.MountCountRaw }
-
-// MaxMountCount implements SuperBlock.MaxMountCount.
-func (sb *SuperBlockOld) MaxMountCount() uint16 { return sb.MaxMountCountRaw }
-
-// FirstDataBlock implements SuperBlock.FirstDataBlock.
-func (sb *SuperBlockOld) FirstDataBlock() uint32 { return sb.FirstDataBlockRaw }
-
-// BlockSize implements SuperBlock.BlockSize.
-func (sb *SuperBlockOld) BlockSize() uint64 { return 1 << (10 + sb.LogBlockSize) }
-
-// BlocksPerGroup implements SuperBlock.BlocksPerGroup.
-func (sb *SuperBlockOld) BlocksPerGroup() uint32 { return sb.BlocksPerGroupRaw }
-
-// ClusterSize implements SuperBlock.ClusterSize.
-func (sb *SuperBlockOld) ClusterSize() uint64 { return 1 << (10 + sb.LogClusterSize) }
-
-// ClustersPerGroup implements SuperBlock.ClustersPerGroup.
-func (sb *SuperBlockOld) ClustersPerGroup() uint32 { return sb.ClustersPerGroupRaw }
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlockOld) InodeSize() uint16 { return OldInodeSize }
-
-// InodesPerGroup implements SuperBlock.InodesPerGroup.
-func (sb *SuperBlockOld) InodesPerGroup() uint32 { return sb.InodesPerGroupRaw }
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlockOld) BgDescSize() uint16 { return 32 }
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlockOld) CompatibleFeatures() CompatFeatures { return CompatFeatures{} }
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlockOld) IncompatibleFeatures() IncompatFeatures { return IncompatFeatures{} }
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlockOld) ReadOnlyCompatibleFeatures() RoCompatFeatures { return RoCompatFeatures{} }
-
-// Magic implements SuperBlock.Magic.
-func (sb *SuperBlockOld) Magic() uint16 { return sb.MagicRaw }
-
-// Revision implements SuperBlock.Revision.
-func (sb *SuperBlockOld) Revision() SbRevision { return SbRevision(sb.RevLevel) }
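
BlockSize and ClusterSize above derive the size from a logarithmic on-disk field: the stored value is log2(size) minus 10, so size = 1 << (10 + log). A small standalone check of that formula:

```go
// Standalone check of the BlockSize/ClusterSize formula used by
// SuperBlockOld above: size = 1 << (10 + LogBlockSize).
package main

import "fmt"

func blockSize(logBlockSize uint32) uint64 {
	return 1 << (10 + logBlockSize)
}

func main() {
	for _, log := range []uint32{0, 1, 2} {
		fmt.Printf("LogBlockSize=%d -> %d bytes\n", log, blockSize(log))
	}
	// LogBlockSize=0 -> 1024 bytes
	// LogBlockSize=1 -> 2048 bytes
	// LogBlockSize=2 -> 4096 bytes
}
```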
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go
deleted file mode 100644
index b734b6987..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestSuperBlockSize tests that the superblock structs are of the correct
-// size.
-func TestSuperBlockSize(t *testing.T) {
- var sbOld SuperBlockOld
- assertSize(t, &sbOld, 84)
- var sb32 SuperBlock32Bit
- assertSize(t, &sb32, 336)
- var sb64 SuperBlock64Bit
- assertSize(t, &sb64, 1024)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go b/pkg/sentry/fsimpl/ext/disklayout/test_utils.go
deleted file mode 100644
index a4bc08411..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-func assertSize(t *testing.T, v marshal.Marshallable, want int) {
- t.Helper()
-
- if got := v.SizeBytes(); got != want {
- t.Errorf("struct %s should be exactly %d bytes but is %d bytes", reflect.TypeOf(v).Name(), want, got)
- }
-}
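
assertSize above leans on the SizeBytes method generated by the +marshal annotation. As a rough cross-check, and purely as an assumption on my part rather than how the package does it, encoding/binary.Size reports the same packed size for structs whose fields are all fixed-size; the struct below is a stand-in that mirrors SuperBlockOld's field layout, not the real type.

```go
// Hedged alternative size check: binary.Size sums the encoded sizes of the
// fields (no Go struct padding), which matches the 84-byte on-disk layout
// asserted for SuperBlockOld in TestSuperBlockSize above.
package main

import (
	"encoding/binary"
	"fmt"
)

// superBlockOldLayout mirrors SuperBlockOld's field sizes; names are abbreviated.
type superBlockOldLayout struct {
	InodesCount, BlocksCountLo, ReservedBlocksCount, FreeBlocksCountLo uint32
	FreeInodesCount, FirstDataBlock, LogBlockSize, LogClusterSize      uint32
	BlocksPerGroup, ClustersPerGroup, InodesPerGroup, Mtime, Wtime     uint32
	MountCount, MaxMountCount, Magic, State, Errors, MinorRevLevel     uint16
	LastCheck, CheckInterval, CreatorOS, RevLevel                      uint32
	DefResUID, DefResGID                                               uint16
}

func main() {
	fmt.Println(binary.Size(superBlockOldLayout{})) // 84
}
```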
diff --git a/pkg/sentry/fsimpl/ext/ext.go b/pkg/sentry/fsimpl/ext/ext.go
deleted file mode 100644
index 38fb7962b..000000000
--- a/pkg/sentry/fsimpl/ext/ext.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ext implements readonly ext(2/3/4) filesystems.
-package ext
-
-import (
- "errors"
- "fmt"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// Name is the name of this filesystem.
-const Name = "ext"
-
-// FilesystemType implements vfs.FilesystemType.
-//
-// +stateify savable
-type FilesystemType struct{}
-
-// getDeviceFd returns an io.ReaderAt to the underlying device.
-// Currently there are two ways of mounting an ext(2/3/4) fs:
-// 1. Specify a mount with our internal special MountType in the OCI spec.
-// 2. Expose the device to the container and mount it from the application layer.
-func getDeviceFd(source string, opts vfs.GetFilesystemOptions) (io.ReaderAt, error) {
- if opts.InternalData == nil {
- // User mount call.
- // TODO(b/134676337): Open the device specified by `source` and return that.
- panic("unimplemented")
- }
-
- // GetFilesystem call originated from within the sentry.
- devFd, ok := opts.InternalData.(int)
- if !ok {
- return nil, errors.New("internal data for ext fs must be an int containing the file descriptor to device")
- }
-
- if devFd < 0 {
- return nil, fmt.Errorf("ext device file descriptor is not valid: %d", devFd)
- }
-
- // The fd.ReadWriter returned from fd.NewReadWriter() does not take ownership
- // of the file descriptor and hence will not close it when it is garbage
- // collected.
- return fd.NewReadWriter(devFd), nil
-}
-
-// isCompatible checks if the superblock has feature sets which are compatible.
-// We only need to check the superblock incompatible feature set since we are
-// mounting readonly. We will also need to check the readonly-compatible
-// feature set when mounting read/write.
-func isCompatible(sb disklayout.SuperBlock) bool {
- // Note that these checks are limited because we mount readonly and do not
- // journal. When mounting read/write or with a journal, this must be
- // reevaluated.
- incompatFeatures := sb.IncompatibleFeatures()
- if incompatFeatures.MetaBG {
- log.Warningf("ext fs: meta block groups are not supported")
- return false
- }
- if incompatFeatures.MMP {
- log.Warningf("ext fs: multiple mount protection is not supported")
- return false
- }
- if incompatFeatures.Encrypted {
- log.Warningf("ext fs: encrypted inodes not supported")
- return false
- }
- if incompatFeatures.InlineData {
- log.Warningf("ext fs: inline files not supported")
- return false
- }
- return true
-}
-
-// Name implements vfs.FilesystemType.Name.
-func (FilesystemType) Name() string {
- return Name
-}
-
-// Release implements vfs.FilesystemType.Release.
-func (FilesystemType) Release(ctx context.Context) {}
-
-// GetFilesystem implements vfs.FilesystemType.GetFilesystem.
-func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- // TODO(b/134676337): Ensure that the user is mounting readonly. If not,
- // EACCES should be returned according to mount(2). Filesystem-independent
- // flags (like readonly) are currently not available in pkg/sentry/vfs.
-
- devMinor, err := vfsObj.GetAnonBlockDevMinor()
- if err != nil {
- return nil, nil, err
- }
-
- dev, err := getDeviceFd(source, opts)
- if err != nil {
- return nil, nil, err
- }
-
- fs := filesystem{
- dev: dev,
- inodeCache: make(map[uint32]*inode),
- devMinor: devMinor,
- }
- fs.vfsfs.Init(vfsObj, &fsType, &fs)
- fs.sb, err = readSuperBlock(dev)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- if fs.sb.Magic() != linux.EXT_SUPER_MAGIC {
- // mount(2) specifies that EINVAL should be returned if the superblock is
- // invalid.
- fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EINVAL
- }
-
- // Refuse to mount if the filesystem is incompatible.
- if !isCompatible(fs.sb) {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EINVAL
- }
-
- fs.bgs, err = readBlockGroups(dev, fs.sb)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- rootInode, err := fs.getOrCreateInodeLocked(disklayout.RootDirInode)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
- rootInode.incRef()
-
- return &fs.vfsfs, &newDentry(rootInode).vfsd, nil
-}
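
getDeviceFd above expects GetFilesystemOptions.InternalData to carry the device's host file descriptor as an int when the mount originates inside the sentry. The sketch below mirrors how the test setUp helper later in this diff wires that up; `mountExtImage` is an illustrative name, it targets the pre-removal ext and vfs APIs, and it is a sketch rather than production mounting code.

```go
// Sketch: mounting an ext image from inside the sentry by passing the raw
// host fd via GetFilesystemOptions.InternalData, as getDeviceFd expects.
package ext

import (
	"os"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
)

func mountExtImage(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, imagePath string) (*vfs.MountNamespace, *os.File, error) {
	f, err := os.Open(imagePath)
	if err != nil {
		return nil, nil, err
	}
	vfsObj.MustRegisterFilesystemType("extfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
		AllowUserMount: true,
	})
	// The ext filesystem does not take ownership of the fd, so the caller
	// must keep f open for as long as the mount is in use.
	mntns, err := vfsObj.NewMountNamespace(ctx, creds, imagePath, "extfs", &vfs.MountOptions{
		GetFilesystemOptions: vfs.GetFilesystemOptions{
			InternalData: int(f.Fd()),
		},
	})
	if err != nil {
		f.Close()
		return nil, nil, err
	}
	return mntns, f, nil
}
```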
diff --git a/pkg/sentry/fsimpl/ext/ext_test.go b/pkg/sentry/fsimpl/ext/ext_test.go
deleted file mode 100644
index d9fd4590c..000000000
--- a/pkg/sentry/fsimpl/ext/ext_test.go
+++ /dev/null
@@ -1,926 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "io"
- "os"
- "path"
- "sort"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- assetsDir = "pkg/sentry/fsimpl/ext/assets"
-)
-
-var (
- ext2ImagePath = path.Join(assetsDir, "tiny.ext2")
- ext3ImagePath = path.Join(assetsDir, "tiny.ext3")
- ext4ImagePath = path.Join(assetsDir, "tiny.ext4")
-)
-
-// setUp opens imagePath as an ext Filesystem and returns all necessary
-// elements required to run tests. If the returned error is nil, it also returns
-// a tear-down function which must be called after the test is run for cleanup.
-func setUp(t *testing.T, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- localImagePath, err := testutil.FindFile(imagePath)
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("failed to open local image at path %s: %v", imagePath, err)
- }
-
- f, err := os.Open(localImagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("extfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, localImagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- t.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// TODO(b/134676337): Test vfs.FilesystemImpl.ReadlinkAt and
-// vfs.FilesystemImpl.StatFSAt which are not implemented in
-// vfs.VirtualFilesystem yet.
-
-// TestSeek tests vfs.FileDescriptionImpl.Seek functionality.
-func TestSeek(t *testing.T) {
- type seekTest struct {
- name string
- image string
- path string
- }
-
- tests := []seekTest{
- {
- name: "ext4 root dir seek",
- image: ext4ImagePath,
- path: "/",
- },
- {
- name: "ext3 root dir seek",
- image: ext3ImagePath,
- path: "/",
- },
- {
- name: "ext2 root dir seek",
- image: ext2ImagePath,
- path: "/",
- },
- {
- name: "ext4 reg file seek",
- image: ext4ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext3 reg file seek",
- image: ext3ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext2 reg file seek",
- image: ext2ImagePath,
- path: "/file.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- if n, err := fd.Seek(ctx, 0, linux.SEEK_SET); n != 0 || err != nil {
- t.Errorf("expected seek position 0, got %d and error %v", n, err)
- }
-
- stat, err := fd.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("fd.stat failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- // We should be able to seek beyond the end of file.
- size := int64(stat.Size)
- if n, err := fd.Seek(ctx, size, linux.SEEK_SET); n != size || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -1, linux.SEEK_SET); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- if n, err := fd.Seek(ctx, 3, linux.SEEK_CUR); n != size+3 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+3, n, err)
- }
-
- // Make sure negative offsets work with SEEK_CUR.
- if n, err := fd.Seek(ctx, -2, linux.SEEK_CUR); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 2), linux.SEEK_CUR); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- // Make sure SEEK_END works with regular files.
- if _, ok := fd.Impl().(*regularFileFD); ok {
- // Seek back to 0.
- if n, err := fd.Seek(ctx, -size, linux.SEEK_END); n != 0 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", 0, n, err)
- }
-
- // Seek forward beyond EOF.
- if n, err := fd.Seek(ctx, 1, linux.SEEK_END); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 1), linux.SEEK_END); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
- }
- })
- }
-}
-
-// TestStatAt tests filesystem.StatAt functionality.
-func TestStatAt(t *testing.T) {
- type statAtTest struct {
- name string
- image string
- path string
- want linux.Statx
- }
-
- tests := []statAtTest{
- {
- name: "ext4 statx small file",
- image: ext4ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext3 statx small file",
- image: ext3ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext2 statx small file",
- image: ext2ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext4 statx big file",
- image: ext4ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext3 statx big file",
- image: ext3ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext2 statx big file",
- image: ext2ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext4 statx symlink file",
- image: ext4ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext3 statx symlink file",
- image: ext3ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext2 statx symlink file",
- image: ext2ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- }
-
- // Ignore the fields that are not supported by filesystem.StatAt yet and
- // those which are likely to change as the image does.
- ignoredFields := map[string]bool{
- "Attributes": true,
- "AttributesMask": true,
- "Atime": true,
- "Blocks": true,
- "Btime": true,
- "Ctime": true,
- "DevMajor": true,
- "DevMinor": true,
- "Ino": true,
- "Mask": true,
- "Mtime": true,
- "RdevMajor": true,
- "RdevMinor": true,
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- got, err := vfsfs.StatAt(ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.StatOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.StatAt failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- _, ok := ignoredFields[p.String()]
- return ok
- }, cmp.Ignore())
- if diff := cmp.Diff(got, test.want, cmpIgnoreFields, cmpopts.IgnoreUnexported(linux.Statx{})); diff != "" {
- t.Errorf("stat mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRead tests the read functionality for vfs file descriptions.
-func TestRead(t *testing.T) {
- type readTest struct {
- name string
- image string
- absPath string
- }
-
- tests := []readTest{
- {
- name: "ext4 read small file",
- image: ext4ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext3 read small file",
- image: ext3ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext2 read small file",
- image: ext2ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext4 read big file",
- image: ext4ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext3 read big file",
- image: ext3ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext2 read big file",
- image: ext2ImagePath,
- absPath: "/bigfile.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.absPath)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- // Get a local file descriptor and compare its functionality with a vfs file
- // description for the same file.
- localFile, err := testutil.FindFile(path.Join(assetsDir, test.absPath))
- if err != nil {
- t.Fatalf("testutil.FindFile failed for %s: %v", test.absPath, err)
- }
-
- f, err := os.Open(localFile)
- if err != nil {
- t.Fatalf("os.Open failed for %s: %v", localFile, err)
- }
- defer f.Close()
-
- // Read the entire file by reading one byte repeatedly. Doing this stress
- // tests the underlying file reader implementation.
- got := make([]byte, 1)
- want := make([]byte, 1)
- for {
- n, err := f.Read(want)
- fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{})
-
- if diff := cmp.Diff(got, want); diff != "" {
- t.Errorf("file data mismatch (-want +got):\n%s", diff)
- }
-
- // Make sure there is no more file data left after getting EOF.
- if n == 0 || err == io.EOF {
- if n, _ := fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{}); n != 0 {
- t.Errorf("extra unexpected file data in file %s in image %s", test.absPath, test.image)
- }
-
- break
- }
-
- if err != nil {
- t.Fatalf("read failed: %v", err)
- }
- }
- })
- }
-}
-
-// iterDirentsCb is a simple callback which just keeps adding the dirents to an
-// internal list. Implements vfs.IterDirentsCallback.
-type iterDirentsCb struct {
- dirents []vfs.Dirent
-}
-
-// Compiles only if iterDirentsCb implements vfs.IterDirentsCallback.
-var _ vfs.IterDirentsCallback = (*iterDirentsCb)(nil)
-
-// newIterDirentCb is the iterDirentsCb constructor.
-func newIterDirentCb() *iterDirentsCb {
- return &iterDirentsCb{dirents: make([]vfs.Dirent, 0)}
-}
-
-// Handle implements vfs.IterDirentsCallback.Handle.
-func (cb *iterDirentsCb) Handle(dirent vfs.Dirent) error {
- cb.dirents = append(cb.dirents, dirent)
- return nil
-}
-
-// TestIterDirents tests the FileDescriptionImpl.IterDirents functionality.
-func TestIterDirents(t *testing.T) {
- type iterDirentTest struct {
- name string
- image string
- path string
- want []vfs.Dirent
- }
-
- wantDirents := []vfs.Dirent{
- {
- Name: ".",
- Type: linux.DT_DIR,
- },
- {
- Name: "..",
- Type: linux.DT_DIR,
- },
- {
- Name: "lost+found",
- Type: linux.DT_DIR,
- },
- {
- Name: "file.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "bigfile.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "symlink.txt",
- Type: linux.DT_LNK,
- },
- }
- tests := []iterDirentTest{
- {
- name: "ext4 root dir iteration",
- image: ext4ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext3 root dir iteration",
- image: ext3ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext2 root dir iteration",
- image: ext2ImagePath,
- path: "/",
- want: wantDirents,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- cb := &iterDirentsCb{}
- if err = fd.IterDirents(ctx, cb); err != nil {
- t.Fatalf("dir fd.IterDirents() failed: %v", err)
- }
-
- sort.Slice(cb.dirents, func(i int, j int) bool { return cb.dirents[i].Name < cb.dirents[j].Name })
- sort.Slice(test.want, func(i int, j int) bool { return test.want[i].Name < test.want[j].Name })
-
- // Ignore the inode number and offset of dirents because those are likely to
- // change as the underlying image changes.
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- return p.String() == "Ino" || p.String() == "NextOff"
- }, cmp.Ignore())
- if diff := cmp.Diff(cb.dirents, test.want, cmpIgnoreFields); diff != "" {
- t.Errorf("dirents mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRootDir tests that the root directory inode is correctly initialized and
-// returned from setUp.
-func TestRootDir(t *testing.T) {
- type inodeProps struct {
- Mode linux.FileMode
- UID auth.KUID
- GID auth.KGID
- Size uint64
- InodeSize uint16
- Links uint16
- Flags disklayout.InodeFlags
- }
-
- type rootDirTest struct {
- name string
- image string
- wantInode inodeProps
- }
-
- tests := []rootDirTest{
- {
- name: "ext4 root dir",
- image: ext4ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- Flags: disklayout.InodeFlags{Extents: true},
- },
- },
- {
- name: "ext3 root dir",
- image: ext3ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- {
- name: "ext2 root dir",
- image: ext2ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- d, ok := vd.Dentry().Impl().(*dentry)
- if !ok {
- t.Fatalf("ext dentry of incorrect type: %T", vd.Dentry().Impl())
- }
-
- // Offload inode contents into local structs for comparison.
- gotInode := inodeProps{
- Mode: d.inode.diskInode.Mode(),
- UID: d.inode.diskInode.UID(),
- GID: d.inode.diskInode.GID(),
- Size: d.inode.diskInode.Size(),
- InodeSize: d.inode.diskInode.InodeSize(),
- Links: d.inode.diskInode.LinksCount(),
- Flags: d.inode.diskInode.Flags(),
- }
-
- if diff := cmp.Diff(gotInode, test.wantInode); diff != "" {
- t.Errorf("inode mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestFilesystemInit tests that the filesystem superblock and block group
-// descriptors are correctly read in and initialized.
-func TestFilesystemInit(t *testing.T) {
- // sb only contains the immutable properties of the superblock.
- type sb struct {
- InodesCount uint32
- BlocksCount uint64
- MaxMountCount uint16
- FirstDataBlock uint32
- BlockSize uint64
- BlocksPerGroup uint32
- ClusterSize uint64
- ClustersPerGroup uint32
- InodeSize uint16
- InodesPerGroup uint32
- BgDescSize uint16
- Magic uint16
- Revision disklayout.SbRevision
- CompatFeatures disklayout.CompatFeatures
- IncompatFeatures disklayout.IncompatFeatures
- RoCompatFeatures disklayout.RoCompatFeatures
- }
-
- // bg only contains the immutable properties of the block group descriptor.
- type bg struct {
- InodeTable uint64
- BlockBitmap uint64
- InodeBitmap uint64
- ExclusionBitmap uint64
- Flags disklayout.BGFlags
- }
-
- type fsInitTest struct {
- name string
- image string
- wantSb sb
- wantBgs []bg
- }
-
- tests := []fsInitTest{
- {
- name: "ext4 filesystem init",
- image: ext4ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x40,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- Extents: true,
- Is64Bit: true,
- FlexBg: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- HugeFile: true,
- DirNlink: true,
- ExtraIsize: true,
- MetadataCsum: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x23,
- BlockBitmap: 0x3,
- InodeBitmap: 0x13,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext3 filesystem init",
- image: ext3ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext2 filesystem init",
- image: ext2ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fs, ok := vd.Mount().Filesystem().Impl().(*filesystem)
- if !ok {
- t.Fatalf("ext filesystem of incorrect type: %T", vd.Mount().Filesystem().Impl())
- }
-
- // Offload superblock and block group descriptor contents into
- // local structs for comparison.
- totalFreeInodes := uint32(0)
- totalFreeBlocks := uint64(0)
- gotSb := sb{
- InodesCount: fs.sb.InodesCount(),
- BlocksCount: fs.sb.BlocksCount(),
- MaxMountCount: fs.sb.MaxMountCount(),
- FirstDataBlock: fs.sb.FirstDataBlock(),
- BlockSize: fs.sb.BlockSize(),
- BlocksPerGroup: fs.sb.BlocksPerGroup(),
- ClusterSize: fs.sb.ClusterSize(),
- ClustersPerGroup: fs.sb.ClustersPerGroup(),
- InodeSize: fs.sb.InodeSize(),
- InodesPerGroup: fs.sb.InodesPerGroup(),
- BgDescSize: fs.sb.BgDescSize(),
- Magic: fs.sb.Magic(),
- Revision: fs.sb.Revision(),
- CompatFeatures: fs.sb.CompatibleFeatures(),
- IncompatFeatures: fs.sb.IncompatibleFeatures(),
- RoCompatFeatures: fs.sb.ReadOnlyCompatibleFeatures(),
- }
- gotNumBgs := len(fs.bgs)
- gotBgs := make([]bg, gotNumBgs)
- for i := 0; i < gotNumBgs; i++ {
- gotBgs[i].InodeTable = fs.bgs[i].InodeTable()
- gotBgs[i].BlockBitmap = fs.bgs[i].BlockBitmap()
- gotBgs[i].InodeBitmap = fs.bgs[i].InodeBitmap()
- gotBgs[i].ExclusionBitmap = fs.bgs[i].ExclusionBitmap()
- gotBgs[i].Flags = fs.bgs[i].Flags()
-
- totalFreeInodes += fs.bgs[i].FreeInodesCount()
- totalFreeBlocks += uint64(fs.bgs[i].FreeBlocksCount())
- }
-
- if diff := cmp.Diff(gotSb, test.wantSb); diff != "" {
- t.Errorf("superblock mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(gotBgs, test.wantBgs); diff != "" {
- t.Errorf("block group descriptors mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeInodes, fs.sb.FreeInodesCount()); diff != "" {
- t.Errorf("total free inodes mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeBlocks, fs.sb.FreeBlocksCount()); diff != "" {
- t.Errorf("total free blocks mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
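
TestIterDirents above sorts both the collected and the expected dirents by name before diffing, which makes the comparison order-insensitive. A minimal standalone version of that idiom, using a throwaway type rather than vfs.Dirent:

```go
// Standalone version of the sort-before-compare idiom used in TestIterDirents:
// sort both slices by a stable key, then diff them with go-cmp.
package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

type dirent struct {
	Name string
	Type int
}

func main() {
	got := []dirent{{"file.txt", 1}, {".", 2}, {"lost+found", 2}}
	want := []dirent{{".", 2}, {"lost+found", 2}, {"file.txt", 1}}

	sort.Slice(got, func(i, j int) bool { return got[i].Name < got[j].Name })
	sort.Slice(want, func(i, j int) bool { return want[i].Name < want[j].Name })

	fmt.Println(cmp.Diff(got, want) == "") // true: same entries, order ignored
}
```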
diff --git a/pkg/sentry/fsimpl/ext/extent_file.go b/pkg/sentry/fsimpl/ext/extent_file.go
deleted file mode 100644
index 778460107..000000000
--- a/pkg/sentry/fsimpl/ext/extent_file.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "sort"
-
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// extentFile is a type of regular file which uses extents to store file data.
-//
-// +stateify savable
-type extentFile struct {
- regFile regularFile
-
- // root is the root extent node. This lives in the 60-byte diskInode.Data().
- // Immutable.
- root disklayout.ExtentNode
-}
-
-// Compiles only if extentFile implements io.ReaderAt.
-var _ io.ReaderAt = (*extentFile)(nil)
-
-// newExtentFile is the extent file constructor. It reads the entire extent
-// tree into memory.
-// TODO(b/134676337): Build extent tree on demand to reduce memory usage.
-func newExtentFile(args inodeArgs) (*extentFile, error) {
- file := &extentFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
- err := file.buildExtTree()
- if err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// buildExtTree builds the extent tree by reading it from disk while running
-// a simple DFS. It first reads the root node from the inode struct in
-// memory. Then it recursively builds the rest of the tree by reading it off
-// disk.
-//
-// Precondition: inode flag InExtents must be set.
-func (f *extentFile) buildExtTree() error {
- rootNodeData := f.regFile.inode.diskInode.Data()
-
- f.root.Header.UnmarshalBytes(rootNodeData[:disklayout.ExtentHeaderSize])
-
- // The root node cannot have more than 4 entries: 60 bytes = 1 header + 4 entries.
- if f.root.Header.NumEntries > 4 {
- // read(2) specifies that EINVAL should be returned if the file is unsuitable
- // for reading.
- return syserror.EINVAL
- }
-
- f.root.Entries = make([]disklayout.ExtentEntryPair, f.root.Header.NumEntries)
- for i, off := uint16(0), disklayout.ExtentEntrySize; i < f.root.Header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if f.root.Header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
- curEntry.UnmarshalBytes(rootNodeData[off : off+disklayout.ExtentEntrySize])
- f.root.Entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if f.root.Header.Height > 0 {
- for i := uint16(0); i < f.root.Header.NumEntries; i++ {
- var err error
- if f.root.Entries[i].Node, err = f.buildExtTreeFromDisk(f.root.Entries[i].Entry); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// buildExtTreeFromDisk reads the extent tree nodes from disk and recursively
-// builds the tree. Performs a simple DFS. It returns the ExtentNode pointed to
-// by the ExtentEntry.
-func (f *extentFile) buildExtTreeFromDisk(entry disklayout.ExtentEntry) (*disklayout.ExtentNode, error) {
- var header disklayout.ExtentHeader
- off := entry.PhysicalBlock() * f.regFile.inode.blkSize
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), &header)
- if err != nil {
- return nil, err
- }
-
- entries := make([]disklayout.ExtentEntryPair, header.NumEntries)
- for i, off := uint16(0), off+disklayout.ExtentEntrySize; i < header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
-
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), curEntry)
- if err != nil {
- return nil, err
- }
- entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if header.Height > 0 {
- for i := uint16(0); i < header.NumEntries; i++ {
- var err error
- entries[i].Node, err = f.buildExtTreeFromDisk(entries[i].Entry)
- if err != nil {
- return nil, err
- }
- }
- }
-
- return &disklayout.ExtentNode{header, entries}, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *extentFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, syserror.EINVAL
- }
-
- if uint64(off) >= f.regFile.inode.diskInode.Size() {
- return 0, io.EOF
- }
-
- n, err := f.read(&f.root, uint64(off), dst)
- if n < len(dst) && err == nil {
- err = io.EOF
- }
- return n, err
-}
-
-// read is the recursive step of extentFile.ReadAt which traverses the extent
-// tree from the node passed and reads file data.
-func (f *extentFile) read(node *disklayout.ExtentNode, off uint64, dst []byte) (int, error) {
- // Perform a binary search for the entry covering the file block at off.
- // A highly fragmented filesystem can have up to 340 entries, so a linear
- // search should be avoided. This finds the first entry which starts after
- // the file block we want and subtracts 1 to get the desired index.
- fileBlk := uint32(off / f.regFile.inode.blkSize)
- n := len(node.Entries)
- found := sort.Search(n, func(i int) bool {
- return node.Entries[i].Entry.FileBlock() > fileBlk
- }) - 1
-
- // We should be in this recursive step only if the data we want exists under
- // the current node.
- if found < 0 {
- panic("searching for a file block in an extent entry which does not cover it")
- }
-
- read := 0
- toRead := len(dst)
- var curR int
- var err error
- for i := found; i < n && read < toRead; i++ {
- if node.Header.Height == 0 {
- curR, err = f.readFromExtent(node.Entries[i].Entry.(*disklayout.Extent), off, dst[read:])
- } else {
- curR, err = f.read(node.Entries[i].Node, off, dst[read:])
- }
-
- read += curR
- off += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- return read, nil
-}
-
-// readFromExtent reads file data from the extent. It takes advantage of the
-// sequential nature of extents and reads file data from multiple blocks in one
-// call.
-//
-// A non-nil error indicates that this is a partial read and there is probably
-// more to read from this extent. The caller should propagate the error upward
-// and not move to the next extent in the tree.
-//
-// A subsequent call to extentFile.ReadAt should continue reading from where
-// we left off, as expected.
-func (f *extentFile) readFromExtent(ex *disklayout.Extent, off uint64, dst []byte) (int, error) {
- curFileBlk := uint32(off / f.regFile.inode.blkSize)
- exFirstFileBlk := ex.FileBlock()
- exLastFileBlk := exFirstFileBlk + uint32(ex.Length) // This is exclusive.
-
- // We should be in this recursive step only if the data we want exists under
- // the current extent.
- if curFileBlk < exFirstFileBlk || exLastFileBlk <= curFileBlk {
- panic("searching for a file block in an extent which does not cover it")
- }
-
- curPhyBlk := uint64(curFileBlk-exFirstFileBlk) + ex.PhysicalBlock()
- readStart := curPhyBlk*f.regFile.inode.blkSize + (off % f.regFile.inode.blkSize)
-
- endPhyBlk := ex.PhysicalBlock() + uint64(ex.Length)
- extentEnd := endPhyBlk * f.regFile.inode.blkSize // This is exclusive.
-
- toRead := int(extentEnd - readStart)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], int64(readStart))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
-}
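
extentFile.read above locates the covering entry with sort.Search: it finds the first entry whose starting file block is greater than the target, then steps back by one. A standalone illustration with plain ints standing in for FileBlock():

```go
// Standalone illustration of the binary-search idiom used by extentFile.read:
// sort.Search returns the smallest index whose entry starts strictly after
// the target block; subtracting 1 yields the entry that covers it.
package main

import (
	"fmt"
	"sort"
)

func coveringIndex(starts []int, fileBlk int) int {
	return sort.Search(len(starts), func(i int) bool {
		return starts[i] > fileBlk
	}) - 1
}

func main() {
	starts := []int{0, 4, 9, 13} // first file block of each extent entry
	for _, blk := range []int{0, 5, 9, 20} {
		fmt.Printf("block %d -> entry %d\n", blk, coveringIndex(starts, blk))
	}
	// block 0 -> entry 0, block 5 -> entry 1, block 9 -> entry 2, block 20 -> entry 3
}
```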
diff --git a/pkg/sentry/fsimpl/ext/extent_test.go b/pkg/sentry/fsimpl/ext/extent_test.go
deleted file mode 100644
index 985f76ac0..000000000
--- a/pkg/sentry/fsimpl/ext/extent_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-const (
- // mockExtentBlkSize is the mock block size used for testing.
- // No block has more than 1 header + 4 entries.
- mockExtentBlkSize = uint64(64)
-)
-
-// The tree described below looks like:
-//
-// 0.{Head}[Idx][Idx]
-// / \
-// / \
-// 1.{Head}[Ext][Ext] 2.{Head}[Idx]
-// / | \
-// [Phy] [Phy, Phy] 3.{Head}[Ext]
-// |
-// [Phy, Phy, Phy]
-//
-// Legend:
-// - Head = ExtentHeader
-// - Idx = ExtentIdx
-// - Ext = Extent
-// - Phy = Physical Block
-//
-// Please note that ext4 might not construct extent trees looking like this.
-// This is purely for testing the tree traversal logic.
-var (
- node3 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 3,
- Length: 3,
- StartBlockLo: 6,
- },
- Node: nil,
- },
- },
- }
-
- node2 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 1,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 2,
- },
- Node: node3,
- },
- },
- }
-
- node1 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 0,
- Length: 1,
- StartBlockLo: 3,
- },
- Node: nil,
- },
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 1,
- Length: 2,
- StartBlockLo: 4,
- },
- Node: nil,
- },
- },
- }
-
- node0 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 2,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 0,
- ChildBlockLo: 0,
- },
- Node: node1,
- },
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 1,
- },
- Node: node2,
- },
- },
- }
-)
-
-// TestExtentReader stress tests extentFile.ReadAt functionality. It performs
-// reads of varying lengths from all possible positions in the extent tree.
-func TestExtentReader(t *testing.T) {
- mockExtentFile, want := extentTreeSetUp(t, node0)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockExtentFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// TestBuildExtentTree tests the extent tree building logic.
-func TestBuildExtentTree(t *testing.T) {
- mockExtentFile, _ := extentTreeSetUp(t, node0)
-
- opt := cmpopts.IgnoreUnexported(disklayout.ExtentIdx{}, disklayout.ExtentHeader{})
- if diff := cmp.Diff(&mockExtentFile.root, node0, opt); diff != "" {
- t.Errorf("extent tree mismatch (-want +got):\n%s", diff)
- }
-}
-
-// extentTreeSetUp writes the passed extent tree to a mock disk and constructs
-// a mock extent file with the same tree built into it. It also writes random
-// file data to the disk and returns that data.
-func extentTreeSetUp(t *testing.T, root *disklayout.ExtentNode) (*extentFile, []byte) {
- t.Helper()
-
- mockDisk := make([]byte, mockExtentBlkSize*10)
- mockExtentFile := &extentFile{}
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
- SizeLo: uint32(mockExtentBlkSize) * getNumPhyBlks(root),
- },
- },
- blkSize: mockExtentBlkSize,
- }
- mockExtentFile.regFile.inode.init(args, &mockExtentFile.regFile)
-
- fileData := writeTree(&mockExtentFile.regFile.inode, mockDisk, node0, mockExtentBlkSize)
-
- if err := mockExtentFile.buildExtTree(); err != nil {
- t.Fatalf("inode.buildExtTree failed: %v", err)
- }
- return mockExtentFile, fileData
-}
-
-// writeTree writes the tree represented by `root` to the inode and disk. It
-// also writes random file data on disk.
-func writeTree(in *inode, disk []byte, root *disklayout.ExtentNode, mockExtentBlkSize uint64) []byte {
- rootData := in.diskInode.Data()
- root.Header.MarshalBytes(rootData)
- off := root.Header.SizeBytes()
- for _, ep := range root.Entries {
- ep.Entry.MarshalBytes(rootData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range root.Entries {
- if root.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeTreeToDisk is the recursive step for writeTree which writes the tree
-// to the disk only (not to the inode). It also writes random file data to disk.
-func writeTreeToDisk(disk []byte, curNode disklayout.ExtentEntryPair) []byte {
- nodeData := disk[curNode.Entry.PhysicalBlock()*mockExtentBlkSize:]
- curNode.Node.Header.MarshalBytes(nodeData)
- off := curNode.Node.Header.SizeBytes()
- for _, ep := range curNode.Node.Entries {
- ep.Entry.MarshalBytes(nodeData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range curNode.Node.Entries {
- if curNode.Node.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeFileDataToExtent writes random bytes to the blocks on disk that the
-// passed extent points to.
-func writeFileDataToExtent(disk []byte, ex *disklayout.Extent) []byte {
- phyExStartBlk := ex.PhysicalBlock()
- phyExStartOff := phyExStartBlk * mockExtentBlkSize
- phyExEndOff := phyExStartOff + uint64(ex.Length)*mockExtentBlkSize
- rand.Read(disk[phyExStartOff:phyExEndOff])
- return disk[phyExStartOff:phyExEndOff]
-}
-
-// getNumPhyBlks returns the number of physical blocks covered under the node.
-func getNumPhyBlks(node *disklayout.ExtentNode) uint32 {
- var res uint32
- for _, ep := range node.Entries {
- if node.Header.Height == 0 {
- res += uint32(ep.Entry.(*disklayout.Extent).Length)
- } else {
- res += getNumPhyBlks(ep.Node)
- }
- }
- return res
-}
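
extentTreeSetUp above fakes the device with a plain byte slice wrapped in bytes.NewReader, which satisfies io.ReaderAt. A tiny standalone sketch of the same mock-disk technique (block size and contents are made up for the example):

```go
// Sketch of the mock-disk technique used in the extent tests: a byte slice
// behind bytes.NewReader stands in for the block device, so code written
// against io.ReaderAt can be exercised without real hardware.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func readBlock(dev io.ReaderAt, blkSize, blk uint64) ([]byte, error) {
	buf := make([]byte, blkSize)
	if _, err := dev.ReadAt(buf, int64(blk*blkSize)); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	const blkSize = 64
	disk := make([]byte, blkSize*10)
	copy(disk[blkSize*3:], "hello extent") // pretend block 3 holds file data

	blk, err := readBlock(bytes.NewReader(disk), blkSize, 3)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bytes.TrimRight(blk, "\x00"))) // hello extent
}
```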
diff --git a/pkg/sentry/fsimpl/ext/file_description.go b/pkg/sentry/fsimpl/ext/file_description.go
deleted file mode 100644
index 90b086468..000000000
--- a/pkg/sentry/fsimpl/ext/file_description.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// fileDescription is embedded by ext implementations of
-// vfs.FileDescriptionImpl.
-type fileDescription struct {
- vfsfd vfs.FileDescription
- vfs.FileDescriptionDefaultImpl
- vfs.LockFD
-}
-
-func (fd *fileDescription) filesystem() *filesystem {
- return fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)
-}
-
-func (fd *fileDescription) inode() *inode {
- return fd.vfsfd.Dentry().Impl().(*dentry).inode
-}
-
-// Stat implements vfs.FileDescriptionImpl.Stat.
-func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
- var stat linux.Statx
- fd.inode().statTo(&stat)
- return stat, nil
-}
-
-// SetStat implements vfs.FileDescriptionImpl.SetStat.
-func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
- if opts.Stat.Mask == 0 {
- return nil
- }
- return syserror.EPERM
-}
-
-// StatFS implements vfs.FileDescriptionImpl.StatFS.
-func (fd *fileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {
- var stat linux.Statfs
- fd.filesystem().statTo(&stat)
- return stat, nil
-}
-
-// Sync implements vfs.FileDescriptionImpl.Sync.
-func (fd *fileDescription) Sync(ctx context.Context) error {
- return nil
-}
diff --git a/pkg/sentry/fsimpl/ext/filesystem.go b/pkg/sentry/fsimpl/ext/filesystem.go
deleted file mode 100644
index 917f1873d..000000000
--- a/pkg/sentry/fsimpl/ext/filesystem.go
+++ /dev/null
@@ -1,550 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "errors"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-var (
- // errResolveDirent indicates that the vfs.ResolvingPath.Component() does
- // not exist in the dentry tree but does exist on disk, so it has to be read
- // in using the in-memory dirent and added to the dentry tree. This usually
- // indicates the need to lock filesystem.mu for writing.
- errResolveDirent = errors.New("resolve path component using dirent")
-)
-
-// filesystem implements vfs.FilesystemImpl.
-//
-// +stateify savable
-type filesystem struct {
- vfsfs vfs.Filesystem
-
- // mu serializes changes to the Dentry tree.
- mu sync.RWMutex `state:"nosave"`
-
- // dev represents the underlying fs device. It does not require protection
- // because io.ReaderAt permits concurrent read calls to it. It translates to
- // the pread syscall which passes on the read request directly to the device
- // driver. Device drivers are efficient at serving multiple concurrent read
- // requests in an optimal order (taking locality into consideration).
- dev io.ReaderAt
-
- // inodeCache maps absolute inode numbers to the corresponding inode struct.
- // Inodes should be removed from this cache once their reference count hits 0.
- //
- // Protected by mu because most additions (see IterDirents) and all removals
- // from this cache correspond to a change in the dentry tree.
- inodeCache map[uint32]*inode
-
- // sb represents the filesystem superblock. Immutable after initialization.
- sb disklayout.SuperBlock
-
- // bgs represents all the block group descriptors for the filesystem.
- // Immutable after initialization.
- bgs []disklayout.BlockGroup
-
- // devMinor is this filesystem's device minor number. Immutable after
- // initialization.
- devMinor uint32
-}
-
-// Compiles only if filesystem implements vfs.FilesystemImpl.
-var _ vfs.FilesystemImpl = (*filesystem)(nil)
-
-// stepLocked resolves rp.Component() in parent directory vfsd. The write
-// parameter tells whether the caller has acquired filesystem.mu for writing.
-// If true, an existing inode on disk can be added to the dentry tree if it is
-// not present already.
-//
-// stepLocked is loosely analogous to fs/namei.c:walk_component().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-// * inode == vfsd.Impl().(*Dentry).inode.
-func stepLocked(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, inode *inode, write bool) (*vfs.Dentry, *inode, error) {
- if !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, nil, err
- }
-
- for {
- name := rp.Component()
- if name == "." {
- rp.Advance()
- return vfsd, inode, nil
- }
- d := vfsd.Impl().(*dentry)
- if name == ".." {
- isRoot, err := rp.CheckRoot(ctx, vfsd)
- if err != nil {
- return nil, nil, err
- }
- if isRoot || d.parent == nil {
- rp.Advance()
- return vfsd, inode, nil
- }
- if err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil {
- return nil, nil, err
- }
- rp.Advance()
- return &d.parent.vfsd, d.parent.inode, nil
- }
-
- dir := inode.impl.(*directory)
- child, ok := dir.childCache[name]
- if !ok {
- // We may need to instantiate a new dentry for this child.
- childDirent, ok := dir.childMap[name]
- if !ok {
- // The underlying inode does not exist on disk.
- return nil, nil, syserror.ENOENT
- }
-
- if !write {
- // filesystem.mu must be held for writing to add to the dentry tree.
- return nil, nil, errResolveDirent
- }
-
- // Create and add the component's dirent to the dentry tree.
- fs := rp.Mount().Filesystem().Impl().(*filesystem)
- childInode, err := fs.getOrCreateInodeLocked(childDirent.diskDirent.Inode())
- if err != nil {
- return nil, nil, err
- }
- // incRef because this is being added to the dentry tree.
- childInode.incRef()
- child = newDentry(childInode)
- child.parent = d
- child.name = name
- dir.childCache[name] = child
- }
- if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
- return nil, nil, err
- }
- if child.inode.isSymlink() && rp.ShouldFollowSymlink() {
- if err := rp.HandleSymlink(child.inode.impl.(*symlink).target); err != nil {
- return nil, nil, err
- }
- continue
- }
- rp.Advance()
- return &child.vfsd, child.inode, nil
- }
-}
-
-// walkLocked resolves rp to an existing file. The write parameter tells
-// whether the caller has acquired filesystem.mu for writing. If true,
-// additions can be made to the dentry tree while walking. If errResolveDirent
-// is returned, the walk needs to be continued with an upgraded filesystem.mu.
-//
-// walkLocked is loosely analogous to Linux's fs/namei.c:path_lookupat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-func walkLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Done() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if rp.MustBeDir() && !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walkParentLocked resolves all but the last path component of rp to an
-// existing directory. It does not check that the returned directory is
-// searchable by the provider of rp. The write parameter tells whether the
-// caller has acquired filesystem.mu for writing. If true, additions can be
-// made to the dentry tree while walking. If errResolveDirent is returned, the
-// walk needs to be continued with an upgraded filesystem.mu.
-//
-// walkParentLocked is loosely analogous to Linux's fs/namei.c:path_parentat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-func walkParentLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Final() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walk resolves rp to an existing file. If parent is true, it resolves rp up
-// to the parent of the last component, which must be an existing directory.
-// If parent is false, it resolves rp entirely. walk attempts to resolve the
-// path as far as it can with a read lock and upgrades the lock if needed.
-func (fs *filesystem) walk(ctx context.Context, rp *vfs.ResolvingPath, parent bool) (*vfs.Dentry, *inode, error) {
- var (
- vfsd *vfs.Dentry
- inode *inode
- err error
- )
-
- // Try walking with the hopes that all dentries have already been pulled out
- // of disk. This reduces congestion (allows concurrent walks).
- fs.mu.RLock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, false)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, false)
- }
- fs.mu.RUnlock()
-
- if err == errResolveDirent {
- // Upgrade lock and continue walking. Lock upgrading in the middle of the
- // walk is fine as this is a read only filesystem.
- fs.mu.Lock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, true)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, true)
- }
- fs.mu.Unlock()
- }
-
- return vfsd, inode, err
-}
-
-// getOrCreateInodeLocked gets the inode corresponding to the inode number passed in.
-// It creates a new one with the given inode number if one does not exist.
-// The caller must increment the ref count if adding this to the dentry tree.
-//
-// Precondition: must be holding fs.mu for writing.
-func (fs *filesystem) getOrCreateInodeLocked(inodeNum uint32) (*inode, error) {
- if in, ok := fs.inodeCache[inodeNum]; ok {
- return in, nil
- }
-
- in, err := newInode(fs, inodeNum)
- if err != nil {
- return nil, err
- }
-
- fs.inodeCache[inodeNum] = in
- return in, nil
-}
-
-// statTo writes the statfs fields to the output parameter.
-func (fs *filesystem) statTo(stat *linux.Statfs) {
- stat.Type = uint64(fs.sb.Magic())
- stat.BlockSize = int64(fs.sb.BlockSize())
- stat.Blocks = fs.sb.BlocksCount()
- stat.BlocksFree = fs.sb.FreeBlocksCount()
- stat.BlocksAvailable = fs.sb.FreeBlocksCount()
- stat.Files = uint64(fs.sb.InodesCount())
- stat.FilesFree = uint64(fs.sb.FreeInodesCount())
- stat.NameLength = disklayout.MaxFileName
- stat.FragmentSize = int64(fs.sb.BlockSize())
- // TODO(b/134676337): Set Statfs.Flags and Statfs.FSID.
-}
-
-// AccessAt implements vfs.Filesystem.Impl.AccessAt.
-func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return inode.checkPermissions(rp.Credentials(), ats)
-}
-
-// GetDentryAt implements vfs.FilesystemImpl.GetDentryAt.
-func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- if opts.CheckSearchable {
- if !inode.isDir() {
- return nil, syserror.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
- }
-
- inode.incRef()
- return vfsd, nil
-}
-
-// GetParentDentryAt implements vfs.FilesystemImpl.GetParentDentryAt.
-func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, true)
- if err != nil {
- return nil, err
- }
- inode.incRef()
- return vfsd, nil
-}
-
-// OpenAt implements vfs.FilesystemImpl.OpenAt.
-func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- // EROFS is returned if write access is needed.
- if vfs.MayWriteFileWithOpenFlags(opts.Flags) || opts.Flags&(linux.O_CREAT|linux.O_EXCL|linux.O_TMPFILE) != 0 {
- return nil, syserror.EROFS
- }
- return inode.open(rp, vfsd, &opts)
-}
-
-// ReadlinkAt implements vfs.FilesystemImpl.ReadlinkAt.
-func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- symlink, ok := inode.impl.(*symlink)
- if !ok {
- return "", syserror.EINVAL
- }
- return symlink.target, nil
-}
-
-// StatAt implements vfs.FilesystemImpl.StatAt.
-func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return linux.Statx{}, err
- }
- var stat linux.Statx
- inode.statTo(&stat)
- return stat, nil
-}
-
-// StatFSAt implements vfs.FilesystemImpl.StatFSAt.
-func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linux.Statfs, error) {
- if _, _, err := fs.walk(ctx, rp, false); err != nil {
- return linux.Statfs{}, err
- }
-
- var stat linux.Statfs
- fs.statTo(&stat)
- return stat, nil
-}
-
-// Release implements vfs.FilesystemImpl.Release.
-func (fs *filesystem) Release(ctx context.Context) {
- fs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
-}
-
-// Sync implements vfs.FilesystemImpl.Sync.
-func (fs *filesystem) Sync(ctx context.Context) error {
- // This is a readonly filesystem for now.
- return nil
-}
-
-// The vfs.FilesystemImpl functions below return EROFS because their respective
-// man pages say that EROFS must be returned if the path resolves to a file on
-// this read-only filesystem.
-
-// LinkAt implements vfs.FilesystemImpl.LinkAt.
-func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
-func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// MknodAt implements vfs.FilesystemImpl.MknodAt.
-func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// RenameAt implements vfs.FilesystemImpl.RenameAt.
-func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error {
- if rp.Done() {
- return syserror.ENOENT
- }
-
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
-func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if !inode.isDir() {
- return syserror.ENOTDIR
- }
-
- return syserror.EROFS
-}
-
-// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
-func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
-func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
-func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if inode.isDir() {
- return syserror.EISDIR
- }
-
- return syserror.EROFS
-}
-
-// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
-func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {
- return nil, err
- }
-
- // TODO(b/134676337): Support sockets.
- return nil, syserror.ECONNREFUSED
-}
-
-// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
-func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- return nil, syserror.ENOTSUP
-}
-
-// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
-func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- return "", syserror.ENOTSUP
-}
-
-// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
-func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return syserror.ENOTSUP
-}
-
-// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
-func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return syserror.ENOTSUP
-}
-
-// PrependPath implements vfs.FilesystemImpl.PrependPath.
-func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDentry, b *fspath.Builder) error {
- fs.mu.RLock()
- defer fs.mu.RUnlock()
- return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
-}
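
The deleted filesystem.walk above is optimistic: it walks under filesystem.mu held for reading and retries under the write lock only when stepLocked reports errResolveDirent, i.e. when the dentry tree has to be populated from disk. Below is a minimal standalone sketch of that read-then-upgrade pattern; the cache, the disk map and the errNeedsWrite sentinel are illustrative names, not part of the ext package.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errNeedsWrite plays the role of errResolveDirent: the read-locked lookup
// found the entry on "disk" but not in the cache, so the caller must retry
// holding the write lock.
var errNeedsWrite = errors.New("entry must be populated under the write lock")

type cache struct {
	mu      sync.RWMutex
	entries map[string]int // in-memory cache, like the dentry tree
	disk    map[string]int // stand-in for the on-disk dirents
}

// lookupLocked is analogous to stepLocked: with write == false it only reads
// the cache; with write == true it may also populate the cache from "disk".
func (c *cache) lookupLocked(name string, write bool) (int, error) {
	if v, ok := c.entries[name]; ok {
		return v, nil
	}
	v, ok := c.disk[name]
	if !ok {
		return 0, errors.New("no such entry")
	}
	if !write {
		return 0, errNeedsWrite
	}
	c.entries[name] = v
	return v, nil
}

// lookup is analogous to filesystem.walk: try under the read lock first and
// upgrade only if the cache has to be mutated.
func (c *cache) lookup(name string) (int, error) {
	c.mu.RLock()
	v, err := c.lookupLocked(name, false)
	c.mu.RUnlock()
	if err != errNeedsWrite {
		return v, err
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.lookupLocked(name, true)
}

func main() {
	c := &cache{
		entries: map[string]int{},
		disk:    map[string]int{"file.txt": 12},
	}
	fmt.Println(c.lookup("file.txt")) // 12 <nil>, populated under the write lock
	fmt.Println(c.lookup("file.txt")) // 12 <nil>, served under the read lock
	fmt.Println(c.lookup("missing"))  // 0 no such entry
}
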
diff --git a/pkg/sentry/fsimpl/ext/inode.go b/pkg/sentry/fsimpl/ext/inode.go
deleted file mode 100644
index 9009ba3c7..000000000
--- a/pkg/sentry/fsimpl/ext/inode.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// inode represents an ext inode.
-//
-// inode uses the same inheritance pattern that pkg/sentry/vfs structures use.
-// This has been done to increase memory locality.
-//
-// Implementations:
-// inode --
-// |-- dir
-// |-- symlink
-// |-- regular--
-// |-- extent file
-// |-- block map file
-//
-// +stateify savable
-type inode struct {
- // refs is a reference count. refs is accessed using atomic memory operations.
- refs int64
-
- // fs is the containing filesystem.
- fs *filesystem
-
- // inodeNum is the inode number of this inode on disk. This is used to
- // identify inodes within the ext filesystem.
- inodeNum uint32
-
- // blkSize is the fs data block size. Same as filesystem.sb.BlockSize().
- blkSize uint64
-
- // diskInode gives us access to the inode struct on disk. Immutable.
- diskInode disklayout.Inode
-
- locks vfs.FileLocks
-
- // This is immutable. Implementations must embed inode as their first field
- // to ensure temporality.
- impl interface{}
-}
-
-// incRef increments the inode ref count.
-func (in *inode) incRef() {
- atomic.AddInt64(&in.refs, 1)
-}
-
-// tryIncRef tries to increment the ref count. Returns true if successful.
-func (in *inode) tryIncRef() bool {
- for {
- refs := atomic.LoadInt64(&in.refs)
- if refs == 0 {
- return false
- }
- if atomic.CompareAndSwapInt64(&in.refs, refs, refs+1) {
- return true
- }
- }
-}
-
-// decRef decrements the inode ref count and releases the inode resources if
-// the ref count hits 0.
-//
-// Precondition: Must have locked filesystem.mu.
-func (in *inode) decRef() {
- if refs := atomic.AddInt64(&in.refs, -1); refs == 0 {
- delete(in.fs.inodeCache, in.inodeNum)
- } else if refs < 0 {
- panic("ext.inode.decRef() called without holding a reference")
- }
-}
-
-// newInode is the inode constructor. Reads the inode off disk. Identifies
-// inodes based on the absolute inode number on disk.
-func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {
- if inodeNum == 0 {
- panic("inode number 0 on ext filesystems is not possible")
- }
-
- inodeRecordSize := fs.sb.InodeSize()
- var diskInode disklayout.Inode
- if inodeRecordSize == disklayout.OldInodeSize {
- diskInode = &disklayout.InodeOld{}
- } else {
- diskInode = &disklayout.InodeNew{}
- }
-
- // Calculate where the inode is actually placed.
- inodesPerGrp := fs.sb.InodesPerGroup()
- blkSize := fs.sb.BlockSize()
- inodeTableOff := fs.bgs[getBGNum(inodeNum, inodesPerGrp)].InodeTable() * blkSize
- inodeOff := inodeTableOff + uint64(uint32(inodeRecordSize)*getBGOff(inodeNum, inodesPerGrp))
-
- if err := readFromDisk(fs.dev, int64(inodeOff), diskInode); err != nil {
- return nil, err
- }
-
- // Build the inode based on its type.
- args := inodeArgs{
- fs: fs,
- inodeNum: inodeNum,
- blkSize: blkSize,
- diskInode: diskInode,
- }
-
- switch diskInode.Mode().FileType() {
- case linux.ModeSymlink:
- f, err := newSymlink(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeRegular:
- f, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeDirectory:
- f, err := newDirectory(args, fs.sb.IncompatibleFeatures().DirentFileType)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- default:
- // TODO(b/134676337): Return appropriate errors for sockets, pipes and devices.
- return nil, syserror.EINVAL
- }
-}
-
-type inodeArgs struct {
- fs *filesystem
- inodeNum uint32
- blkSize uint64
- diskInode disklayout.Inode
-}
-
-func (in *inode) init(args inodeArgs, impl interface{}) {
- in.fs = args.fs
- in.inodeNum = args.inodeNum
- in.blkSize = args.blkSize
- in.diskInode = args.diskInode
- in.impl = impl
-}
-
-// open creates and returns a file description for the dentry passed in.
-func (in *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
- ats := vfs.AccessTypesForOpenFlags(opts)
- if err := in.checkPermissions(rp.Credentials(), ats); err != nil {
- return nil, err
- }
- mnt := rp.Mount()
- switch in.impl.(type) {
- case *regularFile:
- var fd regularFileFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *directory:
- // Can't open directories writably. This check is not necessary for a read
- // only filesystem but will be required when write is implemented.
- if ats&vfs.MayWrite != 0 {
- return nil, syserror.EISDIR
- }
- var fd directoryFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *symlink:
- if opts.Flags&linux.O_PATH == 0 {
- // Can't open symlinks without O_PATH.
- return nil, syserror.ELOOP
- }
- var fd symlinkFD
- fd.LockFD.Init(&in.locks)
- fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{})
- return &fd.vfsfd, nil
- default:
- panic(fmt.Sprintf("unknown inode type: %T", in.impl))
- }
-}
-
-func (in *inode) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
- return vfs.GenericCheckPermissions(creds, ats, in.diskInode.Mode(), in.diskInode.UID(), in.diskInode.GID())
-}
-
-// statTo writes the statx fields to the output parameter.
-func (in *inode) statTo(stat *linux.Statx) {
- stat.Mask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK |
- linux.STATX_UID | linux.STATX_GID | linux.STATX_INO | linux.STATX_SIZE |
- linux.STATX_ATIME | linux.STATX_CTIME | linux.STATX_MTIME
- stat.Blksize = uint32(in.blkSize)
- stat.Mode = uint16(in.diskInode.Mode())
- stat.Nlink = uint32(in.diskInode.LinksCount())
- stat.UID = uint32(in.diskInode.UID())
- stat.GID = uint32(in.diskInode.GID())
- stat.Ino = uint64(in.inodeNum)
- stat.Size = in.diskInode.Size()
- stat.Atime = in.diskInode.AccessTime().StatxTimestamp()
- stat.Ctime = in.diskInode.ChangeTime().StatxTimestamp()
- stat.Mtime = in.diskInode.ModificationTime().StatxTimestamp()
- stat.DevMajor = linux.UNNAMED_MAJOR
- stat.DevMinor = in.fs.devMinor
- // TODO(b/134676337): Set stat.Blocks which is the number of 512 byte blocks
- // (including metadata blocks) required to represent this file.
-}
-
-// getBGNum returns the block group number that a given inode belongs to.
-func getBGNum(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) / inodesPerGrp
-}
-
-// getBGOff returns the offset at which the given inode lives in the block
-// group's inode table, i.e. the index of the inode in the inode table.
-func getBGOff(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) % inodesPerGrp
-}
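
getBGNum and getBGOff above capture how an ext inode record is located on disk: inode numbers are 1-based, so (inodeNum - 1) splits into a block group index and an offset into that group's inode table, which newInode then turns into a byte offset. A small sketch of the same arithmetic, with made-up geometry constants standing in for the superblock and block group descriptor values:

package main

import "fmt"

// Illustrative geometry; the real values come from the superblock and the
// block group descriptors (fs.sb, fs.bgs) in the deleted code above.
const (
	inodesPerGroup  = 8192
	inodeRecordSize = 256
	blockSize       = 4096
)

// inodeTableBlock stands in for fs.bgs[bg].InodeTable().
func inodeTableBlock(bg uint32) uint64 { return uint64(1000 + 64*bg) }

// inodeDiskOffset mirrors newInode: inode numbers are 1-based, so subtract
// one before splitting into (block group, index in that group's inode table).
func inodeDiskOffset(inodeNum uint32) uint64 {
	bg := (inodeNum - 1) / inodesPerGroup  // getBGNum
	idx := (inodeNum - 1) % inodesPerGroup // getBGOff
	tableOff := inodeTableBlock(bg) * blockSize
	return tableOff + uint64(idx)*inodeRecordSize
}

func main() {
	fmt.Println(inodeDiskOffset(1))    // first inode record of group 0
	fmt.Println(inodeDiskOffset(8193)) // first inode record of group 1
}
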
diff --git a/pkg/sentry/fsimpl/ext/regular_file.go b/pkg/sentry/fsimpl/ext/regular_file.go
deleted file mode 100644
index 4a5539b37..000000000
--- a/pkg/sentry/fsimpl/ext/regular_file.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/safemem"
- fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// regularFile represents a regular file's inode. This too follows the
-// inheritance pattern prevalent in the vfs layer described in
-// pkg/sentry/vfs/README.md.
-//
-// +stateify savable
-type regularFile struct {
- inode inode
-
- // This is immutable. The first field of fileReader implementations must be
- // regularFile to ensure temporality.
- // io.ReaderAt is more strict than io.Reader in the sense that a partial read
- // is always accompanied by an error. If a read spans past the end of file, a
- // partial read (within file range) is done and io.EOF is returned.
- impl io.ReaderAt
-}
-
-// newRegularFile is the regularFile constructor. It figures out what kind of
-// file this is and initializes the fileReader.
-func newRegularFile(args inodeArgs) (*regularFile, error) {
- if args.diskInode.Flags().Extents {
- file, err := newExtentFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
- }
-
- file, err := newBlockMapFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
-}
-
-func (in *inode) isRegular() bool {
- _, ok := in.impl.(*regularFile)
- return ok
-}
-
-// regularFileFD represents a regular file's file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type regularFileFD struct {
- fileDescription
- vfs.LockFD
-
- // off is the file offset. off is accessed using atomic memory operations.
- off int64
-
- // offMu serializes operations that may mutate off.
- offMu sync.Mutex `state:"nosave"`
-}
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *regularFileFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- safeReader := safemem.FromIOReaderAt{
- ReaderAt: fd.inode().impl.(*regularFile).impl,
- Offset: offset,
- }
-
- // Copies data from disk directly into usermem without any intermediate
- // allocations (if dst is converted into BlockSeq such that it does not need
- // safe copying).
- return dst.CopyOutFrom(ctx, safeReader)
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *regularFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- n, err := fd.PRead(ctx, dst, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- // write(2) specifies that EBADF must be returned if the fd is not open for
- // writing.
- return 0, syserror.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- n, err := fd.PWrite(ctx, src, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *regularFileFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return syserror.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- fd.offMu.Lock()
- defer fd.offMu.Unlock()
- switch whence {
- case linux.SEEK_SET:
- // Use offset as specified.
- case linux.SEEK_CUR:
- offset += fd.off
- case linux.SEEK_END:
- offset += int64(fd.inode().diskInode.Size())
- default:
- return 0, syserror.EINVAL
- }
- if offset < 0 {
- return 0, syserror.EINVAL
- }
- fd.off = offset
- return offset, nil
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- // TODO(b/134676337): Implement mmap(2).
- return syserror.ENODEV
-}
-
-// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.
-func (fd *regularFileFD) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, start, length uint64, whence int16, block fslock.Blocker) error {
- return fd.Locks().LockPOSIX(ctx, &fd.vfsfd, uid, t, start, length, whence, block)
-}
-
-// UnlockPOSIX implements vfs.FileDescriptionImpl.UnlockPOSIX.
-func (fd *regularFileFD) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, start, length uint64, whence int16) error {
- return fd.Locks().UnlockPOSIX(ctx, &fd.vfsfd, uid, start, length, whence)
-}
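
regularFileFD.Seek above resolves the requested whence against either the current offset or the on-disk size and rejects negative results. A short sketch of that arithmetic, with local constants standing in for linux.SEEK_SET, SEEK_CUR and SEEK_END:

package main

import (
	"errors"
	"fmt"
)

// Local constants standing in for linux.SEEK_SET, SEEK_CUR and SEEK_END.
const (
	seekSet = 0
	seekCur = 1
	seekEnd = 2
)

// seekOffset mirrors regularFileFD.Seek: resolve the requested whence against
// the current offset or the file size and reject negative results.
func seekOffset(offset, cur, size int64, whence int) (int64, error) {
	switch whence {
	case seekSet:
		// Use offset as given.
	case seekCur:
		offset += cur
	case seekEnd:
		offset += size
	default:
		return 0, errors.New("invalid whence")
	}
	if offset < 0 {
		return 0, errors.New("negative offset")
	}
	return offset, nil
}

func main() {
	fmt.Println(seekOffset(-10, 0, 100, seekEnd)) // 90 <nil>
	fmt.Println(seekOffset(-10, 5, 100, seekCur)) // 0 negative offset
}
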
diff --git a/pkg/sentry/fsimpl/ext/symlink.go b/pkg/sentry/fsimpl/ext/symlink.go
deleted file mode 100644
index 5e2bcc837..000000000
--- a/pkg/sentry/fsimpl/ext/symlink.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// symlink represents a symlink inode.
-//
-// +stateify savable
-type symlink struct {
- inode inode
- target string // immutable
-}
-
-// newSymlink is the symlink constructor. It reads out the symlink target from
-// the inode (however it might have been stored).
-func newSymlink(args inodeArgs) (*symlink, error) {
- var link []byte
-
- // If the symlink target is shorter than 60 bytes, it is stored inline in
- // inode.Data(). Otherwise either extents or block maps are used to store
- // the link.
- size := args.diskInode.Size()
- if size < 60 {
- link = args.diskInode.Data()[:size]
- } else {
- // Create a regular file out of this inode and read out the target.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- link = make([]byte, size)
- if n, err := regFile.impl.ReadAt(link, 0); uint64(n) < size {
- return nil, err
- }
- }
-
- file := &symlink{target: string(link)}
- file.inode.init(args, file)
- return file, nil
-}
-
-func (in *inode) isSymlink() bool {
- _, ok := in.impl.(*symlink)
- return ok
-}
-
-// symlinkFD represents a symlink file description and implements
-// vfs.FileDescriptionImpl. It may only be used if the open options contain
-// O_PATH. For this reason most of the functions return EBADF.
-//
-// +stateify savable
-type symlinkFD struct {
- fileDescription
- vfs.NoLockFD
-}
-
-// Compiles only if symlinkFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*symlinkFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *symlinkFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *symlinkFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *symlinkFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *symlinkFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *symlinkFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *symlinkFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return syserror.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *symlinkFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *symlinkFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- return syserror.EBADF
-}
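
newSymlink above branches on the target length: targets shorter than 60 bytes are stored inline in the inode's data area, while longer targets are read through the regular-file machinery (extents or block maps). A hedged sketch of that decision; readTarget, inlineData and readFileBlocks are illustrative names, not part of the ext package.

package main

import "fmt"

// readTarget sketches newSymlink's rule. inlineData and readFileBlocks stand
// in for diskInode.Data() and the regular-file reader, respectively.
func readTarget(size uint64, inlineData []byte, readFileBlocks func(size uint64) []byte) string {
	if size < 60 {
		// Short targets live inline in the inode's data area.
		return string(inlineData[:size])
	}
	// Longer targets are stored in file blocks (extents or block maps).
	return string(readFileBlocks(size))
}

func main() {
	inline := []byte("/tmp/short-target\x00..........................................")
	fmt.Println(readTarget(17, inline, nil)) // /tmp/short-target
}
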
diff --git a/pkg/sentry/fsimpl/ext/utils.go b/pkg/sentry/fsimpl/ext/utils.go
deleted file mode 100644
index 58ef7b9b8..000000000
--- a/pkg/sentry/fsimpl/ext/utils.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// readFromDisk performs a binary read from disk into the given struct from
-// the absolute offset provided.
-func readFromDisk(dev io.ReaderAt, abOff int64, v marshal.Marshallable) error {
- n := v.SizeBytes()
- buf := make([]byte, n)
- if read, _ := dev.ReadAt(buf, abOff); read < int(n) {
- return syserror.EIO
- }
-
- v.UnmarshalBytes(buf)
- return nil
-}
-
-// readSuperBlock reads the SuperBlock from block group 0 in the underlying
-// device. There are three versions of the superblock. This function identifies
-// and returns the correct version.
-func readSuperBlock(dev io.ReaderAt) (disklayout.SuperBlock, error) {
- var sb disklayout.SuperBlock = &disklayout.SuperBlockOld{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if sb.Revision() == disklayout.OldRev {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock32Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if !sb.IncompatibleFeatures().Is64Bit {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock64Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- return sb, nil
-}
-
-// blockGroupsCount returns the number of block groups in the ext fs.
-func blockGroupsCount(sb disklayout.SuperBlock) uint64 {
- blocksCount := sb.BlocksCount()
- blocksPerGroup := uint64(sb.BlocksPerGroup())
-
- // Round up the result. float64 can compromise precision so do it manually.
- return (blocksCount + blocksPerGroup - 1) / blocksPerGroup
-}
-
-// readBlockGroups reads the block group descriptor table from block group 0 in
-// the underlying device.
-func readBlockGroups(dev io.ReaderAt, sb disklayout.SuperBlock) ([]disklayout.BlockGroup, error) {
- bgCount := blockGroupsCount(sb)
- bgdSize := uint64(sb.BgDescSize())
- is64Bit := sb.IncompatibleFeatures().Is64Bit
- bgds := make([]disklayout.BlockGroup, bgCount)
-
- for i, off := uint64(0), uint64(sb.FirstDataBlock()+1)*sb.BlockSize(); i < bgCount; i, off = i+1, off+bgdSize {
- if is64Bit {
- bgds[i] = &disklayout.BlockGroup64Bit{}
- } else {
- bgds[i] = &disklayout.BlockGroup32Bit{}
- }
-
- if err := readFromDisk(dev, int64(off), bgds[i]); err != nil {
- return nil, err
- }
- }
- return bgds, nil
-}
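
blockGroupsCount above rounds up with integer arithmetic rather than going through float64. The same ceiling-division idiom in isolation, as a runnable sketch:

package main

import "fmt"

// blockGroupsCount reproduces the ceiling division used above without
// floating point: add (divisor - 1) before dividing to round up.
func blockGroupsCount(blocksCount, blocksPerGroup uint64) uint64 {
	return (blocksCount + blocksPerGroup - 1) / blocksPerGroup
}

func main() {
	fmt.Println(blockGroupsCount(32768, 32768)) // 1: exactly one full group
	fmt.Println(blockGroupsCount(32769, 32768)) // 2: the remainder needs a second group
}
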
diff --git a/pkg/sentry/fsimpl/fuse/BUILD b/pkg/sentry/fsimpl/fuse/BUILD
deleted file mode 100644
index 2158b1bbc..000000000
--- a/pkg/sentry/fsimpl/fuse/BUILD
+++ /dev/null
@@ -1,87 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "request_list",
- out = "request_list.go",
- package = "fuse",
- prefix = "request",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Request",
- "Linker": "*Request",
- },
-)
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "fuse",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_library(
- name = "fuse",
- srcs = [
- "connection.go",
- "connection_control.go",
- "dev.go",
- "directory.go",
- "file.go",
- "fusefs.go",
- "inode_refs.go",
- "read_write.go",
- "register.go",
- "regular_file.go",
- "request_list.go",
- "request_response.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "fuse_test",
- size = "small",
- srcs = [
- "connection_test.go",
- "dev_test.go",
- "utils_test.go",
- ],
- library = ":fuse",
- deps = [
- "//pkg/abi/linux",
- "//pkg/marshal",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/fuse/connection_test.go b/pkg/sentry/fsimpl/fuse/connection_test.go
deleted file mode 100644
index 91d16c1cf..000000000
--- a/pkg/sentry/fsimpl/fuse/connection_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "math/rand"
- "syscall"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// TestConnectionInitBlock tests that initialization correctly blocks and
-// unblocks the connection. Since it's infeasible to test kernelTask.Block()
-// in a unit test, the code in Call() is not tested here.
-func TestConnectionInitBlock(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
-
- conn, _, err := newTestConnection(s, k, maxActiveRequestsDefault)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- select {
- case <-conn.initializedChan:
- t.Fatalf("initializedChan should be blocking before SetInitialized")
- default:
- }
-
- conn.SetInitialized()
-
- select {
- case <-conn.initializedChan:
- default:
- t.Fatalf("initializedChan should not be blocking after SetInitialized")
- }
-}
-
-func TestConnectionAbort(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- creds := auth.CredentialsFromContext(s.Ctx)
- task := kernel.TaskFromContext(s.Ctx)
-
- const numRequests uint64 = 256
-
- conn, _, err := newTestConnection(s, k, numRequests)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- testObj := &testPayload{
- data: rand.Uint32(),
- }
-
- var futNormal []*futureResponse
-
- for i := 0; i < int(numRequests); i++ {
- req, err := conn.NewRequest(creds, uint32(i), uint64(i), 0, testObj)
- if err != nil {
- t.Fatalf("NewRequest creation failed: %v", err)
- }
- fut, err := conn.callFutureLocked(task, req)
- if err != nil {
- t.Fatalf("callFutureLocked failed: %v", err)
- }
- futNormal = append(futNormal, fut)
- }
-
- conn.Abort(s.Ctx)
-
- // Abort should unblock the initialization channel.
- // Note: no test requests are actually blocked on `conn.initializedChan`.
- select {
- case <-conn.initializedChan:
- default:
- t.Fatalf("initializedChan should not be blocking after SetInitialized")
- }
-
- // Abort will return ECONNABORTED error to unblocked requests.
- for _, fut := range futNormal {
- if fut.getResponse().hdr.Error != -int32(syscall.ECONNABORTED) {
- t.Fatalf("Incorrect error code received for aborted connection: %v", fut.getResponse().hdr.Error)
- }
- }
-
- // After abort, Call() should return directly with ENOTCONN.
- req, err := conn.NewRequest(creds, 0, 0, 0, testObj)
- if err != nil {
- t.Fatalf("NewRequest creation failed: %v", err)
- }
- _, err = conn.Call(task, req)
- if err != syserror.ENOTCONN {
- t.Fatalf("Incorrect error code received for Call() after connection aborted")
- }
-
-}
diff --git a/pkg/sentry/fsimpl/fuse/dev_test.go b/pkg/sentry/fsimpl/fuse/dev_test.go
deleted file mode 100644
index 5986133e9..000000000
--- a/pkg/sentry/fsimpl/fuse/dev_test.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "fmt"
- "math/rand"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/pkg/waiter"
-)
-
-// echoTestOpcode is the Opcode used during testing. The server used in tests
-// will simply echo the payload back with the appropriate headers.
-const echoTestOpcode linux.FUSEOpcode = 1000
-
-// TestFUSECommunication tests that the communication layer between the Sentry and the
-// FUSE server daemon works as expected.
-func TestFUSECommunication(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- creds := auth.CredentialsFromContext(s.Ctx)
-
- // Create test cases with different number of concurrent clients and servers.
- testCases := []struct {
- Name string
- NumClients int
- NumServers int
- MaxActiveRequests uint64
- }{
- {
- Name: "SingleClientSingleServer",
- NumClients: 1,
- NumServers: 1,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "SingleClientMultipleServers",
- NumClients: 1,
- NumServers: 10,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "MultipleClientsSingleServer",
- NumClients: 10,
- NumServers: 1,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "MultipleClientsMultipleServers",
- NumClients: 10,
- NumServers: 10,
- MaxActiveRequests: maxActiveRequestsDefault,
- },
- {
- Name: "RequestCapacityFull",
- NumClients: 10,
- NumServers: 1,
- MaxActiveRequests: 1,
- },
- {
- Name: "RequestCapacityContinuouslyFull",
- NumClients: 100,
- NumServers: 2,
- MaxActiveRequests: 2,
- },
- }
-
- for _, testCase := range testCases {
- t.Run(testCase.Name, func(t *testing.T) {
- conn, fd, err := newTestConnection(s, k, testCase.MaxActiveRequests)
- if err != nil {
- t.Fatalf("newTestConnection: %v", err)
- }
-
- clientsDone := make([]chan struct{}, testCase.NumClients)
- serversDone := make([]chan struct{}, testCase.NumServers)
- serversKill := make([]chan struct{}, testCase.NumServers)
-
- // FUSE clients.
- for i := 0; i < testCase.NumClients; i++ {
- clientsDone[i] = make(chan struct{})
- go func(i int) {
- fuseClientRun(t, s, k, conn, creds, uint32(i), uint64(i), clientsDone[i])
- }(i)
- }
-
- // FUSE servers.
- for j := 0; j < testCase.NumServers; j++ {
- serversDone[j] = make(chan struct{})
- serversKill[j] = make(chan struct{}, 1) // The kill command shouldn't block.
- go func(j int) {
- fuseServerRun(t, s, k, fd, serversDone[j], serversKill[j])
- }(j)
- }
-
- // Tear down.
- //
- // Make sure all the clients are done.
- for i := 0; i < testCase.NumClients; i++ {
- <-clientsDone[i]
- }
-
- // Kill any server that is potentially waiting.
- for j := 0; j < testCase.NumServers; j++ {
- serversKill[j] <- struct{}{}
- }
-
- // Make sure all the servers are done.
- for j := 0; j < testCase.NumServers; j++ {
- <-serversDone[j]
- }
- })
- }
-}
-
-// CallTest makes a request to the server and blocks the invoking goroutine
-// until the server responds. It doesn't block a kernel.Task. Analogous to
-// Connection.Call but used for testing.
-func CallTest(conn *connection, t *kernel.Task, r *Request, i uint32) (*Response, error) {
- conn.fd.mu.Lock()
-
- // Wait until we're certain that a new request can be processed.
- for conn.fd.numActiveRequests == conn.fd.fs.opts.maxActiveRequests {
- conn.fd.mu.Unlock()
- select {
- case <-conn.fd.fullQueueCh:
- }
- conn.fd.mu.Lock()
- }
-
- fut, err := conn.callFutureLocked(t, r) // No task given.
- conn.fd.mu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- // Resolve the response.
- //
- // Block without a task.
- select {
- case <-fut.ch:
- }
-
- // A response is ready. Resolve and return it.
- return fut.getResponse(), nil
-}
-
-// ReadTest is analogous to vfs.FileDescription.Read and reads from the FUSE
-// device. However, it does so without blocking the calling task; it just
-// waits on a channel. The behaviour is essentially the same as DeviceFD.Read
-// except that it guarantees the task is not blocked.
-func ReadTest(serverTask *kernel.Task, fd *vfs.FileDescription, inIOseq usermem.IOSequence, killServer chan struct{}) (int64, bool, error) {
- var err error
- var n, total int64
-
- dev := fd.Impl().(*DeviceFD)
-
- // Register for notifications.
- w, ch := waiter.NewChannelEntry(nil)
- dev.EventRegister(&w, waiter.EventIn)
- for {
- // Issue the request and break out if it completes with anything other than
- // "would block".
- n, err = dev.Read(serverTask, inIOseq, vfs.ReadOptions{})
- total += n
- if err != syserror.ErrWouldBlock {
- break
- }
-
- // Wait for a notification that we should retry.
- // Emulate the blocking for when no requests are available.
- select {
- case <-ch:
- case <-killServer:
- // Server killed by the main program.
- return 0, true, nil
- }
- }
-
- dev.EventUnregister(&w)
- return total, false, err
-}
-
-// fuseClientRun emulates all the actions of a normal FUSE request. It creates
-// a header, a payload, calls the server, waits for the response, and processes
-// the response.
-func fuseClientRun(t *testing.T, s *testutil.System, k *kernel.Kernel, conn *connection, creds *auth.Credentials, pid uint32, inode uint64, clientDone chan struct{}) {
- defer func() { clientDone <- struct{}{} }()
-
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- clientTask, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("fuse-client-%v", pid), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatal(err)
- }
- testObj := &testPayload{
- data: rand.Uint32(),
- }
-
- req, err := conn.NewRequest(creds, pid, inode, echoTestOpcode, testObj)
- if err != nil {
- t.Fatalf("NewRequest creation failed: %v", err)
- }
-
- // Queue up a request.
- // Analogous to Call except it doesn't block on the task.
- resp, err := CallTest(conn, clientTask, req, pid)
- if err != nil {
- t.Fatalf("CallTaskNonBlock failed: %v", err)
- }
-
- if err = resp.Error(); err != nil {
- t.Fatalf("Server responded with an error: %v", err)
- }
-
- var respTestPayload testPayload
- if err := resp.UnmarshalPayload(&respTestPayload); err != nil {
- t.Fatalf("Unmarshalling payload error: %v", err)
- }
-
- if resp.hdr.Unique != req.hdr.Unique {
- t.Fatalf("got response for another request. Expected response for req %v but got response for req %v",
- req.hdr.Unique, resp.hdr.Unique)
- }
-
- if respTestPayload.data != testObj.data {
- t.Fatalf("read incorrect data. Data expected: %v, but got %v", testObj.data, respTestPayload.data)
- }
-
-}
-
-// fuseServerRun creates a task and emulates a simple FUSE server that reads a
-// request and echoes the same struct back as a response using the appropriate
-// headers.
-func fuseServerRun(t *testing.T, s *testutil.System, k *kernel.Kernel, fd *vfs.FileDescription, serverDone, killServer chan struct{}) {
- defer func() { serverDone <- struct{}{} }()
-
- // Create the tasks that the server will be using.
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- var readPayload testPayload
-
- serverTask, err := testutil.CreateTask(s.Ctx, "fuse-server", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatal(err)
- }
-
- // Read the request.
- for {
- inHdrLen := uint32((*linux.FUSEHeaderIn)(nil).SizeBytes())
- payloadLen := uint32(readPayload.SizeBytes())
-
- // The read buffer must meet certain size criteria.
- buffSize := inHdrLen + payloadLen
- if buffSize < linux.FUSE_MIN_READ_BUFFER {
- buffSize = linux.FUSE_MIN_READ_BUFFER
- }
- inBuf := make([]byte, buffSize)
- inIOseq := usermem.BytesIOSequence(inBuf)
-
- n, serverKilled, err := ReadTest(serverTask, fd, inIOseq, killServer)
- if err != nil {
- t.Fatalf("Read failed :%v", err)
- }
-
- // Server should shut down. No new requests are going to be made.
- if serverKilled {
- break
- }
-
- if n <= 0 {
- t.Fatalf("Read read no bytes")
- }
-
- var readFUSEHeaderIn linux.FUSEHeaderIn
- readFUSEHeaderIn.UnmarshalUnsafe(inBuf[:inHdrLen])
- readPayload.UnmarshalUnsafe(inBuf[inHdrLen : inHdrLen+payloadLen])
-
- if readFUSEHeaderIn.Opcode != echoTestOpcode {
- t.Fatalf("read incorrect data. Header: %v, Payload: %v", readFUSEHeaderIn, readPayload)
- }
-
- // Write the response.
- outHdrLen := uint32((*linux.FUSEHeaderOut)(nil).SizeBytes())
- outBuf := make([]byte, outHdrLen+payloadLen)
- outHeader := linux.FUSEHeaderOut{
- Len: outHdrLen + payloadLen,
- Error: 0,
- Unique: readFUSEHeaderIn.Unique,
- }
-
- // Echo the payload back.
- outHeader.MarshalUnsafe(outBuf[:outHdrLen])
- readPayload.MarshalUnsafe(outBuf[outHdrLen:])
- outIOseq := usermem.BytesIOSequence(outBuf)
-
- n, err = fd.Write(s.Ctx, outIOseq, vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("Write failed :%v", err)
- }
- }
-}
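
fuseServerRun above sizes its read buffer as request header plus payload, clamped up to the kernel's minimum FUSE read buffer. A standalone sketch of that clamp; the 8192-byte minimum is an assumed value standing in for linux.FUSE_MIN_READ_BUFFER.

package main

import "fmt"

// fuseMinReadBuffer is an assumed stand-in for linux.FUSE_MIN_READ_BUFFER.
const fuseMinReadBuffer = 8192

// readBufferSize mirrors the sizing in fuseServerRun: the buffer must hold at
// least one request header plus payload, and never be smaller than the
// kernel's minimum FUSE read buffer.
func readBufferSize(hdrLen, payloadLen uint32) uint32 {
	size := hdrLen + payloadLen
	if size < fuseMinReadBuffer {
		size = fuseMinReadBuffer
	}
	return size
}

func main() {
	fmt.Println(readBufferSize(40, 4))     // clamped up to the minimum
	fmt.Println(readBufferSize(40, 16384)) // large payloads keep their own size
}
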
diff --git a/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go b/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go
new file mode 100644
index 000000000..f59e82755
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/fuse_state_autogen.go
@@ -0,0 +1,525 @@
+// automatically generated by stateify.
+
+package fuse
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (conn *connection) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.connection"
+}
+
+func (conn *connection) StateFields() []string {
+ return []string{
+ "fd",
+ "attributeVersion",
+ "initialized",
+ "initializedChan",
+ "connected",
+ "connInitError",
+ "connInitSuccess",
+ "aborted",
+ "numWaiting",
+ "asyncNum",
+ "asyncCongestionThreshold",
+ "asyncNumMax",
+ "maxRead",
+ "maxWrite",
+ "maxPages",
+ "minor",
+ "atomicOTrunc",
+ "asyncRead",
+ "writebackCache",
+ "bigWrites",
+ "dontMask",
+ "noOpen",
+ }
+}
+
+func (conn *connection) beforeSave() {}
+
+func (conn *connection) StateSave(stateSinkObject state.Sink) {
+ conn.beforeSave()
+ var initializedChanValue bool = conn.saveInitializedChan()
+ stateSinkObject.SaveValue(3, initializedChanValue)
+ stateSinkObject.Save(0, &conn.fd)
+ stateSinkObject.Save(1, &conn.attributeVersion)
+ stateSinkObject.Save(2, &conn.initialized)
+ stateSinkObject.Save(4, &conn.connected)
+ stateSinkObject.Save(5, &conn.connInitError)
+ stateSinkObject.Save(6, &conn.connInitSuccess)
+ stateSinkObject.Save(7, &conn.aborted)
+ stateSinkObject.Save(8, &conn.numWaiting)
+ stateSinkObject.Save(9, &conn.asyncNum)
+ stateSinkObject.Save(10, &conn.asyncCongestionThreshold)
+ stateSinkObject.Save(11, &conn.asyncNumMax)
+ stateSinkObject.Save(12, &conn.maxRead)
+ stateSinkObject.Save(13, &conn.maxWrite)
+ stateSinkObject.Save(14, &conn.maxPages)
+ stateSinkObject.Save(15, &conn.minor)
+ stateSinkObject.Save(16, &conn.atomicOTrunc)
+ stateSinkObject.Save(17, &conn.asyncRead)
+ stateSinkObject.Save(18, &conn.writebackCache)
+ stateSinkObject.Save(19, &conn.bigWrites)
+ stateSinkObject.Save(20, &conn.dontMask)
+ stateSinkObject.Save(21, &conn.noOpen)
+}
+
+func (conn *connection) afterLoad() {}
+
+func (conn *connection) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &conn.fd)
+ stateSourceObject.Load(1, &conn.attributeVersion)
+ stateSourceObject.Load(2, &conn.initialized)
+ stateSourceObject.Load(4, &conn.connected)
+ stateSourceObject.Load(5, &conn.connInitError)
+ stateSourceObject.Load(6, &conn.connInitSuccess)
+ stateSourceObject.Load(7, &conn.aborted)
+ stateSourceObject.Load(8, &conn.numWaiting)
+ stateSourceObject.Load(9, &conn.asyncNum)
+ stateSourceObject.Load(10, &conn.asyncCongestionThreshold)
+ stateSourceObject.Load(11, &conn.asyncNumMax)
+ stateSourceObject.Load(12, &conn.maxRead)
+ stateSourceObject.Load(13, &conn.maxWrite)
+ stateSourceObject.Load(14, &conn.maxPages)
+ stateSourceObject.Load(15, &conn.minor)
+ stateSourceObject.Load(16, &conn.atomicOTrunc)
+ stateSourceObject.Load(17, &conn.asyncRead)
+ stateSourceObject.Load(18, &conn.writebackCache)
+ stateSourceObject.Load(19, &conn.bigWrites)
+ stateSourceObject.Load(20, &conn.dontMask)
+ stateSourceObject.Load(21, &conn.noOpen)
+ stateSourceObject.LoadValue(3, new(bool), func(y interface{}) { conn.loadInitializedChan(y.(bool)) })
+}
+
+func (f *fuseDevice) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.fuseDevice"
+}
+
+func (f *fuseDevice) StateFields() []string {
+ return []string{}
+}
+
+func (f *fuseDevice) beforeSave() {}
+
+func (f *fuseDevice) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *fuseDevice) afterLoad() {}
+
+func (f *fuseDevice) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fd *DeviceFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.DeviceFD"
+}
+
+func (fd *DeviceFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "nextOpID",
+ "queue",
+ "numActiveRequests",
+ "completions",
+ "writeCursor",
+ "writeBuf",
+ "writeCursorFR",
+ "waitQueue",
+ "fullQueueCh",
+ "fs",
+ }
+}
+
+func (fd *DeviceFD) beforeSave() {}
+
+func (fd *DeviceFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ var fullQueueChValue int = fd.saveFullQueueCh()
+ stateSinkObject.SaveValue(12, fullQueueChValue)
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.NoLockFD)
+ stateSinkObject.Save(4, &fd.nextOpID)
+ stateSinkObject.Save(5, &fd.queue)
+ stateSinkObject.Save(6, &fd.numActiveRequests)
+ stateSinkObject.Save(7, &fd.completions)
+ stateSinkObject.Save(8, &fd.writeCursor)
+ stateSinkObject.Save(9, &fd.writeBuf)
+ stateSinkObject.Save(10, &fd.writeCursorFR)
+ stateSinkObject.Save(11, &fd.waitQueue)
+ stateSinkObject.Save(13, &fd.fs)
+}
+
+func (fd *DeviceFD) afterLoad() {}
+
+func (fd *DeviceFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.NoLockFD)
+ stateSourceObject.Load(4, &fd.nextOpID)
+ stateSourceObject.Load(5, &fd.queue)
+ stateSourceObject.Load(6, &fd.numActiveRequests)
+ stateSourceObject.Load(7, &fd.completions)
+ stateSourceObject.Load(8, &fd.writeCursor)
+ stateSourceObject.Load(9, &fd.writeBuf)
+ stateSourceObject.Load(10, &fd.writeCursorFR)
+ stateSourceObject.Load(11, &fd.waitQueue)
+ stateSourceObject.Load(13, &fd.fs)
+ stateSourceObject.LoadValue(12, new(int), func(y interface{}) { fd.loadFullQueueCh(y.(int)) })
+}
+
+func (fsType *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.FilesystemType"
+}
+
+func (fsType *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *FilesystemType) beforeSave() {}
+
+func (fsType *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *FilesystemType) afterLoad() {}
+
+func (fsType *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *filesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.filesystemOptions"
+}
+
+func (f *filesystemOptions) StateFields() []string {
+ return []string{
+ "userID",
+ "groupID",
+ "rootMode",
+ "maxActiveRequests",
+ "maxRead",
+ }
+}
+
+func (f *filesystemOptions) beforeSave() {}
+
+func (f *filesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.userID)
+ stateSinkObject.Save(1, &f.groupID)
+ stateSinkObject.Save(2, &f.rootMode)
+ stateSinkObject.Save(3, &f.maxActiveRequests)
+ stateSinkObject.Save(4, &f.maxRead)
+}
+
+func (f *filesystemOptions) afterLoad() {}
+
+func (f *filesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.userID)
+ stateSourceObject.Load(1, &f.groupID)
+ stateSourceObject.Load(2, &f.rootMode)
+ stateSourceObject.Load(3, &f.maxActiveRequests)
+ stateSourceObject.Load(4, &f.maxRead)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ "conn",
+ "opts",
+ "umounted",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+ stateSinkObject.Save(2, &fs.conn)
+ stateSinkObject.Save(3, &fs.opts)
+ stateSinkObject.Save(4, &fs.umounted)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+ stateSourceObject.Load(2, &fs.conn)
+ stateSourceObject.Load(3, &fs.opts)
+ stateSourceObject.Load(4, &fs.umounted)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "inodeRefs",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "OrderedChildren",
+ "fs",
+ "metadataMu",
+ "nodeID",
+ "locks",
+ "size",
+ "attributeVersion",
+ "attributeTime",
+ "version",
+ "link",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.inodeRefs)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.OrderedChildren)
+ stateSinkObject.Save(6, &i.fs)
+ stateSinkObject.Save(7, &i.metadataMu)
+ stateSinkObject.Save(8, &i.nodeID)
+ stateSinkObject.Save(9, &i.locks)
+ stateSinkObject.Save(10, &i.size)
+ stateSinkObject.Save(11, &i.attributeVersion)
+ stateSinkObject.Save(12, &i.attributeTime)
+ stateSinkObject.Save(13, &i.version)
+ stateSinkObject.Save(14, &i.link)
+}
+
+func (i *inode) afterLoad() {}
+
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.inodeRefs)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.OrderedChildren)
+ stateSourceObject.Load(6, &i.fs)
+ stateSourceObject.Load(7, &i.metadataMu)
+ stateSourceObject.Load(8, &i.nodeID)
+ stateSourceObject.Load(9, &i.locks)
+ stateSourceObject.Load(10, &i.size)
+ stateSourceObject.Load(11, &i.attributeVersion)
+ stateSourceObject.Load(12, &i.attributeTime)
+ stateSourceObject.Load(13, &i.version)
+ stateSourceObject.Load(14, &i.link)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (l *requestList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.requestList"
+}
+
+func (l *requestList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *requestList) beforeSave() {}
+
+func (l *requestList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *requestList) afterLoad() {}
+
+func (l *requestList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *requestEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.requestEntry"
+}
+
+func (e *requestEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *requestEntry) beforeSave() {}
+
+func (e *requestEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *requestEntry) afterLoad() {}
+
+func (e *requestEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *Request) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.Request"
+}
+
+func (r *Request) StateFields() []string {
+ return []string{
+ "requestEntry",
+ "id",
+ "hdr",
+ "data",
+ "payload",
+ "async",
+ "noReply",
+ }
+}
+
+func (r *Request) beforeSave() {}
+
+func (r *Request) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.requestEntry)
+ stateSinkObject.Save(1, &r.id)
+ stateSinkObject.Save(2, &r.hdr)
+ stateSinkObject.Save(3, &r.data)
+ stateSinkObject.Save(4, &r.payload)
+ stateSinkObject.Save(5, &r.async)
+ stateSinkObject.Save(6, &r.noReply)
+}
+
+func (r *Request) afterLoad() {}
+
+func (r *Request) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.requestEntry)
+ stateSourceObject.Load(1, &r.id)
+ stateSourceObject.Load(2, &r.hdr)
+ stateSourceObject.Load(3, &r.data)
+ stateSourceObject.Load(4, &r.payload)
+ stateSourceObject.Load(5, &r.async)
+ stateSourceObject.Load(6, &r.noReply)
+}
+
+func (f *futureResponse) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.futureResponse"
+}
+
+func (f *futureResponse) StateFields() []string {
+ return []string{
+ "opcode",
+ "ch",
+ "hdr",
+ "data",
+ "async",
+ }
+}
+
+func (f *futureResponse) beforeSave() {}
+
+func (f *futureResponse) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.opcode)
+ stateSinkObject.Save(1, &f.ch)
+ stateSinkObject.Save(2, &f.hdr)
+ stateSinkObject.Save(3, &f.data)
+ stateSinkObject.Save(4, &f.async)
+}
+
+func (f *futureResponse) afterLoad() {}
+
+func (f *futureResponse) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.opcode)
+ stateSourceObject.Load(1, &f.ch)
+ stateSourceObject.Load(2, &f.hdr)
+ stateSourceObject.Load(3, &f.data)
+ stateSourceObject.Load(4, &f.async)
+}
+
+func (r *Response) StateTypeName() string {
+ return "pkg/sentry/fsimpl/fuse.Response"
+}
+
+func (r *Response) StateFields() []string {
+ return []string{
+ "opcode",
+ "hdr",
+ "data",
+ }
+}
+
+func (r *Response) beforeSave() {}
+
+func (r *Response) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.opcode)
+ stateSinkObject.Save(1, &r.hdr)
+ stateSinkObject.Save(2, &r.data)
+}
+
+func (r *Response) afterLoad() {}
+
+func (r *Response) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.opcode)
+ stateSourceObject.Load(1, &r.hdr)
+ stateSourceObject.Load(2, &r.data)
+}
+
+func init() {
+ state.Register((*connection)(nil))
+ state.Register((*fuseDevice)(nil))
+ state.Register((*DeviceFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystemOptions)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*requestList)(nil))
+ state.Register((*requestEntry)(nil))
+ state.Register((*Request)(nil))
+ state.Register((*futureResponse)(nil))
+ state.Register((*Response)(nil))
+}
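The generated StateSave/StateLoad methods above all follow the same pattern: each field named by StateFields is saved and loaded by its index, while fields that cannot be serialized directly (for example DeviceFD.fullQueueCh, a channel saved as an int, or connection's initialized channel saved as a bool) go through SaveValue/LoadValue together with hand-written save/load helpers on the original type. The sketch below shows what such a pair might look like for a hypothetical demoFD type; the type, its fields and the helper names are invented for illustration, and only the state.Sink/state.Source calls mirror the generated code above.

package fuse

import "gvisor.dev/gvisor/pkg/state"

// demoFD is a hypothetical savable type used only to illustrate the
// SaveValue/LoadValue pattern; it is not part of the fuse package.
type demoFD struct {
	// queueCap is an ordinary field and is saved directly by index.
	queueCap int

	// ready cannot be serialized directly. The real types mark such fields
	// so that stateify emits SaveValue/LoadValue calls; here the conversion
	// helpers are simply written out by hand.
	ready chan struct{}
}

// saveReady converts the channel into a plain value (its capacity) for saving.
func (d *demoFD) saveReady() int { return cap(d.ready) }

// loadReady rebuilds the channel from the saved capacity.
func (d *demoFD) loadReady(capacity int) { d.ready = make(chan struct{}, capacity) }

// StateSave mirrors the generated DeviceFD.StateSave above: the converted
// value is written with SaveValue, everything else with Save.
func (d *demoFD) StateSave(stateSinkObject state.Sink) {
	var readyValue int = d.saveReady()
	stateSinkObject.SaveValue(1, readyValue)
	stateSinkObject.Save(0, &d.queueCap)
}

// StateLoad mirrors the generated DeviceFD.StateLoad above: plain fields are
// loaded in place, and the converted value is handed to the load helper.
func (d *demoFD) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &d.queueCap)
	stateSourceObject.LoadValue(1, new(int), func(y interface{}) { d.loadReady(y.(int)) })
}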
diff --git a/pkg/sentry/fsimpl/fuse/inode_refs.go b/pkg/sentry/fsimpl/fuse/inode_refs.go
new file mode 100644
index 000000000..31cb3791f
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/inode_refs.go
@@ -0,0 +1,128 @@
+package fuse
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// inodeenableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// inodeobj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// inodeRefs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *inodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
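A quick sketch of the reference-count semantics implemented above: refCount stores the count minus one, so the zero value already holds a single reference; IncRef and TryIncRef add references, and DecRef runs the destructor exactly when the count drops back to zero. The test-style function below is illustrative only and assumes the default configuration in which refsvfs2 leak checking is disabled, so the registration bookkeeping does not interfere.

package fuse

import "testing"

// TestInodeRefsSketch demonstrates the expected behaviour of the generated
// inodeRefs type: one reference at zero value, destructor on the last DecRef.
func TestInodeRefsSketch(t *testing.T) {
	var refs inodeRefs // zero value == one reference (refCount stores count-1).

	if got := refs.ReadRefs(); got != 1 {
		t.Fatalf("ReadRefs() = %d, want 1", got)
	}

	refs.IncRef() // two references.
	if !refs.TryIncRef() { // three references; succeeds while the count is positive.
		t.Fatal("TryIncRef() failed on a live object")
	}

	destroyed := false
	refs.DecRef(func() { destroyed = true }) // three -> two
	refs.DecRef(func() { destroyed = true }) // two -> one
	if destroyed {
		t.Fatal("destructor ran while references were still held")
	}
	refs.DecRef(func() { destroyed = true }) // one -> zero: destructor runs.
	if !destroyed {
		t.Fatal("destructor did not run on the last DecRef")
	}
}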
diff --git a/pkg/sentry/fsimpl/fuse/request_list.go b/pkg/sentry/fsimpl/fuse/request_list.go
new file mode 100644
index 000000000..002262f23
--- /dev/null
+++ b/pkg/sentry/fsimpl/fuse/request_list.go
@@ -0,0 +1,193 @@
+package fuse
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type requestElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (requestElementMapper) linkerFor(elem *Request) *Request { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type requestList struct {
+ head *Request
+ tail *Request
+}
+
+// Reset resets list l to the empty state.
+func (l *requestList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+func (l *requestList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+func (l *requestList) Front() *Request {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+func (l *requestList) Back() *Request {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+func (l *requestList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (requestElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *requestList) PushFront(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ requestElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *requestList) PushBack(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ requestElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *requestList) PushBackList(m *requestList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ requestElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ requestElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+func (l *requestList) InsertAfter(b, e *Request) {
+ bLinker := requestElementMapper{}.linkerFor(b)
+ eLinker := requestElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ requestElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+func (l *requestList) InsertBefore(a, e *Request) {
+ aLinker := requestElementMapper{}.linkerFor(a)
+ eLinker := requestElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ requestElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+func (l *requestList) Remove(e *Request) {
+ linker := requestElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ requestElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ requestElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type requestEntry struct {
+ next *Request
+ prev *Request
+}
+
+// Next returns the entry that follows e in the list.
+func (e *requestEntry) Next() *Request {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *requestEntry) Prev() *Request {
+ return e.prev
+}
+
+// SetNext assigns elem as the entry that follows e in the list.
+func (e *requestEntry) SetNext(elem *Request) {
+ e.next = elem
+}
+
+// SetPrev assigns elem as the entry that precedes e in the list.
+func (e *requestEntry) SetPrev(elem *Request) {
+ e.prev = elem
+}
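As a usage sketch, the intrusive list above is driven entirely through the requestEntry embedded in Request (via the identity requestElementMapper), so queuing and dequeuing never allocate. The helper below is hypothetical; it only shows the Empty/PushBack/Front/Remove calls in a FIFO drain.

package fuse

// drainRequests is an illustrative helper: it queues the given requests in
// FIFO order on a zero-value requestList and then unlinks them one by one.
func drainRequests(reqs []*Request) []*Request {
	var pending requestList // the zero value is an empty, ready-to-use list.
	for _, r := range reqs {
		pending.PushBack(r) // links r via its embedded requestEntry, O(1).
	}

	var out []*Request
	for !pending.Empty() {
		r := pending.Front()
		pending.Remove(r) // O(1) unlink; r's next/prev pointers are cleared.
		out = append(out, r)
	}
	return out
}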
diff --git a/pkg/sentry/fsimpl/fuse/utils_test.go b/pkg/sentry/fsimpl/fuse/utils_test.go
deleted file mode 100644
index e1d9e3365..000000000
--- a/pkg/sentry/fsimpl/fuse/utils_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fuse
-
-import (
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func setup(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Error creating kernel: %v", err)
- }
-
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
-
- k.VFS().MustRegisterFilesystemType(Name, &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserList: true,
- AllowUserMount: true,
- })
-
- mntns, err := k.VFS().NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("NewMountNamespace(): %v", err)
- }
-
- return testutil.NewSystem(ctx, t, k.VFS(), mntns)
-}
-
-// newTestConnection creates a fuse connection that the sentry can communicate with
-// and the FD for the server to communicate with.
-func newTestConnection(system *testutil.System, k *kernel.Kernel, maxActiveRequests uint64) (*connection, *vfs.FileDescription, error) {
- vfsObj := &vfs.VirtualFilesystem{}
- fuseDev := &DeviceFD{}
-
- if err := vfsObj.Init(system.Ctx); err != nil {
- return nil, nil, err
- }
-
- vd := vfsObj.NewAnonVirtualDentry("genCountFD")
- defer vd.DecRef(system.Ctx)
- if err := fuseDev.vfsfd.Init(fuseDev, linux.O_RDWR|linux.O_CREAT, vd.Mount(), vd.Dentry(), &vfs.FileDescriptionOptions{}); err != nil {
- return nil, nil, err
- }
-
- fsopts := filesystemOptions{
- maxActiveRequests: maxActiveRequests,
- }
- fs, err := newFUSEFilesystem(system.Ctx, 0, &fsopts, &fuseDev.vfsfd)
- if err != nil {
- return nil, nil, err
- }
-
- return fs.conn, &fuseDev.vfsfd, nil
-}
-
-type testPayload struct {
- marshal.StubMarshallable
- data uint32
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *testPayload) SizeBytes() int {
- return 4
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *testPayload) MarshalBytes(dst []byte) {
- usermem.ByteOrder.PutUint32(dst[:4], t.data)
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *testPayload) UnmarshalBytes(src []byte) {
- *t = testPayload{data: usermem.ByteOrder.Uint32(src[:4])}
-}
-
-// Packed implements marshal.Marshallable.Packed.
-func (t *testPayload) Packed() bool {
- return true
-}
-
-// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
-func (t *testPayload) MarshalUnsafe(dst []byte) {
- t.MarshalBytes(dst)
-}
-
-// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
-func (t *testPayload) UnmarshalUnsafe(src []byte) {
- t.UnmarshalBytes(src)
-}
-
-// CopyOutN implements marshal.Marshallable.CopyOutN.
-func (t *testPayload) CopyOutN(task marshal.CopyContext, addr usermem.Addr, limit int) (int, error) {
- panic("not implemented")
-}
-
-// CopyOut implements marshal.Marshallable.CopyOut.
-func (t *testPayload) CopyOut(task marshal.CopyContext, addr usermem.Addr) (int, error) {
- panic("not implemented")
-}
-
-// CopyIn implements marshal.Marshallable.CopyIn.
-func (t *testPayload) CopyIn(task marshal.CopyContext, addr usermem.Addr) (int, error) {
- panic("not implemented")
-}
-
-// WriteTo implements io.WriterTo.WriteTo.
-func (t *testPayload) WriteTo(w io.Writer) (int64, error) {
- panic("not implemented")
-}
diff --git a/pkg/sentry/fsimpl/gofer/BUILD b/pkg/sentry/fsimpl/gofer/BUILD
deleted file mode 100644
index 4c3e9acf8..000000000
--- a/pkg/sentry/fsimpl/gofer/BUILD
+++ /dev/null
@@ -1,94 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "gofer",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dentry",
- "Linker": "*dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "gofer",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "gofer",
- srcs = [
- "dentry_list.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "gofer.go",
- "handle.go",
- "host_named_pipe.go",
- "p9file.go",
- "regular_file.go",
- "save_restore.go",
- "socket.go",
- "special_file.go",
- "symlink.go",
- "time.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/p9",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/host",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "gofer_test",
- srcs = ["gofer_test.go"],
- library = ":gofer",
- deps = [
- "//pkg/p9",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/pgalloc",
- ],
-)
diff --git a/pkg/sentry/fsimpl/gofer/dentry_list.go b/pkg/sentry/fsimpl/gofer/dentry_list.go
new file mode 100644
index 000000000..84f839e3a
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/dentry_list.go
@@ -0,0 +1,193 @@
+package gofer
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *dentry) *dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *dentry
+ tail *dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+func (l *dentryList) Front() *dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+func (l *dentryList) Back() *dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *dentryList) PushFront(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *dentryList) PushBack(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+func (l *dentryList) InsertAfter(b, e *dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+func (l *dentryList) InsertBefore(a, e *dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+func (l *dentryList) Remove(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *dentry
+ prev *dentry
+}
+
+// Next returns the entry that follows e in the list.
+func (e *dentryEntry) Next() *dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *dentryEntry) Prev() *dentry {
+ return e.prev
+}
+
+// SetNext assigns elem as the entry that follows e in the list.
+func (e *dentryEntry) SetNext(elem *dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns elem as the entry that precedes e in the list.
+func (e *dentryEntry) SetPrev(elem *dentry) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/fsimpl/gofer/fstree.go b/pkg/sentry/fsimpl/gofer/fstree.go
new file mode 100644
index 000000000..6e43d4a4b
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/fstree.go
@@ -0,0 +1,55 @@
+package gofer
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// genericIsAncestorDentry returns true if d is an ancestor of d2; that is, d
+// is either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// genericParentOrSelf returns d.parent. If d.parent is nil, it returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// genericPrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// genericDebugPathname returns a pathname to d relative to its filesystem
+// root. It does not correspond to any Linux function; it is only used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
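The fstree helpers above only ever walk parent pointers and names, so their behaviour can be illustrated without a live filesystem. In the sketch below the dentries are assembled by hand purely for illustration (real dentries are created and linked by the gofer client); it shows genericIsAncestorDentry following the parent chain and genericDebugPathname prepending each component into an fspath.Builder.

package gofer

// examplePathWalk is an illustrative helper showing what the generic fstree
// functions above compute for a small hand-built parent chain.
func examplePathWalk() (string, bool) {
	root := &dentry{name: ""}                 // filesystem root: no parent.
	dir := &dentry{name: "usr", parent: root} // the "usr" directory under root.
	leaf := &dentry{name: "bin", parent: dir} // "bin" under "usr".

	// Walks leaf -> dir -> root, prepending "bin" and then "usr".
	path := genericDebugPathname(leaf)

	// True: root is reached by following leaf's parent pointers.
	isAncestor := genericIsAncestorDentry(root, leaf)

	return path, isAncestor
}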
diff --git a/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
new file mode 100644
index 000000000..26d0ab4db
--- /dev/null
+++ b/pkg/sentry/fsimpl/gofer/gofer_state_autogen.go
@@ -0,0 +1,580 @@
+// automatically generated by stateify.
+
+package gofer
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "off",
+ "dirents",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.off)
+ stateSinkObject.Save(3, &fd.dirents)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.off)
+ stateSourceObject.Load(3, &fd.dirents)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "mfp",
+ "opts",
+ "iopts",
+ "clock",
+ "devMinor",
+ "root",
+ "cachedDentries",
+ "cachedDentriesLen",
+ "syncableDentries",
+ "specialFileFDs",
+ "lastIno",
+ "savedDentryRW",
+ "released",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.mfp)
+ stateSinkObject.Save(2, &fs.opts)
+ stateSinkObject.Save(3, &fs.iopts)
+ stateSinkObject.Save(4, &fs.clock)
+ stateSinkObject.Save(5, &fs.devMinor)
+ stateSinkObject.Save(6, &fs.root)
+ stateSinkObject.Save(7, &fs.cachedDentries)
+ stateSinkObject.Save(8, &fs.cachedDentriesLen)
+ stateSinkObject.Save(9, &fs.syncableDentries)
+ stateSinkObject.Save(10, &fs.specialFileFDs)
+ stateSinkObject.Save(11, &fs.lastIno)
+ stateSinkObject.Save(12, &fs.savedDentryRW)
+ stateSinkObject.Save(13, &fs.released)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.mfp)
+ stateSourceObject.Load(2, &fs.opts)
+ stateSourceObject.Load(3, &fs.iopts)
+ stateSourceObject.Load(4, &fs.clock)
+ stateSourceObject.Load(5, &fs.devMinor)
+ stateSourceObject.Load(6, &fs.root)
+ stateSourceObject.Load(7, &fs.cachedDentries)
+ stateSourceObject.Load(8, &fs.cachedDentriesLen)
+ stateSourceObject.Load(9, &fs.syncableDentries)
+ stateSourceObject.Load(10, &fs.specialFileFDs)
+ stateSourceObject.Load(11, &fs.lastIno)
+ stateSourceObject.Load(12, &fs.savedDentryRW)
+ stateSourceObject.Load(13, &fs.released)
+}
+
+func (f *filesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.filesystemOptions"
+}
+
+func (f *filesystemOptions) StateFields() []string {
+ return []string{
+ "fd",
+ "aname",
+ "interop",
+ "dfltuid",
+ "dfltgid",
+ "msize",
+ "version",
+ "maxCachedDentries",
+ "forcePageCache",
+ "limitHostFDTranslation",
+ "overlayfsStaleRead",
+ "regularFilesUseSpecialFileFD",
+ }
+}
+
+func (f *filesystemOptions) beforeSave() {}
+
+func (f *filesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.fd)
+ stateSinkObject.Save(1, &f.aname)
+ stateSinkObject.Save(2, &f.interop)
+ stateSinkObject.Save(3, &f.dfltuid)
+ stateSinkObject.Save(4, &f.dfltgid)
+ stateSinkObject.Save(5, &f.msize)
+ stateSinkObject.Save(6, &f.version)
+ stateSinkObject.Save(7, &f.maxCachedDentries)
+ stateSinkObject.Save(8, &f.forcePageCache)
+ stateSinkObject.Save(9, &f.limitHostFDTranslation)
+ stateSinkObject.Save(10, &f.overlayfsStaleRead)
+ stateSinkObject.Save(11, &f.regularFilesUseSpecialFileFD)
+}
+
+func (f *filesystemOptions) afterLoad() {}
+
+func (f *filesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.fd)
+ stateSourceObject.Load(1, &f.aname)
+ stateSourceObject.Load(2, &f.interop)
+ stateSourceObject.Load(3, &f.dfltuid)
+ stateSourceObject.Load(4, &f.dfltgid)
+ stateSourceObject.Load(5, &f.msize)
+ stateSourceObject.Load(6, &f.version)
+ stateSourceObject.Load(7, &f.maxCachedDentries)
+ stateSourceObject.Load(8, &f.forcePageCache)
+ stateSourceObject.Load(9, &f.limitHostFDTranslation)
+ stateSourceObject.Load(10, &f.overlayfsStaleRead)
+ stateSourceObject.Load(11, &f.regularFilesUseSpecialFileFD)
+}
+
+func (i *InteropMode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.InteropMode"
+}
+
+func (i *InteropMode) StateFields() []string {
+ return nil
+}
+
+func (i *InternalFilesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.InternalFilesystemOptions"
+}
+
+func (i *InternalFilesystemOptions) StateFields() []string {
+ return []string{
+ "UniqueID",
+ "LeakConnection",
+ "OpenSocketsByConnecting",
+ }
+}
+
+func (i *InternalFilesystemOptions) beforeSave() {}
+
+func (i *InternalFilesystemOptions) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.UniqueID)
+ stateSinkObject.Save(1, &i.LeakConnection)
+ stateSinkObject.Save(2, &i.OpenSocketsByConnecting)
+}
+
+func (i *InternalFilesystemOptions) afterLoad() {}
+
+func (i *InternalFilesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.UniqueID)
+ stateSourceObject.Load(1, &i.LeakConnection)
+ stateSourceObject.Load(2, &i.OpenSocketsByConnecting)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "parent",
+ "name",
+ "qidPath",
+ "deleted",
+ "cached",
+ "dentryEntry",
+ "children",
+ "syntheticChildren",
+ "dirents",
+ "ino",
+ "mode",
+ "uid",
+ "gid",
+ "blockSize",
+ "atime",
+ "mtime",
+ "ctime",
+ "btime",
+ "size",
+ "atimeDirty",
+ "mtimeDirty",
+ "nlink",
+ "mappings",
+ "cache",
+ "dirty",
+ "pf",
+ "haveTarget",
+ "target",
+ "endpoint",
+ "pipe",
+ "locks",
+ "watches",
+ }
+}
+
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.parent)
+ stateSinkObject.Save(4, &d.name)
+ stateSinkObject.Save(5, &d.qidPath)
+ stateSinkObject.Save(6, &d.deleted)
+ stateSinkObject.Save(7, &d.cached)
+ stateSinkObject.Save(8, &d.dentryEntry)
+ stateSinkObject.Save(9, &d.children)
+ stateSinkObject.Save(10, &d.syntheticChildren)
+ stateSinkObject.Save(11, &d.dirents)
+ stateSinkObject.Save(12, &d.ino)
+ stateSinkObject.Save(13, &d.mode)
+ stateSinkObject.Save(14, &d.uid)
+ stateSinkObject.Save(15, &d.gid)
+ stateSinkObject.Save(16, &d.blockSize)
+ stateSinkObject.Save(17, &d.atime)
+ stateSinkObject.Save(18, &d.mtime)
+ stateSinkObject.Save(19, &d.ctime)
+ stateSinkObject.Save(20, &d.btime)
+ stateSinkObject.Save(21, &d.size)
+ stateSinkObject.Save(22, &d.atimeDirty)
+ stateSinkObject.Save(23, &d.mtimeDirty)
+ stateSinkObject.Save(24, &d.nlink)
+ stateSinkObject.Save(25, &d.mappings)
+ stateSinkObject.Save(26, &d.cache)
+ stateSinkObject.Save(27, &d.dirty)
+ stateSinkObject.Save(28, &d.pf)
+ stateSinkObject.Save(29, &d.haveTarget)
+ stateSinkObject.Save(30, &d.target)
+ stateSinkObject.Save(31, &d.endpoint)
+ stateSinkObject.Save(32, &d.pipe)
+ stateSinkObject.Save(33, &d.locks)
+ stateSinkObject.Save(34, &d.watches)
+}
+
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.parent)
+ stateSourceObject.Load(4, &d.name)
+ stateSourceObject.Load(5, &d.qidPath)
+ stateSourceObject.Load(6, &d.deleted)
+ stateSourceObject.Load(7, &d.cached)
+ stateSourceObject.Load(8, &d.dentryEntry)
+ stateSourceObject.Load(9, &d.children)
+ stateSourceObject.Load(10, &d.syntheticChildren)
+ stateSourceObject.Load(11, &d.dirents)
+ stateSourceObject.Load(12, &d.ino)
+ stateSourceObject.Load(13, &d.mode)
+ stateSourceObject.Load(14, &d.uid)
+ stateSourceObject.Load(15, &d.gid)
+ stateSourceObject.Load(16, &d.blockSize)
+ stateSourceObject.Load(17, &d.atime)
+ stateSourceObject.Load(18, &d.mtime)
+ stateSourceObject.Load(19, &d.ctime)
+ stateSourceObject.Load(20, &d.btime)
+ stateSourceObject.Load(21, &d.size)
+ stateSourceObject.Load(22, &d.atimeDirty)
+ stateSourceObject.Load(23, &d.mtimeDirty)
+ stateSourceObject.Load(24, &d.nlink)
+ stateSourceObject.Load(25, &d.mappings)
+ stateSourceObject.Load(26, &d.cache)
+ stateSourceObject.Load(27, &d.dirty)
+ stateSourceObject.Load(28, &d.pf)
+ stateSourceObject.Load(29, &d.haveTarget)
+ stateSourceObject.Load(30, &d.target)
+ stateSourceObject.Load(31, &d.endpoint)
+ stateSourceObject.Load(32, &d.pipe)
+ stateSourceObject.Load(33, &d.locks)
+ stateSourceObject.Load(34, &d.watches)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "off",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.off)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.off)
+}
+
+func (d *dentryPlatformFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.dentryPlatformFile"
+}
+
+func (d *dentryPlatformFile) StateFields() []string {
+ return []string{
+ "dentry",
+ "fdRefs",
+ "hostFileMapper",
+ }
+}
+
+func (d *dentryPlatformFile) beforeSave() {}
+
+func (d *dentryPlatformFile) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dentry)
+ stateSinkObject.Save(1, &d.fdRefs)
+ stateSinkObject.Save(2, &d.hostFileMapper)
+}
+
+func (d *dentryPlatformFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dentry)
+ stateSourceObject.Load(1, &d.fdRefs)
+ stateSourceObject.Load(2, &d.hostFileMapper)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (s *savedDentryRW) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.savedDentryRW"
+}
+
+func (s *savedDentryRW) StateFields() []string {
+ return []string{
+ "read",
+ "write",
+ }
+}
+
+func (s *savedDentryRW) beforeSave() {}
+
+func (s *savedDentryRW) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.read)
+ stateSinkObject.Save(1, &s.write)
+}
+
+func (s *savedDentryRW) afterLoad() {}
+
+func (s *savedDentryRW) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.read)
+ stateSourceObject.Load(1, &s.write)
+}
+
+func (e *endpoint) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.endpoint"
+}
+
+func (e *endpoint) StateFields() []string {
+ return []string{
+ "dentry",
+ "path",
+ }
+}
+
+func (e *endpoint) beforeSave() {}
+
+func (e *endpoint) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.dentry)
+ stateSinkObject.Save(1, &e.path)
+}
+
+func (e *endpoint) afterLoad() {}
+
+func (e *endpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.dentry)
+ stateSourceObject.Load(1, &e.path)
+}
+
+func (fd *specialFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/gofer.specialFileFD"
+}
+
+func (fd *specialFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "isRegularFile",
+ "seekable",
+ "queue",
+ "off",
+ "haveBuf",
+ "buf",
+ }
+}
+
+func (fd *specialFileFD) beforeSave() {}
+
+func (fd *specialFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.isRegularFile)
+ stateSinkObject.Save(2, &fd.seekable)
+ stateSinkObject.Save(3, &fd.queue)
+ stateSinkObject.Save(4, &fd.off)
+ stateSinkObject.Save(5, &fd.haveBuf)
+ stateSinkObject.Save(6, &fd.buf)
+}
+
+func (fd *specialFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.isRegularFile)
+ stateSourceObject.Load(2, &fd.seekable)
+ stateSourceObject.Load(3, &fd.queue)
+ stateSourceObject.Load(4, &fd.off)
+ stateSourceObject.Load(5, &fd.haveBuf)
+ stateSourceObject.Load(6, &fd.buf)
+ stateSourceObject.AfterLoad(fd.afterLoad)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*directoryFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*filesystemOptions)(nil))
+ state.Register((*InteropMode)(nil))
+ state.Register((*InternalFilesystemOptions)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*regularFileFD)(nil))
+ state.Register((*dentryPlatformFile)(nil))
+ state.Register((*savedDentryRW)(nil))
+ state.Register((*endpoint)(nil))
+ state.Register((*specialFileFD)(nil))
+}
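Most of the generated StateLoad methods above end with an empty afterLoad, but dentry, dentryPlatformFile and specialFileFD instead register a hand-written hook via stateSourceObject.AfterLoad, which the state machinery invokes once every field has been loaded. The sketch below shows the shape of such a hook for a hypothetical demoCache type whose derived index is rebuilt rather than saved; the type and its fields are invented for illustration.

package gofer

import "gvisor.dev/gvisor/pkg/state"

// demoCache is a hypothetical savable type: names is saved, while byName is
// derived from it and therefore rebuilt after restore instead of serialized.
type demoCache struct {
	names  []string
	byName map[string]int
}

// afterLoad recomputes the derived index once names has been restored.
func (c *demoCache) afterLoad() {
	c.byName = make(map[string]int, len(c.names))
	for i, n := range c.names {
		c.byName[n] = i
	}
}

// StateLoad mirrors the generated dentry.StateLoad above: saved fields are
// loaded in place and the hook is deferred until the whole object graph is in
// memory, which is why afterLoad can safely use the restored fields.
func (c *demoCache) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.names)
	stateSourceObject.AfterLoad(c.afterLoad)
}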
diff --git a/pkg/sentry/fsimpl/gofer/gofer_test.go b/pkg/sentry/fsimpl/gofer/gofer_test.go
deleted file mode 100644
index 76f08e252..000000000
--- a/pkg/sentry/fsimpl/gofer/gofer_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gofer
-
-import (
- "sync/atomic"
- "testing"
-
- "gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
-)
-
-func TestDestroyIdempotent(t *testing.T) {
- ctx := contexttest.Context(t)
- fs := filesystem{
- mfp: pgalloc.MemoryFileProviderFromContext(ctx),
- opts: filesystemOptions{
- // Test relies on no dentry being held in the cache.
- maxCachedDentries: 0,
- },
- syncableDentries: make(map[*dentry]struct{}),
- inoByQIDPath: make(map[uint64]uint64),
- }
-
- attr := &p9.Attr{
- Mode: p9.ModeRegular,
- }
- mask := p9.AttrMask{
- Mode: true,
- Size: true,
- }
- parent, err := fs.newDentry(ctx, p9file{}, p9.QID{}, mask, attr)
- if err != nil {
- t.Fatalf("fs.newDentry(): %v", err)
- }
-
- child, err := fs.newDentry(ctx, p9file{}, p9.QID{}, mask, attr)
- if err != nil {
- t.Fatalf("fs.newDentry(): %v", err)
- }
- parent.cacheNewChildLocked(child, "child")
-
- fs.renameMu.Lock()
- defer fs.renameMu.Unlock()
- child.checkCachingLocked(ctx)
- if got := atomic.LoadInt64(&child.refs); got != -1 {
- t.Fatalf("child.refs=%d, want: -1", got)
- }
- // Parent will also be destroyed when child reference is removed.
- if got := atomic.LoadInt64(&parent.refs); got != -1 {
- t.Fatalf("parent.refs=%d, want: -1", got)
- }
- child.checkCachingLocked(ctx)
- child.checkCachingLocked(ctx)
-}
diff --git a/pkg/sentry/fsimpl/host/BUILD b/pkg/sentry/fsimpl/host/BUILD
deleted file mode 100644
index 4ae9d6d5e..000000000
--- a/pkg/sentry/fsimpl/host/BUILD
+++ /dev/null
@@ -1,79 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "host",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_template_instance(
- name = "connected_endpoint_refs",
- out = "connected_endpoint_refs.go",
- package = "host",
- prefix = "ConnectedEndpoint",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "ConnectedEndpoint",
- },
-)
-
-go_library(
- name = "host",
- srcs = [
- "connected_endpoint_refs.go",
- "control.go",
- "host.go",
- "inode_refs.go",
- "ioctl_unsafe.go",
- "save_restore.go",
- "socket.go",
- "socket_iovec.go",
- "socket_unsafe.go",
- "tty.go",
- "util.go",
- "util_unsafe.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/iovec",
- "//pkg/log",
- "//pkg/marshal/primitive",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/hostfd",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/control",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/unimpl",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserr",
- "//pkg/syserror",
- "//pkg/tcpip",
- "//pkg/unet",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/host/connected_endpoint_refs.go b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
new file mode 100644
index 000000000..faa9d2fd2
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/connected_endpoint_refs.go
@@ -0,0 +1,128 @@
+package host
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// ConnectedEndpointenableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const ConnectedEndpointenableLogging = false
+
+// ConnectedEndpointobj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var ConnectedEndpointobj *ConnectedEndpoint
+
+// ConnectedEndpointRefs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type ConnectedEndpointRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *ConnectedEndpointRefs) RefType() string {
+ return fmt.Sprintf("%T", ConnectedEndpointobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *ConnectedEndpointRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *ConnectedEndpointRefs) LogRefs() bool {
+ return ConnectedEndpointenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *ConnectedEndpointRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *ConnectedEndpointRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *ConnectedEndpointRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *ConnectedEndpointRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
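Beyond plain counting, the generated type above also ties into reference leak checking: EnableLeakCheck registers the object with refsvfs2, dropping the final reference unregisters it, and the afterLoad hook re-registers any object restored with outstanding references. The fragment below is only a sketch of that lifecycle; the names are illustrative and it assumes leak checking is configured by the surrounding code.

package host

// leakCheckSketch is an illustrative helper showing the leak-check lifecycle
// of the generated ConnectedEndpointRefs: register once, then let the final
// DecRef unregister the object before the destructor runs.
func leakCheckSketch(cleanup func()) {
	var refs ConnectedEndpointRefs // zero value already holds one reference.
	refs.EnableLeakCheck()         // register with refsvfs2 for leak reporting.

	refs.IncRef()        // hand a second reference to another owner.
	refs.DecRef(nil)     // that owner drops it; the object stays registered.
	refs.DecRef(cleanup) // last reference: unregister, then run cleanup.
}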
diff --git a/pkg/sentry/fsimpl/host/host_state_autogen.go b/pkg/sentry/fsimpl/host/host_state_autogen.go
new file mode 100644
index 000000000..705f8010a
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/host_state_autogen.go
@@ -0,0 +1,274 @@
+// automatically generated by stateify.
+
+package host
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *ConnectedEndpointRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.ConnectedEndpointRefs"
+}
+
+func (r *ConnectedEndpointRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *ConnectedEndpointRefs) beforeSave() {}
+
+func (r *ConnectedEndpointRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *ConnectedEndpointRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeNoStatFS",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "CachedMappable",
+ "InodeTemporary",
+ "locks",
+ "inodeRefs",
+ "hostFD",
+ "ino",
+ "ftype",
+ "mayBlock",
+ "seekable",
+ "isTTY",
+ "savable",
+ "queue",
+ "haveBuf",
+ "buf",
+ }
+}
+
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNoStatFS)
+ stateSinkObject.Save(1, &i.InodeNotDirectory)
+ stateSinkObject.Save(2, &i.InodeNotSymlink)
+ stateSinkObject.Save(3, &i.CachedMappable)
+ stateSinkObject.Save(4, &i.InodeTemporary)
+ stateSinkObject.Save(5, &i.locks)
+ stateSinkObject.Save(6, &i.inodeRefs)
+ stateSinkObject.Save(7, &i.hostFD)
+ stateSinkObject.Save(8, &i.ino)
+ stateSinkObject.Save(9, &i.ftype)
+ stateSinkObject.Save(10, &i.mayBlock)
+ stateSinkObject.Save(11, &i.seekable)
+ stateSinkObject.Save(12, &i.isTTY)
+ stateSinkObject.Save(13, &i.savable)
+ stateSinkObject.Save(14, &i.queue)
+ stateSinkObject.Save(15, &i.haveBuf)
+ stateSinkObject.Save(16, &i.buf)
+}
+
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNoStatFS)
+ stateSourceObject.Load(1, &i.InodeNotDirectory)
+ stateSourceObject.Load(2, &i.InodeNotSymlink)
+ stateSourceObject.Load(3, &i.CachedMappable)
+ stateSourceObject.Load(4, &i.InodeTemporary)
+ stateSourceObject.Load(5, &i.locks)
+ stateSourceObject.Load(6, &i.inodeRefs)
+ stateSourceObject.Load(7, &i.hostFD)
+ stateSourceObject.Load(8, &i.ino)
+ stateSourceObject.Load(9, &i.ftype)
+ stateSourceObject.Load(10, &i.mayBlock)
+ stateSourceObject.Load(11, &i.seekable)
+ stateSourceObject.Load(12, &i.isTTY)
+ stateSourceObject.Load(13, &i.savable)
+ stateSourceObject.Load(14, &i.queue)
+ stateSourceObject.Load(15, &i.haveBuf)
+ stateSourceObject.Load(16, &i.buf)
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (f *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.filesystemType"
+}
+
+func (f *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystemType) beforeSave() {}
+
+func (f *filesystemType) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystemType) afterLoad() {}
+
+func (f *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (f *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.fileDescription"
+}
+
+func (f *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "offset",
+ }
+}
+
+func (f *fileDescription) beforeSave() {}
+
+func (f *fileDescription) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.vfsfd)
+ stateSinkObject.Save(1, &f.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &f.LockFD)
+ stateSinkObject.Save(3, &f.inode)
+ stateSinkObject.Save(4, &f.offset)
+}
+
+func (f *fileDescription) afterLoad() {}
+
+func (f *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.vfsfd)
+ stateSourceObject.Load(1, &f.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &f.LockFD)
+ stateSourceObject.Load(3, &f.inode)
+ stateSourceObject.Load(4, &f.offset)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (c *ConnectedEndpoint) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.ConnectedEndpoint"
+}
+
+func (c *ConnectedEndpoint) StateFields() []string {
+ return []string{
+ "ConnectedEndpointRefs",
+ "fd",
+ "addr",
+ "stype",
+ }
+}
+
+func (c *ConnectedEndpoint) beforeSave() {}
+
+func (c *ConnectedEndpoint) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.ConnectedEndpointRefs)
+ stateSinkObject.Save(1, &c.fd)
+ stateSinkObject.Save(2, &c.addr)
+ stateSinkObject.Save(3, &c.stype)
+}
+
+func (c *ConnectedEndpoint) afterLoad() {}
+
+func (c *ConnectedEndpoint) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.ConnectedEndpointRefs)
+ stateSourceObject.Load(1, &c.fd)
+ stateSourceObject.Load(2, &c.addr)
+ stateSourceObject.Load(3, &c.stype)
+}
+
+func (t *TTYFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/host.TTYFileDescription"
+}
+
+func (t *TTYFileDescription) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "session",
+ "fgProcessGroup",
+ "termios",
+ }
+}
+
+func (t *TTYFileDescription) beforeSave() {}
+
+func (t *TTYFileDescription) StateSave(stateSinkObject state.Sink) {
+ t.beforeSave()
+ stateSinkObject.Save(0, &t.fileDescription)
+ stateSinkObject.Save(1, &t.session)
+ stateSinkObject.Save(2, &t.fgProcessGroup)
+ stateSinkObject.Save(3, &t.termios)
+}
+
+func (t *TTYFileDescription) afterLoad() {}
+
+func (t *TTYFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &t.fileDescription)
+ stateSourceObject.Load(1, &t.session)
+ stateSourceObject.Load(2, &t.fgProcessGroup)
+ stateSourceObject.Load(3, &t.termios)
+}
+
+func init() {
+ state.Register((*ConnectedEndpointRefs)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*ConnectedEndpoint)(nil))
+ state.Register((*TTYFileDescription)(nil))
+}
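
To make the generated pattern above concrete, here is a minimal hand-written sketch of the same contract for a hypothetical type (names and package are illustrative, not part of this change). The indices passed to Save/Load must match the positions returned by StateFields, and AfterLoad defers fixup until every field has been loaded:

package example

import "gvisor.dev/gvisor/pkg/state"

type counter struct {
	n int64 // the only saved field
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"n"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(sink state.Sink) {
	c.beforeSave()
	sink.Save(0, &c.n) // index 0 == "n" in StateFields
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(source state.Source) {
	source.Load(0, &c.n)
	source.AfterLoad(c.afterLoad) // runs after all fields are restored
}

func init() { state.Register((*counter)(nil)) }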
diff --git a/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go b/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go
new file mode 100644
index 000000000..b2d8c661f
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/host_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package host
diff --git a/pkg/sentry/fsimpl/host/inode_refs.go b/pkg/sentry/fsimpl/host/inode_refs.go
new file mode 100644
index 000000000..ef2f56522
--- /dev/null
+++ b/pkg/sentry/fsimpl/host/inode_refs.go
@@ -0,0 +1,128 @@
+package host
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// inodeenableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// inodeobj is used to customize logging. Note that we use a pointer to inode so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// inodeRefs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value inodeRefs object contains one reference.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *inodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
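
A small self-contained sketch of the refCount packing described in the comments above (illustrative only): the lower 32 bits hold real references minus one, the upper 32 bits hold in-flight speculative references from TryIncRef, and once the stored value has reached -1 (zero real references) the lower half is negative as an int32, which is exactly the check TryIncRef uses to back out:

package main

import "fmt"

const speculativeRef = int64(1) << 32

func main() {
	var refCount int64 = 0 // zero value: one real reference

	// TryIncRef on a live object: add a speculative ref and observe a
	// non-negative lower half, so it may be converted into a real ref.
	v := refCount + speculativeRef
	fmt.Println(int32(v) >= 0) // true: object is still alive

	// After the final DecRef the stored value is -1; the lower half is
	// negative, so a later TryIncRef removes its speculative ref and
	// returns false instead of resurrecting the object.
	var dead int64 = -1
	fmt.Println(int32(dead+speculativeRef) < 0) // true: refused
}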
diff --git a/pkg/sentry/fsimpl/kernfs/BUILD b/pkg/sentry/fsimpl/kernfs/BUILD
deleted file mode 100644
index 6dbc7e34d..000000000
--- a/pkg/sentry/fsimpl/kernfs/BUILD
+++ /dev/null
@@ -1,148 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "kernfs",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Dentry",
- "Linker": "*Dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "kernfs",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "Dentry",
- },
-)
-
-go_template_instance(
- name = "slot_list",
- out = "slot_list.go",
- package = "kernfs",
- prefix = "slot",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*slot",
- "Linker": "*slot",
- },
-)
-
-go_template_instance(
- name = "static_directory_refs",
- out = "static_directory_refs.go",
- package = "kernfs",
- prefix = "StaticDirectory",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "StaticDirectory",
- },
-)
-
-go_template_instance(
- name = "dir_refs",
- out = "dir_refs.go",
- package = "kernfs_test",
- prefix = "dir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "dir",
- },
-)
-
-go_template_instance(
- name = "readonly_dir_refs",
- out = "readonly_dir_refs.go",
- package = "kernfs_test",
- prefix = "readonlyDir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "readonlyDir",
- },
-)
-
-go_template_instance(
- name = "synthetic_directory_refs",
- out = "synthetic_directory_refs.go",
- package = "kernfs",
- prefix = "syntheticDirectory",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "syntheticDirectory",
- },
-)
-
-go_library(
- name = "kernfs",
- srcs = [
- "dentry_list.go",
- "dynamic_bytes_file.go",
- "fd_impl_util.go",
- "filesystem.go",
- "fstree.go",
- "inode_impl_util.go",
- "kernfs.go",
- "mmap_util.go",
- "save_restore.go",
- "slot_list.go",
- "static_directory_refs.go",
- "symlink.go",
- "synthetic_directory.go",
- "synthetic_directory_refs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "kernfs_test",
- size = "small",
- srcs = [
- "dir_refs.go",
- "kernfs_test.go",
- "readonly_dir_refs.go",
- ],
- deps = [
- ":kernfs",
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/kernfs/dentry_list.go b/pkg/sentry/fsimpl/kernfs/dentry_list.go
new file mode 100644
index 000000000..06101fa32
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/dentry_list.go
@@ -0,0 +1,193 @@
+package kernfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *Dentry) *Dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *Dentry
+ tail *Dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+func (l *dentryList) Front() *Dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+func (l *dentryList) Back() *Dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *dentryList) PushFront(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *dentryList) PushBack(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+func (l *dentryList) InsertAfter(b, e *Dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+func (l *dentryList) InsertBefore(a, e *Dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+func (l *dentryList) Remove(e *Dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *Dentry
+ prev *Dentry
+}
+
+// Next returns the entry that follows e in the list.
+func (e *dentryEntry) Next() *Dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *dentryEntry) Prev() *Dentry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+func (e *dentryEntry) SetNext(elem *Dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+func (e *dentryEntry) SetPrev(elem *Dentry) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/fsimpl/kernfs/fstree.go b/pkg/sentry/fsimpl/kernfs/fstree.go
new file mode 100644
index 000000000..9dc52dabc
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/fstree.go
@@ -0,0 +1,55 @@
+package kernfs
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *Dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *Dentry) *Dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *Dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *Dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
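
The parent walk in genericIsAncestorDentry guards against both a nil parent and a root that points at itself. A minimal stand-in demonstration of that termination logic, using a hypothetical node type rather than kernfs.Dentry:

package main

import "fmt"

type node struct {
	parent *node
}

func isAncestor(a, n *node) bool {
	for n != nil {
		if n.parent == a {
			return true
		}
		if n.parent == n { // root points at itself: stop
			return false
		}
		n = n.parent
	}
	return false
}

func main() {
	root := &node{}
	root.parent = root
	dir := &node{parent: root}
	file := &node{parent: dir}
	fmt.Println(isAncestor(root, file), isAncestor(dir, root)) // true false
}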
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go b/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go
new file mode 100644
index 000000000..9bac6798d
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/kernfs_state_autogen.go
@@ -0,0 +1,925 @@
+// automatically generated by stateify.
+
+package kernfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (f *DynamicBytesFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.DynamicBytesFile"
+}
+
+func (f *DynamicBytesFile) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ "data",
+ }
+}
+
+func (f *DynamicBytesFile) beforeSave() {}
+
+func (f *DynamicBytesFile) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeAttrs)
+ stateSinkObject.Save(1, &f.InodeNoStatFS)
+ stateSinkObject.Save(2, &f.InodeNoopRefCount)
+ stateSinkObject.Save(3, &f.InodeNotDirectory)
+ stateSinkObject.Save(4, &f.InodeNotSymlink)
+ stateSinkObject.Save(5, &f.locks)
+ stateSinkObject.Save(6, &f.data)
+}
+
+func (f *DynamicBytesFile) afterLoad() {}
+
+func (f *DynamicBytesFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeAttrs)
+ stateSourceObject.Load(1, &f.InodeNoStatFS)
+ stateSourceObject.Load(2, &f.InodeNoopRefCount)
+ stateSourceObject.Load(3, &f.InodeNotDirectory)
+ stateSourceObject.Load(4, &f.InodeNotSymlink)
+ stateSourceObject.Load(5, &f.locks)
+ stateSourceObject.Load(6, &f.data)
+}
+
+func (fd *DynamicBytesFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.DynamicBytesFD"
+}
+
+func (fd *DynamicBytesFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "DynamicBytesFileDescriptionImpl",
+ "LockFD",
+ "vfsfd",
+ "inode",
+ }
+}
+
+func (fd *DynamicBytesFD) beforeSave() {}
+
+func (fd *DynamicBytesFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.DynamicBytesFileDescriptionImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.vfsfd)
+ stateSinkObject.Save(4, &fd.inode)
+}
+
+func (fd *DynamicBytesFD) afterLoad() {}
+
+func (fd *DynamicBytesFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.DynamicBytesFileDescriptionImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.vfsfd)
+ stateSourceObject.Load(4, &fd.inode)
+}
+
+func (s *SeekEndConfig) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.SeekEndConfig"
+}
+
+func (s *SeekEndConfig) StateFields() []string {
+ return nil
+}
+
+func (g *GenericDirectoryFDOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.GenericDirectoryFDOptions"
+}
+
+func (g *GenericDirectoryFDOptions) StateFields() []string {
+ return []string{
+ "SeekEnd",
+ }
+}
+
+func (g *GenericDirectoryFDOptions) beforeSave() {}
+
+func (g *GenericDirectoryFDOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.SeekEnd)
+}
+
+func (g *GenericDirectoryFDOptions) afterLoad() {}
+
+func (g *GenericDirectoryFDOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.SeekEnd)
+}
+
+func (fd *GenericDirectoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.GenericDirectoryFD"
+}
+
+func (fd *GenericDirectoryFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "DirectoryFileDescriptionDefaultImpl",
+ "LockFD",
+ "seekEnd",
+ "vfsfd",
+ "children",
+ "off",
+ }
+}
+
+func (fd *GenericDirectoryFD) beforeSave() {}
+
+func (fd *GenericDirectoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.seekEnd)
+ stateSinkObject.Save(4, &fd.vfsfd)
+ stateSinkObject.Save(5, &fd.children)
+ stateSinkObject.Save(6, &fd.off)
+}
+
+func (fd *GenericDirectoryFD) afterLoad() {}
+
+func (fd *GenericDirectoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.seekEnd)
+ stateSourceObject.Load(4, &fd.vfsfd)
+ stateSourceObject.Load(5, &fd.children)
+ stateSourceObject.Load(6, &fd.off)
+}
+
+func (i *InodeNoopRefCount) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNoopRefCount"
+}
+
+func (i *InodeNoopRefCount) StateFields() []string {
+ return []string{
+ "InodeTemporary",
+ }
+}
+
+func (i *InodeNoopRefCount) beforeSave() {}
+
+func (i *InodeNoopRefCount) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeTemporary)
+}
+
+func (i *InodeNoopRefCount) afterLoad() {}
+
+func (i *InodeNoopRefCount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeTemporary)
+}
+
+func (i *InodeDirectoryNoNewChildren) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeDirectoryNoNewChildren"
+}
+
+func (i *InodeDirectoryNoNewChildren) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeDirectoryNoNewChildren) beforeSave() {}
+
+func (i *InodeDirectoryNoNewChildren) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeDirectoryNoNewChildren) afterLoad() {}
+
+func (i *InodeDirectoryNoNewChildren) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeNotDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNotDirectory"
+}
+
+func (i *InodeNotDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ }
+}
+
+func (i *InodeNotDirectory) beforeSave() {}
+
+func (i *InodeNotDirectory) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAlwaysValid)
+}
+
+func (i *InodeNotDirectory) afterLoad() {}
+
+func (i *InodeNotDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAlwaysValid)
+}
+
+func (i *InodeNotSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNotSymlink"
+}
+
+func (i *InodeNotSymlink) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeNotSymlink) beforeSave() {}
+
+func (i *InodeNotSymlink) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeNotSymlink) afterLoad() {}
+
+func (i *InodeNotSymlink) StateLoad(stateSourceObject state.Source) {
+}
+
+func (a *InodeAttrs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeAttrs"
+}
+
+func (a *InodeAttrs) StateFields() []string {
+ return []string{
+ "devMajor",
+ "devMinor",
+ "ino",
+ "mode",
+ "uid",
+ "gid",
+ "nlink",
+ "blockSize",
+ "atime",
+ "mtime",
+ "ctime",
+ }
+}
+
+func (a *InodeAttrs) beforeSave() {}
+
+func (a *InodeAttrs) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+ stateSinkObject.Save(0, &a.devMajor)
+ stateSinkObject.Save(1, &a.devMinor)
+ stateSinkObject.Save(2, &a.ino)
+ stateSinkObject.Save(3, &a.mode)
+ stateSinkObject.Save(4, &a.uid)
+ stateSinkObject.Save(5, &a.gid)
+ stateSinkObject.Save(6, &a.nlink)
+ stateSinkObject.Save(7, &a.blockSize)
+ stateSinkObject.Save(8, &a.atime)
+ stateSinkObject.Save(9, &a.mtime)
+ stateSinkObject.Save(10, &a.ctime)
+}
+
+func (a *InodeAttrs) afterLoad() {}
+
+func (a *InodeAttrs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &a.devMajor)
+ stateSourceObject.Load(1, &a.devMinor)
+ stateSourceObject.Load(2, &a.ino)
+ stateSourceObject.Load(3, &a.mode)
+ stateSourceObject.Load(4, &a.uid)
+ stateSourceObject.Load(5, &a.gid)
+ stateSourceObject.Load(6, &a.nlink)
+ stateSourceObject.Load(7, &a.blockSize)
+ stateSourceObject.Load(8, &a.atime)
+ stateSourceObject.Load(9, &a.mtime)
+ stateSourceObject.Load(10, &a.ctime)
+}
+
+func (s *slot) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slot"
+}
+
+func (s *slot) StateFields() []string {
+ return []string{
+ "name",
+ "inode",
+ "static",
+ "slotEntry",
+ }
+}
+
+func (s *slot) beforeSave() {}
+
+func (s *slot) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.name)
+ stateSinkObject.Save(1, &s.inode)
+ stateSinkObject.Save(2, &s.static)
+ stateSinkObject.Save(3, &s.slotEntry)
+}
+
+func (s *slot) afterLoad() {}
+
+func (s *slot) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.name)
+ stateSourceObject.Load(1, &s.inode)
+ stateSourceObject.Load(2, &s.static)
+ stateSourceObject.Load(3, &s.slotEntry)
+}
+
+func (o *OrderedChildrenOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.OrderedChildrenOptions"
+}
+
+func (o *OrderedChildrenOptions) StateFields() []string {
+ return []string{
+ "Writable",
+ }
+}
+
+func (o *OrderedChildrenOptions) beforeSave() {}
+
+func (o *OrderedChildrenOptions) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.Writable)
+}
+
+func (o *OrderedChildrenOptions) afterLoad() {}
+
+func (o *OrderedChildrenOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.Writable)
+}
+
+func (o *OrderedChildren) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.OrderedChildren"
+}
+
+func (o *OrderedChildren) StateFields() []string {
+ return []string{
+ "writable",
+ "order",
+ "set",
+ }
+}
+
+func (o *OrderedChildren) beforeSave() {}
+
+func (o *OrderedChildren) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.writable)
+ stateSinkObject.Save(1, &o.order)
+ stateSinkObject.Save(2, &o.set)
+}
+
+func (o *OrderedChildren) afterLoad() {}
+
+func (o *OrderedChildren) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.writable)
+ stateSourceObject.Load(1, &o.order)
+ stateSourceObject.Load(2, &o.set)
+}
+
+func (r *renameAcrossDifferentImplementationsError) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.renameAcrossDifferentImplementationsError"
+}
+
+func (r *renameAcrossDifferentImplementationsError) StateFields() []string {
+ return []string{}
+}
+
+func (r *renameAcrossDifferentImplementationsError) beforeSave() {}
+
+func (r *renameAcrossDifferentImplementationsError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *renameAcrossDifferentImplementationsError) afterLoad() {}
+
+func (r *renameAcrossDifferentImplementationsError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeSymlink"
+}
+
+func (i *InodeSymlink) StateFields() []string {
+ return []string{
+ "InodeNotDirectory",
+ }
+}
+
+func (i *InodeSymlink) beforeSave() {}
+
+func (i *InodeSymlink) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNotDirectory)
+}
+
+func (i *InodeSymlink) afterLoad() {}
+
+func (i *InodeSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNotDirectory)
+}
+
+func (s *StaticDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticDirectory"
+}
+
+func (s *StaticDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNoStatFS",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "StaticDirectoryRefs",
+ "locks",
+ "fdOpts",
+ }
+}
+
+func (s *StaticDirectory) beforeSave() {}
+
+func (s *StaticDirectory) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeAlwaysValid)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(3, &s.InodeNoStatFS)
+ stateSinkObject.Save(4, &s.InodeNotSymlink)
+ stateSinkObject.Save(5, &s.InodeTemporary)
+ stateSinkObject.Save(6, &s.OrderedChildren)
+ stateSinkObject.Save(7, &s.StaticDirectoryRefs)
+ stateSinkObject.Save(8, &s.locks)
+ stateSinkObject.Save(9, &s.fdOpts)
+}
+
+func (s *StaticDirectory) afterLoad() {}
+
+func (s *StaticDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeAlwaysValid)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(3, &s.InodeNoStatFS)
+ stateSourceObject.Load(4, &s.InodeNotSymlink)
+ stateSourceObject.Load(5, &s.InodeTemporary)
+ stateSourceObject.Load(6, &s.OrderedChildren)
+ stateSourceObject.Load(7, &s.StaticDirectoryRefs)
+ stateSourceObject.Load(8, &s.locks)
+ stateSourceObject.Load(9, &s.fdOpts)
+}
+
+func (i *InodeAlwaysValid) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeAlwaysValid"
+}
+
+func (i *InodeAlwaysValid) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeAlwaysValid) beforeSave() {}
+
+func (i *InodeAlwaysValid) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeAlwaysValid) afterLoad() {}
+
+func (i *InodeAlwaysValid) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeTemporary) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeTemporary"
+}
+
+func (i *InodeTemporary) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeTemporary) beforeSave() {}
+
+func (i *InodeTemporary) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeTemporary) afterLoad() {}
+
+func (i *InodeTemporary) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *InodeNoStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.InodeNoStatFS"
+}
+
+func (i *InodeNoStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *InodeNoStatFS) beforeSave() {}
+
+func (i *InodeNoStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *InodeNoStatFS) afterLoad() {}
+
+func (i *InodeNoStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *Filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.Filesystem"
+}
+
+func (fs *Filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "droppedDentries",
+ "nextInoMinusOne",
+ "cachedDentries",
+ "cachedDentriesLen",
+ "MaxCachedDentries",
+ }
+}
+
+func (fs *Filesystem) beforeSave() {}
+
+func (fs *Filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.droppedDentries)
+ stateSinkObject.Save(2, &fs.nextInoMinusOne)
+ stateSinkObject.Save(3, &fs.cachedDentries)
+ stateSinkObject.Save(4, &fs.cachedDentriesLen)
+ stateSinkObject.Save(5, &fs.MaxCachedDentries)
+}
+
+func (fs *Filesystem) afterLoad() {}
+
+func (fs *Filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.droppedDentries)
+ stateSourceObject.Load(2, &fs.nextInoMinusOne)
+ stateSourceObject.Load(3, &fs.cachedDentries)
+ stateSourceObject.Load(4, &fs.cachedDentriesLen)
+ stateSourceObject.Load(5, &fs.MaxCachedDentries)
+}
+
+func (d *Dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.Dentry"
+}
+
+func (d *Dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "flags",
+ "parent",
+ "name",
+ "cached",
+ "dentryEntry",
+ "children",
+ "inode",
+ }
+}
+
+func (d *Dentry) beforeSave() {}
+
+func (d *Dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.flags)
+ stateSinkObject.Save(4, &d.parent)
+ stateSinkObject.Save(5, &d.name)
+ stateSinkObject.Save(6, &d.cached)
+ stateSinkObject.Save(7, &d.dentryEntry)
+ stateSinkObject.Save(8, &d.children)
+ stateSinkObject.Save(9, &d.inode)
+}
+
+func (d *Dentry) afterLoad() {}
+
+func (d *Dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.flags)
+ stateSourceObject.Load(4, &d.parent)
+ stateSourceObject.Load(5, &d.name)
+ stateSourceObject.Load(6, &d.cached)
+ stateSourceObject.Load(7, &d.dentryEntry)
+ stateSourceObject.Load(8, &d.children)
+ stateSourceObject.Load(9, &d.inode)
+}
+
+func (i *inodePlatformFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.inodePlatformFile"
+}
+
+func (i *inodePlatformFile) StateFields() []string {
+ return []string{
+ "hostFD",
+ "fdRefs",
+ "fileMapper",
+ }
+}
+
+func (i *inodePlatformFile) beforeSave() {}
+
+func (i *inodePlatformFile) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.hostFD)
+ stateSinkObject.Save(1, &i.fdRefs)
+ stateSinkObject.Save(2, &i.fileMapper)
+}
+
+func (i *inodePlatformFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.hostFD)
+ stateSourceObject.Load(1, &i.fdRefs)
+ stateSourceObject.Load(2, &i.fileMapper)
+ stateSourceObject.AfterLoad(i.afterLoad)
+}
+
+func (i *CachedMappable) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.CachedMappable"
+}
+
+func (i *CachedMappable) StateFields() []string {
+ return []string{
+ "mappings",
+ "pf",
+ }
+}
+
+func (i *CachedMappable) beforeSave() {}
+
+func (i *CachedMappable) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.mappings)
+ stateSinkObject.Save(1, &i.pf)
+}
+
+func (i *CachedMappable) afterLoad() {}
+
+func (i *CachedMappable) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.mappings)
+ stateSourceObject.Load(1, &i.pf)
+}
+
+func (l *slotList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slotList"
+}
+
+func (l *slotList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *slotList) beforeSave() {}
+
+func (l *slotList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *slotList) afterLoad() {}
+
+func (l *slotList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *slotEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.slotEntry"
+}
+
+func (e *slotEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *slotEntry) beforeSave() {}
+
+func (e *slotEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *slotEntry) afterLoad() {}
+
+func (e *slotEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (r *StaticDirectoryRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticDirectoryRefs"
+}
+
+func (r *StaticDirectoryRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *StaticDirectoryRefs) beforeSave() {}
+
+func (r *StaticDirectoryRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *StaticDirectoryRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (s *StaticSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.StaticSymlink"
+}
+
+func (s *StaticSymlink) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "InodeNoStatFS",
+ "target",
+ }
+}
+
+func (s *StaticSymlink) beforeSave() {}
+
+func (s *StaticSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.InodeAttrs)
+ stateSinkObject.Save(1, &s.InodeNoopRefCount)
+ stateSinkObject.Save(2, &s.InodeSymlink)
+ stateSinkObject.Save(3, &s.InodeNoStatFS)
+ stateSinkObject.Save(4, &s.target)
+}
+
+func (s *StaticSymlink) afterLoad() {}
+
+func (s *StaticSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.InodeAttrs)
+ stateSourceObject.Load(1, &s.InodeNoopRefCount)
+ stateSourceObject.Load(2, &s.InodeSymlink)
+ stateSourceObject.Load(3, &s.InodeNoStatFS)
+ stateSourceObject.Load(4, &s.target)
+}
+
+func (dir *syntheticDirectory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.syntheticDirectory"
+}
+
+func (dir *syntheticDirectory) StateFields() []string {
+ return []string{
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNotSymlink",
+ "OrderedChildren",
+ "syntheticDirectoryRefs",
+ "locks",
+ }
+}
+
+func (dir *syntheticDirectory) beforeSave() {}
+
+func (dir *syntheticDirectory) StateSave(stateSinkObject state.Sink) {
+ dir.beforeSave()
+ stateSinkObject.Save(0, &dir.InodeAlwaysValid)
+ stateSinkObject.Save(1, &dir.InodeAttrs)
+ stateSinkObject.Save(2, &dir.InodeNoStatFS)
+ stateSinkObject.Save(3, &dir.InodeNotSymlink)
+ stateSinkObject.Save(4, &dir.OrderedChildren)
+ stateSinkObject.Save(5, &dir.syntheticDirectoryRefs)
+ stateSinkObject.Save(6, &dir.locks)
+}
+
+func (dir *syntheticDirectory) afterLoad() {}
+
+func (dir *syntheticDirectory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &dir.InodeAlwaysValid)
+ stateSourceObject.Load(1, &dir.InodeAttrs)
+ stateSourceObject.Load(2, &dir.InodeNoStatFS)
+ stateSourceObject.Load(3, &dir.InodeNotSymlink)
+ stateSourceObject.Load(4, &dir.OrderedChildren)
+ stateSourceObject.Load(5, &dir.syntheticDirectoryRefs)
+ stateSourceObject.Load(6, &dir.locks)
+}
+
+func (r *syntheticDirectoryRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/kernfs.syntheticDirectoryRefs"
+}
+
+func (r *syntheticDirectoryRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *syntheticDirectoryRefs) beforeSave() {}
+
+func (r *syntheticDirectoryRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *syntheticDirectoryRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*DynamicBytesFile)(nil))
+ state.Register((*DynamicBytesFD)(nil))
+ state.Register((*SeekEndConfig)(nil))
+ state.Register((*GenericDirectoryFDOptions)(nil))
+ state.Register((*GenericDirectoryFD)(nil))
+ state.Register((*InodeNoopRefCount)(nil))
+ state.Register((*InodeDirectoryNoNewChildren)(nil))
+ state.Register((*InodeNotDirectory)(nil))
+ state.Register((*InodeNotSymlink)(nil))
+ state.Register((*InodeAttrs)(nil))
+ state.Register((*slot)(nil))
+ state.Register((*OrderedChildrenOptions)(nil))
+ state.Register((*OrderedChildren)(nil))
+ state.Register((*renameAcrossDifferentImplementationsError)(nil))
+ state.Register((*InodeSymlink)(nil))
+ state.Register((*StaticDirectory)(nil))
+ state.Register((*InodeAlwaysValid)(nil))
+ state.Register((*InodeTemporary)(nil))
+ state.Register((*InodeNoStatFS)(nil))
+ state.Register((*Filesystem)(nil))
+ state.Register((*Dentry)(nil))
+ state.Register((*inodePlatformFile)(nil))
+ state.Register((*CachedMappable)(nil))
+ state.Register((*slotList)(nil))
+ state.Register((*slotEntry)(nil))
+ state.Register((*StaticDirectoryRefs)(nil))
+ state.Register((*StaticSymlink)(nil))
+ state.Register((*syntheticDirectory)(nil))
+ state.Register((*syntheticDirectoryRefs)(nil))
+}
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_test.go b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
deleted file mode 100644
index 2418eec44..000000000
--- a/pkg/sentry/fsimpl/kernfs/kernfs_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package kernfs_test
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const defaultMode linux.FileMode = 01777
-const staticFileContent = "This is sample content for a static test file."
-
-// RootDentryFn is a generator function for creating the root dentry of a test
-// filesystem. See newTestSystem.
-type RootDentryFn func(context.Context, *auth.Credentials, *filesystem) kernfs.Inode
-
-// newTestSystem sets up a minimal environment for running a test, including an
-// instance of a test filesystem. Tests can control the contents of the
-// filesystem by providing an appropriate rootFn, which should return a
-// pre-populated root dentry.
-func newTestSystem(t *testing.T, rootFn RootDentryFn) *testutil.System {
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
- v := &vfs.VirtualFilesystem{}
- if err := v.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- v.MustRegisterFilesystemType("testfs", &fsType{rootFn: rootFn}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mns, err := v.NewMountNamespace(ctx, creds, "", "testfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("Failed to create testfs root mount: %v", err)
- }
- return testutil.NewSystem(ctx, t, v, mns)
-}
-
-type fsType struct {
- rootFn RootDentryFn
-}
-
-type filesystem struct {
- kernfs.Filesystem
-}
-
-type file struct {
- kernfs.DynamicBytesFile
- content string
-}
-
-func (fs *filesystem) newFile(ctx context.Context, creds *auth.Credentials, content string) kernfs.Inode {
- f := &file{}
- f.content = content
- f.DynamicBytesFile.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), f, 0777)
- return f
-}
-
-func (f *file) Generate(ctx context.Context, buf *bytes.Buffer) error {
- fmt.Fprintf(buf, "%s", f.content)
- return nil
-}
-
-type attrs struct {
- kernfs.InodeAttrs
-}
-
-func (*attrs) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
-}
-
-type readonlyDir struct {
- readonlyDirRefs
- attrs
- kernfs.InodeAlwaysValid
- kernfs.InodeDirectoryNoNewChildren
- kernfs.InodeNoStatFS
- kernfs.InodeNotSymlink
- kernfs.InodeTemporary
- kernfs.OrderedChildren
-
- locks vfs.FileLocks
-}
-
-func (fs *filesystem) newReadonlyDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
- dir := &readonlyDir{}
- dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
- dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- dir.EnableLeakCheck()
- dir.IncLinks(dir.OrderedChildren.Populate(contents))
- return dir
-}
-
-func (d *readonlyDir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
- SeekEnd: kernfs.SeekEndStaticEntries,
- })
- if err != nil {
- return nil, err
- }
- return fd.VFSFileDescription(), nil
-}
-
-func (d *readonlyDir) DecRef(ctx context.Context) {
- d.readonlyDirRefs.DecRef(func() { d.Destroy(ctx) })
-}
-
-type dir struct {
- dirRefs
- attrs
- kernfs.InodeAlwaysValid
- kernfs.InodeNotSymlink
- kernfs.InodeNoStatFS
- kernfs.InodeTemporary
- kernfs.OrderedChildren
-
- locks vfs.FileLocks
-
- fs *filesystem
-}
-
-func (fs *filesystem) newDir(ctx context.Context, creds *auth.Credentials, mode linux.FileMode, contents map[string]kernfs.Inode) kernfs.Inode {
- dir := &dir{}
- dir.fs = fs
- dir.attrs.Init(ctx, creds, 0 /* devMajor */, 0 /* devMinor */, fs.NextIno(), linux.ModeDirectory|mode)
- dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{Writable: true})
- dir.EnableLeakCheck()
-
- dir.IncLinks(dir.OrderedChildren.Populate(contents))
- return dir
-}
-
-func (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- fd, err := kernfs.NewGenericDirectoryFD(rp.Mount(), kd, &d.OrderedChildren, &d.locks, &opts, kernfs.GenericDirectoryFDOptions{
- SeekEnd: kernfs.SeekEndStaticEntries,
- })
- if err != nil {
- return nil, err
- }
- return fd.VFSFileDescription(), nil
-}
-
-func (d *dir) DecRef(ctx context.Context) {
- d.dirRefs.DecRef(func() { d.Destroy(ctx) })
-}
-
-func (d *dir) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (kernfs.Inode, error) {
- creds := auth.CredentialsFromContext(ctx)
- dir := d.fs.newDir(ctx, creds, opts.Mode, nil)
- if err := d.OrderedChildren.Insert(name, dir); err != nil {
- dir.DecRef(ctx)
- return nil, err
- }
- d.TouchCMtime(ctx)
- d.IncLinks(1)
- return dir, nil
-}
-
-func (d *dir) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (kernfs.Inode, error) {
- creds := auth.CredentialsFromContext(ctx)
- f := d.fs.newFile(ctx, creds, "")
- if err := d.OrderedChildren.Insert(name, f); err != nil {
- f.DecRef(ctx)
- return nil, err
- }
- d.TouchCMtime(ctx)
- return f, nil
-}
-
-func (*dir) NewLink(context.Context, string, kernfs.Inode) (kernfs.Inode, error) {
- return nil, syserror.EPERM
-}
-
-func (*dir) NewSymlink(context.Context, string, string) (kernfs.Inode, error) {
- return nil, syserror.EPERM
-}
-
-func (*dir) NewNode(context.Context, string, vfs.MknodOptions) (kernfs.Inode, error) {
- return nil, syserror.EPERM
-}
-
-func (fsType) Name() string {
- return "kernfs"
-}
-
-func (fsType) Release(ctx context.Context) {}
-
-func (fst fsType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opt vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- fs := &filesystem{}
- fs.VFSFilesystem().Init(vfsObj, &fst, fs)
- root := fst.rootFn(ctx, creds, fs)
- var d kernfs.Dentry
- d.Init(&fs.Filesystem, root)
- return fs.VFSFilesystem(), d.VFSDentry(), nil
-}
-
-// -------------------- Remainder of the file are test cases --------------------
-
-func TestBasic(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
- sys.GetDentryOrDie(sys.PathOpAtRoot("file1")).DecRef(sys.Ctx)
-}
-
-func TestMkdirGetDentry(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir1": fs.newDir(ctx, creds, 0755, nil),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("dir1/a new directory")
- if err := sys.VFS.MkdirAt(sys.Ctx, sys.Creds, pop, &vfs.MkdirOptions{Mode: 0755}); err != nil {
- t.Fatalf("MkdirAt for PathOperation %+v failed: %v", pop, err)
- }
- sys.GetDentryOrDie(pop).DecRef(sys.Ctx)
-}
-
-func TestReadStaticFile(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("file1")
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(sys.Ctx)
-
- content, err := sys.ReadToEnd(fd)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- if diff := cmp.Diff(staticFileContent, content); diff != "" {
- t.Fatalf("Read returned unexpected data:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-func TestCreateNewFileInStaticDir(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir1": fs.newDir(ctx, creds, 0755, nil),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("dir1/newfile")
- opts := &vfs.OpenOptions{Flags: linux.O_CREAT | linux.O_EXCL, Mode: defaultMode}
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, opts)
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v, opts:%+v) failed: %v", pop, opts, err)
- }
-
- // Close the file. The file should persist.
- fd.DecRef(sys.Ctx)
-
- fd, err = sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v) = %+v failed: %v", pop, fd, err)
- }
- fd.DecRef(sys.Ctx)
-}
-
-func TestDirFDReadWrite(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, nil)
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("/")
- fd, err := sys.VFS.OpenAt(sys.Ctx, sys.Creds, pop, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(sys.Ctx)
-
- // Read/Write should fail for directory FDs.
- if _, err := fd.Read(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.ReadOptions{}); err != syserror.EISDIR {
- t.Fatalf("Read for directory FD failed with unexpected error: %v", err)
- }
- if _, err := fd.Write(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.WriteOptions{}); err != syserror.EBADF {
- t.Fatalf("Write for directory FD failed with unexpected error: %v", err)
- }
-}
-
-func TestDirFDIterDirents(t *testing.T) {
- sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
- return fs.newReadonlyDir(ctx, creds, 0755, map[string]kernfs.Inode{
- // Fill root with nodes backed by various inode implementations.
- "dir1": fs.newReadonlyDir(ctx, creds, 0755, nil),
- "dir2": fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
- "dir3": fs.newDir(ctx, creds, 0755, nil),
- }),
- "file1": fs.newFile(ctx, creds, staticFileContent),
- })
- })
- defer sys.Destroy()
-
- pop := sys.PathOpAtRoot("/")
- sys.AssertAllDirentTypes(sys.ListDirents(pop), map[string]testutil.DirentType{
- "dir1": linux.DT_DIR,
- "dir2": linux.DT_DIR,
- "file1": linux.DT_REG,
- })
-}
diff --git a/pkg/sentry/fsimpl/kernfs/slot_list.go b/pkg/sentry/fsimpl/kernfs/slot_list.go
new file mode 100644
index 000000000..c6cd74660
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/slot_list.go
@@ -0,0 +1,193 @@
+package kernfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type slotElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (slotElementMapper) linkerFor(elem *slot) *slot { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type slotList struct {
+ head *slot
+ tail *slot
+}
+
+// Reset resets list l to the empty state.
+func (l *slotList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+func (l *slotList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+func (l *slotList) Front() *slot {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+func (l *slotList) Back() *slot {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+func (l *slotList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (slotElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *slotList) PushFront(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ slotElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *slotList) PushBack(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ slotElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *slotList) PushBackList(m *slotList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ slotElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ slotElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+func (l *slotList) InsertAfter(b, e *slot) {
+ bLinker := slotElementMapper{}.linkerFor(b)
+ eLinker := slotElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ slotElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+func (l *slotList) InsertBefore(a, e *slot) {
+ aLinker := slotElementMapper{}.linkerFor(a)
+ eLinker := slotElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ slotElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+func (l *slotList) Remove(e *slot) {
+ linker := slotElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ slotElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ slotElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type slotEntry struct {
+ next *slot
+ prev *slot
+}
+
+// Next returns the entry that follows e in the list.
+func (e *slotEntry) Next() *slot {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *slotEntry) Prev() *slot {
+ return e.prev
+}
+
+// SetNext assigns 'elem' as the entry that follows e in the list.
+func (e *slotEntry) SetNext(elem *slot) {
+ e.next = elem
+}
+
+// SetPrev assigns 'elem' as the entry that precedes e in the list.
+func (e *slotEntry) SetPrev(elem *slot) {
+ e.prev = elem
+}
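
The slot_list.go file above is a generated instance of gvisor's intrusive doubly-linked list: the element embeds slotEntry (the Linker), so insertion and removal are O(1) and allocate nothing beyond the element itself. The standalone sketch below mirrors that pattern; item, itemEntry and itemList are illustrative names, not types from this diff.

// Minimal sketch of the intrusive-list pattern that slot_list.go instantiates.
package main

import "fmt"

type item struct {
	value     int
	itemEntry // embedded linker: the next/prev pointers live inside the element
}

type itemEntry struct {
	next, prev *item
}

func (e *itemEntry) Next() *item     { return e.next }
func (e *itemEntry) Prev() *item     { return e.prev }
func (e *itemEntry) SetNext(i *item) { e.next = i }
func (e *itemEntry) SetPrev(i *item) { e.prev = i }

type itemList struct {
	head, tail *item
}

// PushBack links e at the tail in O(1), like slotList.PushBack above.
func (l *itemList) PushBack(e *item) {
	e.SetNext(nil)
	e.SetPrev(l.tail)
	if l.tail != nil {
		l.tail.SetNext(e)
	} else {
		l.head = e
	}
	l.tail = e
}

// Front returns the first element or nil; the zero value is an empty list.
func (l *itemList) Front() *item { return l.head }

func main() {
	var l itemList
	for i := 1; i <= 3; i++ {
		l.PushBack(&item{value: i})
	}
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.value) // prints 1, 2, 3
	}
}
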
diff --git a/pkg/sentry/fsimpl/kernfs/static_directory_refs.go b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
new file mode 100644
index 000000000..d86cf76fe
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/static_directory_refs.go
@@ -0,0 +1,128 @@
+package kernfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const StaticDirectoryenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var StaticDirectoryobj *StaticDirectory
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type StaticDirectoryRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *StaticDirectoryRefs) RefType() string {
+ return fmt.Sprintf("%T", StaticDirectoryobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *StaticDirectoryRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *StaticDirectoryRefs) LogRefs() bool {
+ return StaticDirectoryenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *StaticDirectoryRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *StaticDirectoryRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *StaticDirectoryRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *StaticDirectoryRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
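
StaticDirectoryRefs (and every other generated *Refs type in this diff) stores real references as refCount + 1, so the zero value already holds one reference, and TryIncRef parks a speculative reference in the upper 32 bits to avoid a compare-and-swap loop. The standalone sketch below reproduces just that arithmetic with an illustrative counter type; the refsvfs2 registration and leak-check hooks used by the generated code are omitted.

// Standalone sketch of the refCount+1 / speculative-bit scheme.
package main

import (
	"fmt"
	"sync/atomic"
)

type counter struct {
	// Real references = refCount + 1; the upper 32 bits hold speculative
	// references taken by TryIncRef.
	refCount int64
}

func (c *counter) IncRef() {
	if v := atomic.AddInt64(&c.refCount, 1); v <= 0 {
		panic("IncRef on released object")
	}
}

func (c *counter) TryIncRef() bool {
	const speculativeRef = 1 << 32
	// If the low 32 bits are negative, the real count already reached -1
	// (zero live references), so back the speculative reference out.
	if v := atomic.AddInt64(&c.refCount, speculativeRef); int32(v) < 0 {
		atomic.AddInt64(&c.refCount, -speculativeRef)
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&c.refCount, -speculativeRef+1)
	return true
}

func (c *counter) DecRef(destroy func()) {
	switch v := atomic.AddInt64(&c.refCount, -1); {
	case v < -1:
		panic("DecRef on released object")
	case v == -1:
		if destroy != nil {
			destroy() // the real count reached zero
		}
	}
}

func main() {
	released := false
	var c counter // zero value already holds one reference
	c.IncRef()
	fmt.Println(c.TryIncRef()) // true: the object is still live
	c.DecRef(func() { released = true })
	c.DecRef(func() { released = true })
	c.DecRef(func() { released = true })
	fmt.Println(released) // true once the last reference is dropped
}
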
diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
new file mode 100644
index 000000000..446837e30
--- /dev/null
+++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory_refs.go
@@ -0,0 +1,128 @@
+package kernfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const syntheticDirectoryenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var syntheticDirectoryobj *syntheticDirectory
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type syntheticDirectoryRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *syntheticDirectoryRefs) RefType() string {
+ return fmt.Sprintf("%T", syntheticDirectoryobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *syntheticDirectoryRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *syntheticDirectoryRefs) LogRefs() bool {
+ return syntheticDirectoryenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *syntheticDirectoryRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *syntheticDirectoryRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *syntheticDirectoryRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *syntheticDirectoryRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
diff --git a/pkg/sentry/fsimpl/overlay/BUILD b/pkg/sentry/fsimpl/overlay/BUILD
deleted file mode 100644
index bf13bbbf4..000000000
--- a/pkg/sentry/fsimpl/overlay/BUILD
+++ /dev/null
@@ -1,47 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "overlay",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "overlay",
- srcs = [
- "copy_up.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "overlay.go",
- "regular_file.go",
- "save_restore.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/overlay/fstree.go b/pkg/sentry/fsimpl/overlay/fstree.go
new file mode 100644
index 000000000..c3eb062ed
--- /dev/null
+++ b/pkg/sentry/fsimpl/overlay/fstree.go
@@ -0,0 +1,55 @@
+package overlay
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
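
fstree.go is the genericfstree instantiation for the overlay dentry: genericPrependPath walks parent pointers toward the mount (or VFS) root, prepending one path component per step, and genericDebugPathname reuses that walk to build debug pathnames. The sketch below shows the same walk over a plain parent-pointer tree with illustrative names; it uses strings.Builder instead of fspath.Builder and drops the mount and VFS-root checks.

// Standalone sketch of the prepend-path walk performed by genericPrependPath.
package main

import (
	"fmt"
	"strings"
)

type node struct {
	parent *node
	name   string
}

// pathTo walks parent pointers from d to the root, prepending "/<name>" for
// each component, roughly what genericDebugPathname produces for debugging.
func pathTo(d *node) string {
	var components []string
	for d != nil && d.parent != nil {
		components = append(components, d.name)
		d = d.parent
	}
	var b strings.Builder
	for i := len(components) - 1; i >= 0; i-- {
		b.WriteByte('/')
		b.WriteString(components[i])
	}
	if b.Len() == 0 {
		return "/"
	}
	return b.String()
}

func main() {
	root := &node{}
	usr := &node{parent: root, name: "usr"}
	bin := &node{parent: usr, name: "bin"}
	fmt.Println(pathTo(bin)) // /usr/bin
}
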
diff --git a/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go b/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go
new file mode 100644
index 000000000..6c01b2fbf
--- /dev/null
+++ b/pkg/sentry/fsimpl/overlay/overlay_state_autogen.go
@@ -0,0 +1,311 @@
+// automatically generated by stateify.
+
+package overlay
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "off",
+ "dirents",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &fd.off)
+ stateSinkObject.Save(4, &fd.dirents)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &fd.off)
+ stateSourceObject.Load(4, &fd.dirents)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (f *FilesystemOptions) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.FilesystemOptions"
+}
+
+func (f *FilesystemOptions) StateFields() []string {
+ return []string{
+ "UpperRoot",
+ "LowerRoots",
+ }
+}
+
+func (f *FilesystemOptions) beforeSave() {}
+
+func (f *FilesystemOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.UpperRoot)
+ stateSinkObject.Save(1, &f.LowerRoots)
+}
+
+func (f *FilesystemOptions) afterLoad() {}
+
+func (f *FilesystemOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.UpperRoot)
+ stateSourceObject.Load(1, &f.LowerRoots)
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "opts",
+ "creds",
+ "dirDevMinor",
+ "lowerDevMinors",
+ "lastDirIno",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.opts)
+ stateSinkObject.Save(2, &fs.creds)
+ stateSinkObject.Save(3, &fs.dirDevMinor)
+ stateSinkObject.Save(4, &fs.lowerDevMinors)
+ stateSinkObject.Save(5, &fs.lastDirIno)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.opts)
+ stateSourceObject.Load(2, &fs.creds)
+ stateSourceObject.Load(3, &fs.dirDevMinor)
+ stateSourceObject.Load(4, &fs.lowerDevMinors)
+ stateSourceObject.Load(5, &fs.lastDirIno)
+}
+
+func (l *layerDevNumber) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.layerDevNumber"
+}
+
+func (l *layerDevNumber) StateFields() []string {
+ return []string{
+ "major",
+ "minor",
+ }
+}
+
+func (l *layerDevNumber) beforeSave() {}
+
+func (l *layerDevNumber) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.major)
+ stateSinkObject.Save(1, &l.minor)
+}
+
+func (l *layerDevNumber) afterLoad() {}
+
+func (l *layerDevNumber) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.major)
+ stateSourceObject.Load(1, &l.minor)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "refs",
+ "fs",
+ "mode",
+ "uid",
+ "gid",
+ "copiedUp",
+ "parent",
+ "name",
+ "children",
+ "dirents",
+ "upperVD",
+ "lowerVDs",
+ "inlineLowerVDs",
+ "devMajor",
+ "devMinor",
+ "ino",
+ "lowerMappings",
+ "wrappedMappable",
+ "isMappable",
+ "locks",
+ "watches",
+ }
+}
+
+func (d *dentry) beforeSave() {}
+
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.refs)
+ stateSinkObject.Save(2, &d.fs)
+ stateSinkObject.Save(3, &d.mode)
+ stateSinkObject.Save(4, &d.uid)
+ stateSinkObject.Save(5, &d.gid)
+ stateSinkObject.Save(6, &d.copiedUp)
+ stateSinkObject.Save(7, &d.parent)
+ stateSinkObject.Save(8, &d.name)
+ stateSinkObject.Save(9, &d.children)
+ stateSinkObject.Save(10, &d.dirents)
+ stateSinkObject.Save(11, &d.upperVD)
+ stateSinkObject.Save(12, &d.lowerVDs)
+ stateSinkObject.Save(13, &d.inlineLowerVDs)
+ stateSinkObject.Save(14, &d.devMajor)
+ stateSinkObject.Save(15, &d.devMinor)
+ stateSinkObject.Save(16, &d.ino)
+ stateSinkObject.Save(17, &d.lowerMappings)
+ stateSinkObject.Save(18, &d.wrappedMappable)
+ stateSinkObject.Save(19, &d.isMappable)
+ stateSinkObject.Save(20, &d.locks)
+ stateSinkObject.Save(21, &d.watches)
+}
+
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.refs)
+ stateSourceObject.Load(2, &d.fs)
+ stateSourceObject.Load(3, &d.mode)
+ stateSourceObject.Load(4, &d.uid)
+ stateSourceObject.Load(5, &d.gid)
+ stateSourceObject.Load(6, &d.copiedUp)
+ stateSourceObject.Load(7, &d.parent)
+ stateSourceObject.Load(8, &d.name)
+ stateSourceObject.Load(9, &d.children)
+ stateSourceObject.Load(10, &d.dirents)
+ stateSourceObject.Load(11, &d.upperVD)
+ stateSourceObject.Load(12, &d.lowerVDs)
+ stateSourceObject.Load(13, &d.inlineLowerVDs)
+ stateSourceObject.Load(14, &d.devMajor)
+ stateSourceObject.Load(15, &d.devMinor)
+ stateSourceObject.Load(16, &d.ino)
+ stateSourceObject.Load(17, &d.lowerMappings)
+ stateSourceObject.Load(18, &d.wrappedMappable)
+ stateSourceObject.Load(19, &d.isMappable)
+ stateSourceObject.Load(20, &d.locks)
+ stateSourceObject.Load(21, &d.watches)
+ stateSourceObject.AfterLoad(d.afterLoad)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/overlay.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "copiedUp",
+ "cachedFD",
+ "cachedFlags",
+ "lowerWaiters",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.copiedUp)
+ stateSinkObject.Save(2, &fd.cachedFD)
+ stateSinkObject.Save(3, &fd.cachedFlags)
+ stateSinkObject.Save(4, &fd.lowerWaiters)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.copiedUp)
+ stateSourceObject.Load(2, &fd.cachedFD)
+ stateSourceObject.Load(3, &fd.cachedFlags)
+ stateSourceObject.Load(4, &fd.lowerWaiters)
+}
+
+func init() {
+ state.Register((*directoryFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*FilesystemOptions)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*layerDevNumber)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*fileDescription)(nil))
+ state.Register((*regularFileFD)(nil))
+}
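
overlay_state_autogen.go, like the other *_state_autogen.go files in this diff, is stateify output: every savable type reports a type name and field list, saves each field by index into a state.Sink, loads it back from a state.Source, and may schedule post-restore work via AfterLoad, as dentry does above. A hand-written type can follow the same contract; the sketch below uses an illustrative counterFile type in a hypothetical example package and relies only on the state-package calls that appear in the generated code.

// Hand-written sketch of the savable-type contract that stateify generates.
// counterFile and the "example" package are illustrative, not part of gvisor.
package example

import (
	"gvisor.dev/gvisor/pkg/state"
)

type counterFile struct {
	name  string
	count int64
}

func (c *counterFile) StateTypeName() string {
	return "example.counterFile"
}

func (c *counterFile) StateFields() []string {
	return []string{
		"name",
		"count",
	}
}

func (c *counterFile) beforeSave() {}

func (c *counterFile) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.name)
	stateSinkObject.Save(1, &c.count)
}

// afterLoad would hold any post-restore fixups; it is scheduled from
// StateLoad via AfterLoad, mirroring dentry.StateLoad above.
func (c *counterFile) afterLoad() {}

func (c *counterFile) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.name)
	stateSourceObject.Load(1, &c.count)
	stateSourceObject.AfterLoad(c.afterLoad)
}

func init() {
	state.Register((*counterFile)(nil))
}
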
diff --git a/pkg/sentry/fsimpl/pipefs/BUILD b/pkg/sentry/fsimpl/pipefs/BUILD
deleted file mode 100644
index 5950a2d59..000000000
--- a/pkg/sentry/fsimpl/pipefs/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "pipefs",
- srcs = ["pipefs.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go b/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go
new file mode 100644
index 000000000..474f83cdf
--- /dev/null
+++ b/pkg/sentry/fsimpl/pipefs/pipefs_state_autogen.go
@@ -0,0 +1,105 @@
+// automatically generated by stateify.
+
+package pipefs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (f *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.filesystemType"
+}
+
+func (f *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (f *filesystemType) beforeSave() {}
+
+func (f *filesystemType) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *filesystemType) afterLoad() {}
+
+func (f *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/pipefs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "InodeNoopRefCount",
+ "locks",
+ "pipe",
+ "ino",
+ "uid",
+ "gid",
+ "ctime",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeNotDirectory)
+ stateSinkObject.Save(1, &i.InodeNotSymlink)
+ stateSinkObject.Save(2, &i.InodeNoopRefCount)
+ stateSinkObject.Save(3, &i.locks)
+ stateSinkObject.Save(4, &i.pipe)
+ stateSinkObject.Save(5, &i.ino)
+ stateSinkObject.Save(6, &i.uid)
+ stateSinkObject.Save(7, &i.gid)
+ stateSinkObject.Save(8, &i.ctime)
+}
+
+func (i *inode) afterLoad() {}
+
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeNotDirectory)
+ stateSourceObject.Load(1, &i.InodeNotSymlink)
+ stateSourceObject.Load(2, &i.InodeNoopRefCount)
+ stateSourceObject.Load(3, &i.locks)
+ stateSourceObject.Load(4, &i.pipe)
+ stateSourceObject.Load(5, &i.ino)
+ stateSourceObject.Load(6, &i.uid)
+ stateSourceObject.Load(7, &i.gid)
+ stateSourceObject.Load(8, &i.ctime)
+}
+
+func init() {
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+}
diff --git a/pkg/sentry/fsimpl/proc/BUILD b/pkg/sentry/fsimpl/proc/BUILD
deleted file mode 100644
index 5196a2a80..000000000
--- a/pkg/sentry/fsimpl/proc/BUILD
+++ /dev/null
@@ -1,131 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "fd_dir_inode_refs",
- out = "fd_dir_inode_refs.go",
- package = "proc",
- prefix = "fdDirInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "fdDirInode",
- },
-)
-
-go_template_instance(
- name = "fd_info_dir_inode_refs",
- out = "fd_info_dir_inode_refs.go",
- package = "proc",
- prefix = "fdInfoDirInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "fdInfoDirInode",
- },
-)
-
-go_template_instance(
- name = "subtasks_inode_refs",
- out = "subtasks_inode_refs.go",
- package = "proc",
- prefix = "subtasksInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "subtasksInode",
- },
-)
-
-go_template_instance(
- name = "task_inode_refs",
- out = "task_inode_refs.go",
- package = "proc",
- prefix = "taskInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "taskInode",
- },
-)
-
-go_template_instance(
- name = "tasks_inode_refs",
- out = "tasks_inode_refs.go",
- package = "proc",
- prefix = "tasksInode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "tasksInode",
- },
-)
-
-go_library(
- name = "proc",
- srcs = [
- "fd_dir_inode_refs.go",
- "fd_info_dir_inode_refs.go",
- "filesystem.go",
- "subtasks.go",
- "subtasks_inode_refs.go",
- "task.go",
- "task_fds.go",
- "task_files.go",
- "task_inode_refs.go",
- "task_net.go",
- "tasks.go",
- "tasks_files.go",
- "tasks_inode_refs.go",
- "tasks_sys.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/mm",
- "//pkg/sentry/socket",
- "//pkg/sentry/socket/unix",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/tcpip/header",
- "//pkg/tcpip/network/ipv4",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "proc_test",
- size = "small",
- srcs = [
- "tasks_sys_test.go",
- "tasks_test.go",
- ],
- library = ":proc",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/inet",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
new file mode 100644
index 000000000..895298eb9
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/fd_dir_inode_refs.go
@@ -0,0 +1,128 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const fdDirInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var fdDirInodeobj *fdDirInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type fdDirInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *fdDirInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", fdDirInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *fdDirInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *fdDirInodeRefs) LogRefs() bool {
+ return fdDirInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *fdDirInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *fdDirInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *fdDirInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *fdDirInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *fdDirInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *fdDirInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
new file mode 100644
index 000000000..8e3458485
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/fd_info_dir_inode_refs.go
@@ -0,0 +1,128 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const fdInfoDirInodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var fdInfoDirInodeobj *fdInfoDirInode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type fdInfoDirInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *fdInfoDirInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", fdInfoDirInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *fdInfoDirInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *fdInfoDirInodeRefs) LogRefs() bool {
+ return fdInfoDirInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *fdInfoDirInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *fdInfoDirInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *fdInfoDirInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *fdInfoDirInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/proc_state_autogen.go b/pkg/sentry/fsimpl/proc/proc_state_autogen.go
new file mode 100644
index 000000000..f2ba64a7f
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/proc_state_autogen.go
@@ -0,0 +1,2067 @@
+// automatically generated by stateify.
+
+package proc
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *fdDirInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDirInodeRefs"
+}
+
+func (r *fdDirInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *fdDirInodeRefs) beforeSave() {}
+
+func (r *fdDirInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *fdDirInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (r *fdInfoDirInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoDirInodeRefs"
+}
+
+func (r *fdInfoDirInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *fdInfoDirInodeRefs) beforeSave() {}
+
+func (r *fdInfoDirInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *fdInfoDirInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (ft *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.FilesystemType"
+}
+
+func (ft *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (ft *FilesystemType) beforeSave() {}
+
+func (ft *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ ft.beforeSave()
+}
+
+func (ft *FilesystemType) afterLoad() {}
+
+func (ft *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (s *staticFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.staticFile"
+}
+
+func (s *staticFile) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "StaticData",
+ }
+}
+
+func (s *staticFile) beforeSave() {}
+
+func (s *staticFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.StaticData)
+}
+
+func (s *staticFile) afterLoad() {}
+
+func (s *staticFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.StaticData)
+}
+
+func (i *InternalData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.InternalData"
+}
+
+func (i *InternalData) StateFields() []string {
+ return []string{
+ "Cgroups",
+ }
+}
+
+func (i *InternalData) beforeSave() {}
+
+func (i *InternalData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Cgroups)
+}
+
+func (i *InternalData) afterLoad() {}
+
+func (i *InternalData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Cgroups)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func (i *subtasksInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksInode"
+}
+
+func (i *subtasksInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "subtasksInodeRefs",
+ "locks",
+ "fs",
+ "task",
+ "pidns",
+ "cgroupControllers",
+ }
+}
+
+func (i *subtasksInode) beforeSave() {}
+
+func (i *subtasksInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.subtasksInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.fs)
+ stateSinkObject.Save(10, &i.task)
+ stateSinkObject.Save(11, &i.pidns)
+ stateSinkObject.Save(12, &i.cgroupControllers)
+}
+
+func (i *subtasksInode) afterLoad() {}
+
+func (i *subtasksInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.subtasksInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.fs)
+ stateSourceObject.Load(10, &i.task)
+ stateSourceObject.Load(11, &i.pidns)
+ stateSourceObject.Load(12, &i.cgroupControllers)
+}
+
+func (fd *subtasksFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksFD"
+}
+
+func (fd *subtasksFD) StateFields() []string {
+ return []string{
+ "GenericDirectoryFD",
+ "task",
+ }
+}
+
+func (fd *subtasksFD) beforeSave() {}
+
+func (fd *subtasksFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.GenericDirectoryFD)
+ stateSinkObject.Save(1, &fd.task)
+}
+
+func (fd *subtasksFD) afterLoad() {}
+
+func (fd *subtasksFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.GenericDirectoryFD)
+ stateSourceObject.Load(1, &fd.task)
+}
+
+func (r *subtasksInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.subtasksInodeRefs"
+}
+
+func (r *subtasksInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *subtasksInodeRefs) beforeSave() {}
+
+func (r *subtasksInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *subtasksInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *taskInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskInode"
+}
+
+func (i *taskInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "taskInodeRefs",
+ "locks",
+ "task",
+ }
+}
+
+func (i *taskInode) beforeSave() {}
+
+func (i *taskInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAttrs)
+ stateSinkObject.Save(2, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+ stateSinkObject.Save(4, &i.InodeTemporary)
+ stateSinkObject.Save(5, &i.OrderedChildren)
+ stateSinkObject.Save(6, &i.taskInodeRefs)
+ stateSinkObject.Save(7, &i.locks)
+ stateSinkObject.Save(8, &i.task)
+}
+
+func (i *taskInode) afterLoad() {}
+
+func (i *taskInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAttrs)
+ stateSourceObject.Load(2, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+ stateSourceObject.Load(4, &i.InodeTemporary)
+ stateSourceObject.Load(5, &i.OrderedChildren)
+ stateSourceObject.Load(6, &i.taskInodeRefs)
+ stateSourceObject.Load(7, &i.locks)
+ stateSourceObject.Load(8, &i.task)
+}
+
+func (i *taskOwnedInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskOwnedInode"
+}
+
+func (i *taskOwnedInode) StateFields() []string {
+ return []string{
+ "Inode",
+ "owner",
+ }
+}
+
+func (i *taskOwnedInode) beforeSave() {}
+
+func (i *taskOwnedInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.Inode)
+ stateSinkObject.Save(1, &i.owner)
+}
+
+func (i *taskOwnedInode) afterLoad() {}
+
+func (i *taskOwnedInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.Inode)
+ stateSourceObject.Load(1, &i.owner)
+}
+
+func (i *fdDir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDir"
+}
+
+func (i *fdDir) StateFields() []string {
+ return []string{
+ "locks",
+ "fs",
+ "task",
+ "produceSymlink",
+ }
+}
+
+func (i *fdDir) beforeSave() {}
+
+func (i *fdDir) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.locks)
+ stateSinkObject.Save(1, &i.fs)
+ stateSinkObject.Save(2, &i.task)
+ stateSinkObject.Save(3, &i.produceSymlink)
+}
+
+func (i *fdDir) afterLoad() {}
+
+func (i *fdDir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.locks)
+ stateSourceObject.Load(1, &i.fs)
+ stateSourceObject.Load(2, &i.task)
+ stateSourceObject.Load(3, &i.produceSymlink)
+}
+
+func (i *fdDirInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdDirInode"
+}
+
+func (i *fdDirInode) StateFields() []string {
+ return []string{
+ "fdDir",
+ "fdDirInodeRefs",
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ }
+}
+
+func (i *fdDirInode) beforeSave() {}
+
+func (i *fdDirInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fdDir)
+ stateSinkObject.Save(1, &i.fdDirInodeRefs)
+ stateSinkObject.Save(2, &i.implStatFS)
+ stateSinkObject.Save(3, &i.InodeAlwaysValid)
+ stateSinkObject.Save(4, &i.InodeAttrs)
+ stateSinkObject.Save(5, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(6, &i.InodeNotSymlink)
+ stateSinkObject.Save(7, &i.InodeTemporary)
+ stateSinkObject.Save(8, &i.OrderedChildren)
+}
+
+func (i *fdDirInode) afterLoad() {}
+
+func (i *fdDirInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fdDir)
+ stateSourceObject.Load(1, &i.fdDirInodeRefs)
+ stateSourceObject.Load(2, &i.implStatFS)
+ stateSourceObject.Load(3, &i.InodeAlwaysValid)
+ stateSourceObject.Load(4, &i.InodeAttrs)
+ stateSourceObject.Load(5, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(6, &i.InodeNotSymlink)
+ stateSourceObject.Load(7, &i.InodeTemporary)
+ stateSourceObject.Load(8, &i.OrderedChildren)
+}
+
+func (s *fdSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdSymlink"
+}
+
+func (s *fdSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "task",
+ "fd",
+ }
+}
+
+func (s *fdSymlink) beforeSave() {}
+
+func (s *fdSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.task)
+ stateSinkObject.Save(5, &s.fd)
+}
+
+func (s *fdSymlink) afterLoad() {}
+
+func (s *fdSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.task)
+ stateSourceObject.Load(5, &s.fd)
+}
+
+func (i *fdInfoDirInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoDirInode"
+}
+
+func (i *fdInfoDirInode) StateFields() []string {
+ return []string{
+ "fdDir",
+ "fdInfoDirInodeRefs",
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ }
+}
+
+func (i *fdInfoDirInode) beforeSave() {}
+
+func (i *fdInfoDirInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fdDir)
+ stateSinkObject.Save(1, &i.fdInfoDirInodeRefs)
+ stateSinkObject.Save(2, &i.implStatFS)
+ stateSinkObject.Save(3, &i.InodeAlwaysValid)
+ stateSinkObject.Save(4, &i.InodeAttrs)
+ stateSinkObject.Save(5, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(6, &i.InodeNotSymlink)
+ stateSinkObject.Save(7, &i.InodeTemporary)
+ stateSinkObject.Save(8, &i.OrderedChildren)
+}
+
+func (i *fdInfoDirInode) afterLoad() {}
+
+func (i *fdInfoDirInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fdDir)
+ stateSourceObject.Load(1, &i.fdInfoDirInodeRefs)
+ stateSourceObject.Load(2, &i.implStatFS)
+ stateSourceObject.Load(3, &i.InodeAlwaysValid)
+ stateSourceObject.Load(4, &i.InodeAttrs)
+ stateSourceObject.Load(5, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(6, &i.InodeNotSymlink)
+ stateSourceObject.Load(7, &i.InodeTemporary)
+ stateSourceObject.Load(8, &i.OrderedChildren)
+}
+
+func (d *fdInfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.fdInfoData"
+}
+
+func (d *fdInfoData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "fd",
+ }
+}
+
+func (d *fdInfoData) beforeSave() {}
+
+func (d *fdInfoData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+ stateSinkObject.Save(2, &d.fd)
+}
+
+func (d *fdInfoData) afterLoad() {}
+
+func (d *fdInfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+ stateSourceObject.Load(2, &d.fd)
+}
+
+func (d *auxvData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.auxvData"
+}
+
+func (d *auxvData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *auxvData) beforeSave() {}
+
+func (d *auxvData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *auxvData) afterLoad() {}
+
+func (d *auxvData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *cmdlineData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cmdlineData"
+}
+
+func (d *cmdlineData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "arg",
+ }
+}
+
+func (d *cmdlineData) beforeSave() {}
+
+func (d *cmdlineData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+ stateSinkObject.Save(2, &d.arg)
+}
+
+func (d *cmdlineData) afterLoad() {}
+
+func (d *cmdlineData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+ stateSourceObject.Load(2, &d.arg)
+}
+
+func (i *commInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.commInode"
+}
+
+func (i *commInode) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (i *commInode) beforeSave() {}
+
+func (i *commInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.task)
+}
+
+func (i *commInode) afterLoad() {}
+
+func (i *commInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.task)
+}
+
+func (d *commData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.commData"
+}
+
+func (d *commData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *commData) beforeSave() {}
+
+func (d *commData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *commData) afterLoad() {}
+
+func (d *commData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *idMapData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.idMapData"
+}
+
+func (d *idMapData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "gids",
+ }
+}
+
+func (d *idMapData) beforeSave() {}
+
+func (d *idMapData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+ stateSinkObject.Save(2, &d.gids)
+}
+
+func (d *idMapData) afterLoad() {}
+
+func (d *idMapData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+ stateSourceObject.Load(2, &d.gids)
+}
+
+func (f *memInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.memInode"
+}
+
+func (f *memInode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoStatFS",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "task",
+ "locks",
+ }
+}
+
+func (f *memInode) beforeSave() {}
+
+func (f *memInode) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.InodeAttrs)
+ stateSinkObject.Save(1, &f.InodeNoStatFS)
+ stateSinkObject.Save(2, &f.InodeNoopRefCount)
+ stateSinkObject.Save(3, &f.InodeNotDirectory)
+ stateSinkObject.Save(4, &f.InodeNotSymlink)
+ stateSinkObject.Save(5, &f.task)
+ stateSinkObject.Save(6, &f.locks)
+}
+
+func (f *memInode) afterLoad() {}
+
+func (f *memInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.InodeAttrs)
+ stateSourceObject.Load(1, &f.InodeNoStatFS)
+ stateSourceObject.Load(2, &f.InodeNoopRefCount)
+ stateSourceObject.Load(3, &f.InodeNotDirectory)
+ stateSourceObject.Load(4, &f.InodeNotSymlink)
+ stateSourceObject.Load(5, &f.task)
+ stateSourceObject.Load(6, &f.locks)
+}
+
+func (fd *memFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.memFD"
+}
+
+func (fd *memFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "inode",
+ "offset",
+ }
+}
+
+func (fd *memFD) beforeSave() {}
+
+func (fd *memFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+ stateSinkObject.Save(3, &fd.inode)
+ stateSinkObject.Save(4, &fd.offset)
+}
+
+func (fd *memFD) afterLoad() {}
+
+func (fd *memFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+ stateSourceObject.Load(3, &fd.inode)
+ stateSourceObject.Load(4, &fd.offset)
+}
+
+func (d *mapsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mapsData"
+}
+
+func (d *mapsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *mapsData) beforeSave() {}
+
+func (d *mapsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *mapsData) afterLoad() {}
+
+func (d *mapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (d *smapsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.smapsData"
+}
+
+func (d *smapsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (d *smapsData) beforeSave() {}
+
+func (d *smapsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.task)
+}
+
+func (d *smapsData) afterLoad() {}
+
+func (d *smapsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.task)
+}
+
+func (s *taskStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskStatData"
+}
+
+func (s *taskStatData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "tgstats",
+ "pidns",
+ }
+}
+
+func (s *taskStatData) beforeSave() {}
+
+func (s *taskStatData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+ stateSinkObject.Save(2, &s.tgstats)
+ stateSinkObject.Save(3, &s.pidns)
+}
+
+func (s *taskStatData) afterLoad() {}
+
+func (s *taskStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+ stateSourceObject.Load(2, &s.tgstats)
+ stateSourceObject.Load(3, &s.pidns)
+}
+
+func (s *statmData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statmData"
+}
+
+func (s *statmData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (s *statmData) beforeSave() {}
+
+func (s *statmData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+}
+
+func (s *statmData) afterLoad() {}
+
+func (s *statmData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+}
+
+func (s *statusData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statusData"
+}
+
+func (s *statusData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ "pidns",
+ }
+}
+
+func (s *statusData) beforeSave() {}
+
+func (s *statusData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.DynamicBytesFile)
+ stateSinkObject.Save(1, &s.task)
+ stateSinkObject.Save(2, &s.pidns)
+}
+
+func (s *statusData) afterLoad() {}
+
+func (s *statusData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.DynamicBytesFile)
+ stateSourceObject.Load(1, &s.task)
+ stateSourceObject.Load(2, &s.pidns)
+}
+
+func (i *ioData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ioData"
+}
+
+func (i *ioData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "ioUsage",
+ }
+}
+
+func (i *ioData) beforeSave() {}
+
+func (i *ioData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.ioUsage)
+}
+
+func (i *ioData) afterLoad() {}
+
+func (i *ioData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.ioUsage)
+}
+
+func (o *oomScoreAdj) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.oomScoreAdj"
+}
+
+func (o *oomScoreAdj) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (o *oomScoreAdj) beforeSave() {}
+
+func (o *oomScoreAdj) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.DynamicBytesFile)
+ stateSinkObject.Save(1, &o.task)
+}
+
+func (o *oomScoreAdj) afterLoad() {}
+
+func (o *oomScoreAdj) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.DynamicBytesFile)
+ stateSourceObject.Load(1, &o.task)
+}
+
+func (s *exeSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.exeSymlink"
+}
+
+func (s *exeSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "task",
+ }
+}
+
+func (s *exeSymlink) beforeSave() {}
+
+func (s *exeSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.task)
+}
+
+func (s *exeSymlink) afterLoad() {}
+
+func (s *exeSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.task)
+}
+
+func (s *cwdSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cwdSymlink"
+}
+
+func (s *cwdSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "task",
+ }
+}
+
+func (s *cwdSymlink) beforeSave() {}
+
+func (s *cwdSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.task)
+}
+
+func (s *cwdSymlink) afterLoad() {}
+
+func (s *cwdSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.task)
+}
+
+func (i *mountInfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mountInfoData"
+}
+
+func (i *mountInfoData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (i *mountInfoData) beforeSave() {}
+
+func (i *mountInfoData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.task)
+}
+
+func (i *mountInfoData) afterLoad() {}
+
+func (i *mountInfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.task)
+}
+
+func (i *mountsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mountsData"
+}
+
+func (i *mountsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "task",
+ }
+}
+
+func (i *mountsData) beforeSave() {}
+
+func (i *mountsData) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.DynamicBytesFile)
+ stateSinkObject.Save(1, &i.task)
+}
+
+func (i *mountsData) afterLoad() {}
+
+func (i *mountsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.DynamicBytesFile)
+ stateSourceObject.Load(1, &i.task)
+}
+
+func (s *namespaceSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceSymlink"
+}
+
+func (s *namespaceSymlink) StateFields() []string {
+ return []string{
+ "StaticSymlink",
+ "task",
+ }
+}
+
+func (s *namespaceSymlink) beforeSave() {}
+
+func (s *namespaceSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.StaticSymlink)
+ stateSinkObject.Save(1, &s.task)
+}
+
+func (s *namespaceSymlink) afterLoad() {}
+
+func (s *namespaceSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.StaticSymlink)
+ stateSourceObject.Load(1, &s.task)
+}
+
+func (i *namespaceInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceInode"
+}
+
+func (i *namespaceInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "locks",
+ }
+}
+
+func (i *namespaceInode) beforeSave() {}
+
+func (i *namespaceInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAttrs)
+ stateSinkObject.Save(2, &i.InodeNoopRefCount)
+ stateSinkObject.Save(3, &i.InodeNotDirectory)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.locks)
+}
+
+func (i *namespaceInode) afterLoad() {}
+
+func (i *namespaceInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAttrs)
+ stateSourceObject.Load(2, &i.InodeNoopRefCount)
+ stateSourceObject.Load(3, &i.InodeNotDirectory)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.locks)
+}
+
+func (fd *namespaceFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.namespaceFD"
+}
+
+func (fd *namespaceFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ "vfsfd",
+ "inode",
+ }
+}
+
+func (fd *namespaceFD) beforeSave() {}
+
+func (fd *namespaceFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.LockFD)
+ stateSinkObject.Save(2, &fd.vfsfd)
+ stateSinkObject.Save(3, &fd.inode)
+}
+
+func (fd *namespaceFD) afterLoad() {}
+
+func (fd *namespaceFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.LockFD)
+ stateSourceObject.Load(2, &fd.vfsfd)
+ stateSourceObject.Load(3, &fd.inode)
+}
+
+func (r *taskInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.taskInodeRefs"
+}
+
+func (r *taskInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *taskInodeRefs) beforeSave() {}
+
+func (r *taskInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *taskInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (n *ifinet6) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ifinet6"
+}
+
+func (n *ifinet6) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (n *ifinet6) beforeSave() {}
+
+func (n *ifinet6) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.stack)
+}
+
+func (n *ifinet6) afterLoad() {}
+
+func (n *ifinet6) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.stack)
+}
+
+func (n *netDevData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netDevData"
+}
+
+func (n *netDevData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (n *netDevData) beforeSave() {}
+
+func (n *netDevData) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.stack)
+}
+
+func (n *netDevData) afterLoad() {}
+
+func (n *netDevData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.stack)
+}
+
+func (n *netUnixData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netUnixData"
+}
+
+func (n *netUnixData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (n *netUnixData) beforeSave() {}
+
+func (n *netUnixData) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.DynamicBytesFile)
+ stateSinkObject.Save(1, &n.kernel)
+}
+
+func (n *netUnixData) afterLoad() {}
+
+func (n *netUnixData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.DynamicBytesFile)
+ stateSourceObject.Load(1, &n.kernel)
+}
+
+func (d *netTCPData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netTCPData"
+}
+
+func (d *netTCPData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netTCPData) beforeSave() {}
+
+func (d *netTCPData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netTCPData) afterLoad() {}
+
+func (d *netTCPData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netTCP6Data) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netTCP6Data"
+}
+
+func (d *netTCP6Data) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netTCP6Data) beforeSave() {}
+
+func (d *netTCP6Data) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netTCP6Data) afterLoad() {}
+
+func (d *netTCP6Data) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netUDPData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netUDPData"
+}
+
+func (d *netUDPData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "kernel",
+ }
+}
+
+func (d *netUDPData) beforeSave() {}
+
+func (d *netUDPData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.kernel)
+}
+
+func (d *netUDPData) afterLoad() {}
+
+func (d *netUDPData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.kernel)
+}
+
+func (d *netSnmpData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netSnmpData"
+}
+
+func (d *netSnmpData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netSnmpData) beforeSave() {}
+
+func (d *netSnmpData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netSnmpData) afterLoad() {}
+
+func (d *netSnmpData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (s *snmpLine) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.snmpLine"
+}
+
+func (s *snmpLine) StateFields() []string {
+ return []string{
+ "prefix",
+ "header",
+ }
+}
+
+func (s *snmpLine) beforeSave() {}
+
+func (s *snmpLine) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.prefix)
+ stateSinkObject.Save(1, &s.header)
+}
+
+func (s *snmpLine) afterLoad() {}
+
+func (s *snmpLine) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.prefix)
+ stateSourceObject.Load(1, &s.header)
+}
+
+func (d *netRouteData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netRouteData"
+}
+
+func (d *netRouteData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netRouteData) beforeSave() {}
+
+func (d *netRouteData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netRouteData) afterLoad() {}
+
+func (d *netRouteData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (d *netStatData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.netStatData"
+}
+
+func (d *netStatData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *netStatData) beforeSave() {}
+
+func (d *netStatData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *netStatData) afterLoad() {}
+
+func (d *netStatData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.stack)
+}
+
+func (i *tasksInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tasksInode"
+}
+
+func (i *tasksInode) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeDirectoryNoNewChildren",
+ "InodeNotSymlink",
+ "InodeTemporary",
+ "OrderedChildren",
+ "tasksInodeRefs",
+ "locks",
+ "fs",
+ "pidns",
+ "cgroupControllers",
+ }
+}
+
+func (i *tasksInode) beforeSave() {}
+
+func (i *tasksInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.implStatFS)
+ stateSinkObject.Save(1, &i.InodeAlwaysValid)
+ stateSinkObject.Save(2, &i.InodeAttrs)
+ stateSinkObject.Save(3, &i.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(4, &i.InodeNotSymlink)
+ stateSinkObject.Save(5, &i.InodeTemporary)
+ stateSinkObject.Save(6, &i.OrderedChildren)
+ stateSinkObject.Save(7, &i.tasksInodeRefs)
+ stateSinkObject.Save(8, &i.locks)
+ stateSinkObject.Save(9, &i.fs)
+ stateSinkObject.Save(10, &i.pidns)
+ stateSinkObject.Save(11, &i.cgroupControllers)
+}
+
+func (i *tasksInode) afterLoad() {}
+
+func (i *tasksInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.implStatFS)
+ stateSourceObject.Load(1, &i.InodeAlwaysValid)
+ stateSourceObject.Load(2, &i.InodeAttrs)
+ stateSourceObject.Load(3, &i.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(4, &i.InodeNotSymlink)
+ stateSourceObject.Load(5, &i.InodeTemporary)
+ stateSourceObject.Load(6, &i.OrderedChildren)
+ stateSourceObject.Load(7, &i.tasksInodeRefs)
+ stateSourceObject.Load(8, &i.locks)
+ stateSourceObject.Load(9, &i.fs)
+ stateSourceObject.Load(10, &i.pidns)
+ stateSourceObject.Load(11, &i.cgroupControllers)
+}
+
+func (s *staticFileSetStat) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.staticFileSetStat"
+}
+
+func (s *staticFileSetStat) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ "StaticData",
+ }
+}
+
+func (s *staticFileSetStat) beforeSave() {}
+
+func (s *staticFileSetStat) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.dynamicBytesFileSetAttr)
+ stateSinkObject.Save(1, &s.StaticData)
+}
+
+func (s *staticFileSetStat) afterLoad() {}
+
+func (s *staticFileSetStat) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.dynamicBytesFileSetAttr)
+ stateSourceObject.Load(1, &s.StaticData)
+}
+
+func (s *selfSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.selfSymlink"
+}
+
+func (s *selfSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "pidns",
+ }
+}
+
+func (s *selfSymlink) beforeSave() {}
+
+func (s *selfSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.pidns)
+}
+
+func (s *selfSymlink) afterLoad() {}
+
+func (s *selfSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.pidns)
+}
+
+func (s *threadSelfSymlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.threadSelfSymlink"
+}
+
+func (s *threadSelfSymlink) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeSymlink",
+ "pidns",
+ }
+}
+
+func (s *threadSelfSymlink) beforeSave() {}
+
+func (s *threadSelfSymlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.implStatFS)
+ stateSinkObject.Save(1, &s.InodeAttrs)
+ stateSinkObject.Save(2, &s.InodeNoopRefCount)
+ stateSinkObject.Save(3, &s.InodeSymlink)
+ stateSinkObject.Save(4, &s.pidns)
+}
+
+func (s *threadSelfSymlink) afterLoad() {}
+
+func (s *threadSelfSymlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.implStatFS)
+ stateSourceObject.Load(1, &s.InodeAttrs)
+ stateSourceObject.Load(2, &s.InodeNoopRefCount)
+ stateSourceObject.Load(3, &s.InodeSymlink)
+ stateSourceObject.Load(4, &s.pidns)
+}
+
+func (d *dynamicBytesFileSetAttr) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.dynamicBytesFileSetAttr"
+}
+
+func (d *dynamicBytesFileSetAttr) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (d *dynamicBytesFileSetAttr) beforeSave() {}
+
+func (d *dynamicBytesFileSetAttr) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+}
+
+func (d *dynamicBytesFileSetAttr) afterLoad() {}
+
+func (d *dynamicBytesFileSetAttr) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+}
+
+func (c *cpuStats) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.cpuStats"
+}
+
+func (c *cpuStats) StateFields() []string {
+ return []string{
+ "user",
+ "nice",
+ "system",
+ "idle",
+ "ioWait",
+ "irq",
+ "softirq",
+ "steal",
+ "guest",
+ "guestNice",
+ }
+}
+
+func (c *cpuStats) beforeSave() {}
+
+func (c *cpuStats) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.user)
+ stateSinkObject.Save(1, &c.nice)
+ stateSinkObject.Save(2, &c.system)
+ stateSinkObject.Save(3, &c.idle)
+ stateSinkObject.Save(4, &c.ioWait)
+ stateSinkObject.Save(5, &c.irq)
+ stateSinkObject.Save(6, &c.softirq)
+ stateSinkObject.Save(7, &c.steal)
+ stateSinkObject.Save(8, &c.guest)
+ stateSinkObject.Save(9, &c.guestNice)
+}
+
+func (c *cpuStats) afterLoad() {}
+
+func (c *cpuStats) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.user)
+ stateSourceObject.Load(1, &c.nice)
+ stateSourceObject.Load(2, &c.system)
+ stateSourceObject.Load(3, &c.idle)
+ stateSourceObject.Load(4, &c.ioWait)
+ stateSourceObject.Load(5, &c.irq)
+ stateSourceObject.Load(6, &c.softirq)
+ stateSourceObject.Load(7, &c.steal)
+ stateSourceObject.Load(8, &c.guest)
+ stateSourceObject.Load(9, &c.guestNice)
+}
+
+func (s *statData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.statData"
+}
+
+func (s *statData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (s *statData) beforeSave() {}
+
+func (s *statData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.dynamicBytesFileSetAttr)
+}
+
+func (s *statData) afterLoad() {}
+
+func (s *statData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.dynamicBytesFileSetAttr)
+}
+
+func (l *loadavgData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.loadavgData"
+}
+
+func (l *loadavgData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (l *loadavgData) beforeSave() {}
+
+func (l *loadavgData) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.dynamicBytesFileSetAttr)
+}
+
+func (l *loadavgData) afterLoad() {}
+
+func (l *loadavgData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.dynamicBytesFileSetAttr)
+}
+
+func (m *meminfoData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.meminfoData"
+}
+
+func (m *meminfoData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (m *meminfoData) beforeSave() {}
+
+func (m *meminfoData) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.dynamicBytesFileSetAttr)
+}
+
+func (m *meminfoData) afterLoad() {}
+
+func (m *meminfoData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.dynamicBytesFileSetAttr)
+}
+
+func (u *uptimeData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.uptimeData"
+}
+
+func (u *uptimeData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (u *uptimeData) beforeSave() {}
+
+func (u *uptimeData) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.dynamicBytesFileSetAttr)
+}
+
+func (u *uptimeData) afterLoad() {}
+
+func (u *uptimeData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.dynamicBytesFileSetAttr)
+}
+
+func (v *versionData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.versionData"
+}
+
+func (v *versionData) StateFields() []string {
+ return []string{
+ "dynamicBytesFileSetAttr",
+ }
+}
+
+func (v *versionData) beforeSave() {}
+
+func (v *versionData) StateSave(stateSinkObject state.Sink) {
+ v.beforeSave()
+ stateSinkObject.Save(0, &v.dynamicBytesFileSetAttr)
+}
+
+func (v *versionData) afterLoad() {}
+
+func (v *versionData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &v.dynamicBytesFileSetAttr)
+}
+
+func (d *filesystemsData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.filesystemsData"
+}
+
+func (d *filesystemsData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (d *filesystemsData) beforeSave() {}
+
+func (d *filesystemsData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+}
+
+func (d *filesystemsData) afterLoad() {}
+
+func (d *filesystemsData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+}
+
+func (r *tasksInodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tasksInodeRefs"
+}
+
+func (r *tasksInodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *tasksInodeRefs) beforeSave() {}
+
+func (r *tasksInodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *tasksInodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (t *tcpMemDir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpMemDir"
+}
+
+func (t *tcpMemDir) StateFields() []string {
+ return nil
+}
+
+func (d *mmapMinAddrData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.mmapMinAddrData"
+}
+
+func (d *mmapMinAddrData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "k",
+ }
+}
+
+func (d *mmapMinAddrData) beforeSave() {}
+
+func (d *mmapMinAddrData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.k)
+}
+
+func (d *mmapMinAddrData) afterLoad() {}
+
+func (d *mmapMinAddrData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.k)
+}
+
+func (h *hostnameData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.hostnameData"
+}
+
+func (h *hostnameData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ }
+}
+
+func (h *hostnameData) beforeSave() {}
+
+func (h *hostnameData) StateSave(stateSinkObject state.Sink) {
+ h.beforeSave()
+ stateSinkObject.Save(0, &h.DynamicBytesFile)
+}
+
+func (h *hostnameData) afterLoad() {}
+
+func (h *hostnameData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &h.DynamicBytesFile)
+}
+
+func (d *tcpSackData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpSackData"
+}
+
+func (d *tcpSackData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ "enabled",
+ }
+}
+
+func (d *tcpSackData) beforeSave() {}
+
+func (d *tcpSackData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+ stateSinkObject.Save(2, &d.enabled)
+}
+
+func (d *tcpSackData) afterLoad() {}
+
+func (d *tcpSackData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &d.stack)
+ stateSourceObject.Load(2, &d.enabled)
+}
+
+func (d *tcpRecoveryData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpRecoveryData"
+}
+
+func (d *tcpRecoveryData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ }
+}
+
+func (d *tcpRecoveryData) beforeSave() {}
+
+func (d *tcpRecoveryData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.stack)
+}
+
+func (d *tcpRecoveryData) afterLoad() {}
+
+func (d *tcpRecoveryData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &d.stack)
+}
+
+func (d *tcpMemData) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.tcpMemData"
+}
+
+func (d *tcpMemData) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "dir",
+ "stack",
+ }
+}
+
+func (d *tcpMemData) beforeSave() {}
+
+func (d *tcpMemData) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.DynamicBytesFile)
+ stateSinkObject.Save(1, &d.dir)
+ stateSinkObject.Save(2, &d.stack)
+}
+
+func (d *tcpMemData) afterLoad() {}
+
+func (d *tcpMemData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.DynamicBytesFile)
+ stateSourceObject.Load(1, &d.dir)
+ stateSourceObject.LoadWait(2, &d.stack)
+}
+
+func (ipf *ipForwarding) StateTypeName() string {
+ return "pkg/sentry/fsimpl/proc.ipForwarding"
+}
+
+func (ipf *ipForwarding) StateFields() []string {
+ return []string{
+ "DynamicBytesFile",
+ "stack",
+ "enabled",
+ }
+}
+
+func (ipf *ipForwarding) beforeSave() {}
+
+func (ipf *ipForwarding) StateSave(stateSinkObject state.Sink) {
+ ipf.beforeSave()
+ stateSinkObject.Save(0, &ipf.DynamicBytesFile)
+ stateSinkObject.Save(1, &ipf.stack)
+ stateSinkObject.Save(2, &ipf.enabled)
+}
+
+func (ipf *ipForwarding) afterLoad() {}
+
+func (ipf *ipForwarding) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ipf.DynamicBytesFile)
+ stateSourceObject.LoadWait(1, &ipf.stack)
+ stateSourceObject.Load(2, &ipf.enabled)
+}
+
+func init() {
+ state.Register((*fdDirInodeRefs)(nil))
+ state.Register((*fdInfoDirInodeRefs)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*staticFile)(nil))
+ state.Register((*InternalData)(nil))
+ state.Register((*implStatFS)(nil))
+ state.Register((*subtasksInode)(nil))
+ state.Register((*subtasksFD)(nil))
+ state.Register((*subtasksInodeRefs)(nil))
+ state.Register((*taskInode)(nil))
+ state.Register((*taskOwnedInode)(nil))
+ state.Register((*fdDir)(nil))
+ state.Register((*fdDirInode)(nil))
+ state.Register((*fdSymlink)(nil))
+ state.Register((*fdInfoDirInode)(nil))
+ state.Register((*fdInfoData)(nil))
+ state.Register((*auxvData)(nil))
+ state.Register((*cmdlineData)(nil))
+ state.Register((*commInode)(nil))
+ state.Register((*commData)(nil))
+ state.Register((*idMapData)(nil))
+ state.Register((*memInode)(nil))
+ state.Register((*memFD)(nil))
+ state.Register((*mapsData)(nil))
+ state.Register((*smapsData)(nil))
+ state.Register((*taskStatData)(nil))
+ state.Register((*statmData)(nil))
+ state.Register((*statusData)(nil))
+ state.Register((*ioData)(nil))
+ state.Register((*oomScoreAdj)(nil))
+ state.Register((*exeSymlink)(nil))
+ state.Register((*cwdSymlink)(nil))
+ state.Register((*mountInfoData)(nil))
+ state.Register((*mountsData)(nil))
+ state.Register((*namespaceSymlink)(nil))
+ state.Register((*namespaceInode)(nil))
+ state.Register((*namespaceFD)(nil))
+ state.Register((*taskInodeRefs)(nil))
+ state.Register((*ifinet6)(nil))
+ state.Register((*netDevData)(nil))
+ state.Register((*netUnixData)(nil))
+ state.Register((*netTCPData)(nil))
+ state.Register((*netTCP6Data)(nil))
+ state.Register((*netUDPData)(nil))
+ state.Register((*netSnmpData)(nil))
+ state.Register((*snmpLine)(nil))
+ state.Register((*netRouteData)(nil))
+ state.Register((*netStatData)(nil))
+ state.Register((*tasksInode)(nil))
+ state.Register((*staticFileSetStat)(nil))
+ state.Register((*selfSymlink)(nil))
+ state.Register((*threadSelfSymlink)(nil))
+ state.Register((*dynamicBytesFileSetAttr)(nil))
+ state.Register((*cpuStats)(nil))
+ state.Register((*statData)(nil))
+ state.Register((*loadavgData)(nil))
+ state.Register((*meminfoData)(nil))
+ state.Register((*uptimeData)(nil))
+ state.Register((*versionData)(nil))
+ state.Register((*filesystemsData)(nil))
+ state.Register((*tasksInodeRefs)(nil))
+ state.Register((*tcpMemDir)(nil))
+ state.Register((*mmapMinAddrData)(nil))
+ state.Register((*hostnameData)(nil))
+ state.Register((*tcpSackData)(nil))
+ state.Register((*tcpRecoveryData)(nil))
+ state.Register((*tcpMemData)(nil))
+ state.Register((*ipForwarding)(nil))
+}
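
For context, the Save/Load boilerplate above follows a single pattern: StateFields declares field names, StateSave/StateLoad serialize them by matching indices, and init registers the type with the state machinery. The sketch below reproduces that pattern by hand for a hypothetical type named exampleData (not part of this change), assuming the pkg/state import path used elsewhere in gVisor; it is an illustration of the generated shape, not additional code in this commit.

package proc

import "gvisor.dev/gvisor/pkg/state"

// exampleData is a hypothetical savable type, shown only to illustrate the
// pattern generated above.
type exampleData struct {
	name  string
	value int64
}

func (d *exampleData) StateTypeName() string {
	return "pkg/sentry/fsimpl/proc.exampleData"
}

func (d *exampleData) StateFields() []string {
	return []string{"name", "value"}
}

func (d *exampleData) beforeSave() {}

func (d *exampleData) StateSave(stateSinkObject state.Sink) {
	d.beforeSave()
	// Indices must line up with the order returned by StateFields.
	stateSinkObject.Save(0, &d.name)
	stateSinkObject.Save(1, &d.value)
}

func (d *exampleData) afterLoad() {}

func (d *exampleData) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &d.name)
	stateSourceObject.Load(1, &d.value)
	// AfterLoad defers afterLoad until all objects are restored; the generated
	// code above only does this for types whose afterLoad is non-trivial, such
	// as the *InodeRefs types.
	stateSourceObject.AfterLoad(d.afterLoad)
}

func init() {
	// Registration makes the type known to the save/restore machinery, matching
	// the state.Register calls in the generated init above.
	state.Register((*exampleData)(nil))
}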
diff --git a/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
new file mode 100644
index 000000000..c609e618a
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/subtasks_inode_refs.go
@@ -0,0 +1,128 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// subtasksInodeenableLogging indicates whether reference-related events should
+// be logged (with stack traces). This is false by default and should only be set
+// to true for debugging purposes, as it can generate an extremely large amount
+// of output and drastically degrade performance.
+const subtasksInodeenableLogging = false
+
+// subtasksInodeobj is used to customize logging. Note that we use a pointer to
+// subtasksInode so that we do not copy the entire object as a format parameter.
+var subtasksInodeobj *subtasksInode
+
+// subtasksInodeRefs implements refs.RefCounter. It keeps a reference count using
+// atomic operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value subtasksInodeRefs object contains one reference.
+//
+// +stateify savable
+type subtasksInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *subtasksInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", subtasksInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *subtasksInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *subtasksInodeRefs) LogRefs() bool {
+ return subtasksInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *subtasksInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *subtasksInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *subtasksInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *subtasksInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *subtasksInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *subtasksInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
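
The refs types above pack two counters into one int64: the upper 32 bits hold speculative references taken by TryIncRef, the lower 32 bits hold (real references - 1). The standalone sketch below (hypothetical, outside gVisor, with logging and leak checking omitted) walks through the same arithmetic so the int32(v) < 0 check and the -1 sentinel in DecRef are easier to follow.

package main

import (
	"fmt"
	"sync/atomic"
)

// refCount follows the convention of the generated refs types: the stored
// value is (real references - 1), so a zero value means one reference, and
// the upper 32 bits count speculative references.
type refCount struct{ v int64 }

const speculativeRef = 1 << 32

func (r *refCount) tryIncRef() bool {
	// Take a speculative reference first. If the low 32 bits are negative,
	// the real count already reached zero and the object may be destroyed.
	if v := atomic.AddInt64(&r.v, speculativeRef); int32(v) < 0 {
		atomic.AddInt64(&r.v, -speculativeRef) // Back out the speculation.
		return false
	}
	// Convert the speculative reference into a real one.
	atomic.AddInt64(&r.v, -speculativeRef+1)
	return true
}

func (r *refCount) decRef() (destroyed bool) {
	// A stored value of -1 means the real count just reached zero.
	return atomic.AddInt64(&r.v, -1) == -1
}

func main() {
	var r refCount             // one reference by construction
	fmt.Println(r.tryIncRef()) // true: now two references
	fmt.Println(r.decRef())    // false: one reference remains
	fmt.Println(r.decRef())    // true: count reached zero
	fmt.Println(r.tryIncRef()) // false: object is logically destroyed
}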
diff --git a/pkg/sentry/fsimpl/proc/task_inode_refs.go b/pkg/sentry/fsimpl/proc/task_inode_refs.go
new file mode 100644
index 000000000..2ee58bd62
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/task_inode_refs.go
@@ -0,0 +1,128 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// taskInodeenableLogging indicates whether reference-related events should be
+// logged (with stack traces). This is false by default and should only be set to
+// true for debugging purposes, as it can generate an extremely large amount of
+// output and drastically degrade performance.
+const taskInodeenableLogging = false
+
+// taskInodeobj is used to customize logging. Note that we use a pointer to
+// taskInode so that we do not copy the entire object as a format parameter.
+var taskInodeobj *taskInode
+
+// taskInodeRefs implements refs.RefCounter. It keeps a reference count using
+// atomic operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value taskInodeRefs object contains one reference.
+//
+// +stateify savable
+type taskInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *taskInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", taskInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *taskInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *taskInodeRefs) LogRefs() bool {
+ return taskInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *taskInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *taskInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *taskInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *taskInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *taskInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *taskInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
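
In use, an owner type embeds the generated refs type, enables leak checking once at construction, and hands DecRef a destructor that runs exactly once when the count drops to zero (the tasksInode StateFields above show such an embedding for tasksInodeRefs). The following is a minimal caller-side sketch under those assumptions; countedInode and its fields are hypothetical and only illustrate how the generated IncRef/DecRef/EnableLeakCheck surface is meant to be driven.

// countedInode is a hypothetical owner type embedding the generated refs.
type countedInode struct {
	taskInodeRefs
	name string
}

func newCountedInode(name string) *countedInode {
	i := &countedInode{name: name}
	// A zero-value refs object already holds one reference; opting into the
	// leak checker makes unreleased references show up via LeakMessage.
	i.EnableLeakCheck()
	return i
}

func (i *countedInode) release() {
	// DecRef invokes the destructor only when the last reference is dropped.
	i.DecRef(func() {
		// Free resources owned by the inode here.
	})
}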
diff --git a/pkg/sentry/fsimpl/proc/tasks_inode_refs.go b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
new file mode 100644
index 000000000..71dcaed9e
--- /dev/null
+++ b/pkg/sentry/fsimpl/proc/tasks_inode_refs.go
@@ -0,0 +1,128 @@
+package proc
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// tasksInodeenableLogging indicates whether reference-related events should be
+// logged (with stack traces). This is false by default and should only be set to
+// true for debugging purposes, as it can generate an extremely large amount of
+// output and drastically degrade performance.
+const tasksInodeenableLogging = false
+
+// tasksInodeobj is used to customize logging. Note that we use a pointer to
+// tasksInode so that we do not copy the entire object as a format parameter.
+var tasksInodeobj *tasksInode
+
+// tasksInodeRefs implements refs.RefCounter. It keeps a reference count using
+// atomic operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value tasksInodeRefs object contains one reference.
+//
+// +stateify savable
+type tasksInodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *tasksInodeRefs) RefType() string {
+ return fmt.Sprintf("%T", tasksInodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *tasksInodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *tasksInodeRefs) LogRefs() bool {
+ return tasksInodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *tasksInodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *tasksInodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *tasksInodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *tasksInodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *tasksInodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *tasksInodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
diff --git a/pkg/sentry/fsimpl/proc/tasks_sys_test.go b/pkg/sentry/fsimpl/proc/tasks_sys_test.go
deleted file mode 100644
index 6cee22823..000000000
--- a/pkg/sentry/fsimpl/proc/tasks_sys_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "bytes"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/inet"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-func newIPv6TestStack() *inet.TestStack {
- s := inet.NewTestStack()
- s.SupportsIPv6Flag = true
- return s
-}
-
-func TestIfinet6NoAddresses(t *testing.T) {
- n := &ifinet6{stack: newIPv6TestStack()}
- var buf bytes.Buffer
- n.Generate(contexttest.Context(t), &buf)
- if buf.Len() > 0 {
- t.Errorf("n.Generate() generated = %v, want = %v", buf.Bytes(), []byte{})
- }
-}
-
-func TestIfinet6(t *testing.T) {
- s := newIPv6TestStack()
- s.InterfacesMap[1] = inet.Interface{Name: "eth0"}
- s.InterfaceAddrsMap[1] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"),
- },
- }
- s.InterfacesMap[2] = inet.Interface{Name: "eth1"}
- s.InterfaceAddrsMap[2] = []inet.InterfaceAddr{
- {
- Family: linux.AF_INET6,
- PrefixLen: 128,
- Addr: []byte("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"),
- },
- }
- want := map[string]struct{}{
- "000102030405060708090a0b0c0d0e0f 01 80 00 00 eth0\n": {},
- "101112131415161718191a1b1c1d1e1f 02 80 00 00 eth1\n": {},
- }
-
- n := &ifinet6{stack: s}
- contents := n.contents()
- if len(contents) != len(want) {
- t.Errorf("Got len(n.contents()) = %d, want = %d", len(contents), len(want))
- }
- got := map[string]struct{}{}
- for _, l := range contents {
- got[l] = struct{}{}
- }
-
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Got n.contents() = %v, want = %v", got, want)
- }
-}
-
-// TestIPForwarding tests the implementation of
-// /proc/sys/net/ipv4/ip_forwarding
-func TestConfigureIPForwarding(t *testing.T) {
- ctx := context.Background()
- s := inet.NewTestStack()
-
- var cases = []struct {
- comment string
- initial bool
- str string
- final bool
- }{
- {
- comment: `Forwarding is disabled; write 1 and enable forwarding`,
- initial: false,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is disabled; write 0 and disable forwarding`,
- initial: false,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is enabled; write 1 and enable forwarding`,
- initial: true,
- str: "1",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 0 and disable forwarding`,
- initial: true,
- str: "0",
- final: false,
- },
- {
- comment: `Forwarding is disabled; write 2404 and enable forwarding`,
- initial: false,
- str: "2404",
- final: true,
- },
- {
- comment: `Forwarding is enabled; write 2404 and enable forwarding`,
- initial: true,
- str: "2404",
- final: true,
- },
- }
- for _, c := range cases {
- t.Run(c.comment, func(t *testing.T) {
- s.IPForwarding = c.initial
-
- file := &ipForwarding{stack: s, enabled: &c.initial}
-
- // Write the values.
- src := usermem.BytesIOSequence([]byte(c.str))
- if n, err := file.Write(ctx, src, 0); n != int64(len(c.str)) || err != nil {
- t.Errorf("file.Write(ctx, nil, %q, 0) = (%d, %v); want (%d, nil)", c.str, n, err, len(c.str))
- }
-
- // Read the values from the stack and check them.
- if got, want := s.IPForwarding, c.final; got != want {
- t.Errorf("s.IPForwarding incorrect; got: %v, want: %v", got, want)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/proc/tasks_test.go b/pkg/sentry/fsimpl/proc/tasks_test.go
deleted file mode 100644
index 7ee6227a9..000000000
--- a/pkg/sentry/fsimpl/proc/tasks_test.go
+++ /dev/null
@@ -1,510 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proc
-
-import (
- "fmt"
- "math"
- "path"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-var (
- // Next offset 256 by convention. Adds 1 for the next offset.
- selfLink = vfs.Dirent{Type: linux.DT_LNK, NextOff: 256 + 0 + 1}
- threadSelfLink = vfs.Dirent{Type: linux.DT_LNK, NextOff: 256 + 1 + 1}
-
- // /proc/[pid] next offset starts at 256+2 (files above), then adds the
- // PID, and adds 1 for the next offset.
- proc1 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 1 + 1}
- proc2 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 2 + 1}
- proc3 = vfs.Dirent{Type: linux.DT_DIR, NextOff: 258 + 3 + 1}
-)
-
-var (
- tasksStaticFiles = map[string]testutil.DirentType{
- "cpuinfo": linux.DT_REG,
- "filesystems": linux.DT_REG,
- "loadavg": linux.DT_REG,
- "meminfo": linux.DT_REG,
- "mounts": linux.DT_LNK,
- "net": linux.DT_LNK,
- "self": linux.DT_LNK,
- "stat": linux.DT_REG,
- "sys": linux.DT_DIR,
- "thread-self": linux.DT_LNK,
- "uptime": linux.DT_REG,
- "version": linux.DT_REG,
- }
- tasksStaticFilesNextOffs = map[string]int64{
- "self": selfLink.NextOff,
- "thread-self": threadSelfLink.NextOff,
- }
- taskStaticFiles = map[string]testutil.DirentType{
- "auxv": linux.DT_REG,
- "cgroup": linux.DT_REG,
- "cwd": linux.DT_LNK,
- "cmdline": linux.DT_REG,
- "comm": linux.DT_REG,
- "environ": linux.DT_REG,
- "exe": linux.DT_LNK,
- "fd": linux.DT_DIR,
- "fdinfo": linux.DT_DIR,
- "gid_map": linux.DT_REG,
- "io": linux.DT_REG,
- "maps": linux.DT_REG,
- "mem": linux.DT_REG,
- "mountinfo": linux.DT_REG,
- "mounts": linux.DT_REG,
- "net": linux.DT_DIR,
- "ns": linux.DT_DIR,
- "oom_score": linux.DT_REG,
- "oom_score_adj": linux.DT_REG,
- "smaps": linux.DT_REG,
- "stat": linux.DT_REG,
- "statm": linux.DT_REG,
- "status": linux.DT_REG,
- "task": linux.DT_DIR,
- "uid_map": linux.DT_REG,
- }
-)
-
-func setup(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Error creating kernel: %v", err)
- }
-
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
-
- k.VFS().MustRegisterFilesystemType(Name, &FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- mntns, err := k.VFS().NewMountNamespace(ctx, creds, "", tmpfs.Name, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("NewMountNamespace(): %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- pop := &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse("/proc"),
- }
- if err := k.VFS().MkdirAt(ctx, creds, pop, &vfs.MkdirOptions{Mode: 0777}); err != nil {
- t.Fatalf("MkDir(/proc): %v", err)
- }
-
- pop = &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse("/proc"),
- }
- mntOpts := &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: &InternalData{
- Cgroups: map[string]string{
- "cpuset": "/foo/cpuset",
- "memory": "/foo/memory",
- },
- },
- },
- }
- if _, err := k.VFS().MountAt(ctx, creds, "", pop, Name, mntOpts); err != nil {
- t.Fatalf("MountAt(/proc): %v", err)
- }
- return testutil.NewSystem(ctx, t, k.VFS(), mntns)
-}
-
-func TestTasksEmpty(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc"))
- s.AssertAllDirentTypes(collector, tasksStaticFiles)
- s.AssertDirentOffsets(collector, tasksStaticFilesNextOffs)
-}
-
-func TestTasks(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- expectedDirents := make(map[string]testutil.DirentType)
- for n, d := range tasksStaticFiles {
- expectedDirents[n] = d
- }
-
- k := kernel.KernelFromContext(s.Ctx)
- var tasks []*kernel.Task
- for i := 0; i < 5; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- tasks = append(tasks, task)
- expectedDirents[fmt.Sprintf("%d", i+1)] = linux.DT_DIR
- }
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc"))
- s.AssertAllDirentTypes(collector, expectedDirents)
- s.AssertDirentOffsets(collector, tasksStaticFilesNextOffs)
-
- lastPid := 0
- dirents := collector.OrderedDirents()
- doneSkippingNonTaskDirs := false
- for _, d := range dirents {
- pid, err := strconv.Atoi(d.Name)
- if err != nil {
- if !doneSkippingNonTaskDirs {
- // We haven't gotten to the task dirs yet.
- continue
- }
- t.Fatalf("Invalid process directory %q", d.Name)
- }
- doneSkippingNonTaskDirs = true
- if lastPid > pid {
- t.Errorf("pids not in order: %v", dirents)
- }
- found := false
- for _, t := range tasks {
- if k.TaskSet().Root.IDOfTask(t) == kernel.ThreadID(pid) {
- found = true
- }
- }
- if !found {
- t.Errorf("Additional task ID %d listed: %v", pid, tasks)
- }
- // Next offset starts at 256+2 ('self' and 'thread-self'), then adds the
- // PID, and adds 1 for the next offset.
- if want := int64(256 + 2 + pid + 1); d.NextOff != want {
- t.Errorf("Wrong dirent offset want: %d got: %d: %+v", want, d.NextOff, d)
- }
- }
- if !doneSkippingNonTaskDirs {
- t.Fatalf("Never found any process directories.")
- }
-
- // Test lookup.
- for _, path := range []string{"/proc/1", "/proc/2"} {
- fd, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot(path),
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt(%q) failed: %v", path, err)
- }
- defer fd.DecRef(s.Ctx)
- buf := make([]byte, 1)
- bufIOSeq := usermem.BytesIOSequence(buf)
- if _, err := fd.Read(s.Ctx, bufIOSeq, vfs.ReadOptions{}); err != syserror.EISDIR {
- t.Errorf("wrong error reading directory: %v", err)
- }
- }
-
- if _, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot("/proc/9999"),
- &vfs.OpenOptions{},
- ); err != syserror.ENOENT {
- t.Fatalf("wrong error from vfsfs.OpenAt(/proc/9999): %v", err)
- }
-}
-
-func TestTasksOffset(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- for i := 0; i < 3; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- if _, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root); err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- }
-
- for _, tc := range []struct {
- name string
- offset int64
- wants map[string]vfs.Dirent
- }{
- {
- name: "small offset",
- offset: 100,
- wants: map[string]vfs.Dirent{
- "self": selfLink,
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "offset at start",
- offset: 256,
- wants: map[string]vfs.Dirent{
- "self": selfLink,
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip /proc/self",
- offset: 257,
- wants: map[string]vfs.Dirent{
- "thread-self": threadSelfLink,
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip symlinks",
- offset: 258,
- wants: map[string]vfs.Dirent{
- "1": proc1,
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "skip first process",
- offset: 260,
- wants: map[string]vfs.Dirent{
- "2": proc2,
- "3": proc3,
- },
- },
- {
- name: "last process",
- offset: 261,
- wants: map[string]vfs.Dirent{
- "3": proc3,
- },
- },
- {
- name: "after last",
- offset: 262,
- wants: nil,
- },
- {
- name: "TaskLimit+1",
- offset: kernel.TasksLimit + 1,
- wants: nil,
- },
- {
- name: "max",
- offset: math.MaxInt64,
- wants: nil,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- s := s.WithSubtest(t)
- fd, err := s.VFS.OpenAt(
- s.Ctx,
- s.Creds,
- s.PathOpAtRoot("/proc"),
- &vfs.OpenOptions{},
- )
- if err != nil {
-				t.Fatalf("vfsfs.OpenAt(/proc) failed: %v", err)
- }
- defer fd.DecRef(s.Ctx)
- if _, err := fd.Seek(s.Ctx, tc.offset, linux.SEEK_SET); err != nil {
- t.Fatalf("Seek(%d, SEEK_SET): %v", tc.offset, err)
- }
-
- var collector testutil.DirentCollector
- if err := fd.IterDirents(s.Ctx, &collector); err != nil {
- t.Fatalf("IterDirent(): %v", err)
- }
-
- expectedTypes := make(map[string]testutil.DirentType)
- expectedOffsets := make(map[string]int64)
- for name, want := range tc.wants {
- expectedTypes[name] = want.Type
- if want.NextOff != 0 {
- expectedOffsets[name] = want.NextOff
- }
- }
-
- collector.SkipDotsChecks(true) // We seek()ed past the dots.
- s.AssertAllDirentTypes(&collector, expectedTypes)
- s.AssertDirentOffsets(&collector, expectedOffsets)
- })
- }
-}
-
-func TestTask(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- _, err := testutil.CreateTask(s.Ctx, "name", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
-
- collector := s.ListDirents(s.PathOpAtRoot("/proc/1"))
- s.AssertAllDirentTypes(collector, taskStaticFiles)
-}
-
-func TestProcSelf(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, "name", tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
-
- collector := s.WithTemporaryContext(task).ListDirents(&vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse("/proc/self/"),
- FollowFinalSymlink: true,
- })
- s.AssertAllDirentTypes(collector, taskStaticFiles)
-}
-
-func iterateDir(ctx context.Context, t *testing.T, s *testutil.System, fd *vfs.FileDescription) {
- t.Logf("Iterating: %s", fd.MappedName(ctx))
-
- var collector testutil.DirentCollector
- if err := fd.IterDirents(ctx, &collector); err != nil {
- t.Fatalf("IterDirents(): %v", err)
- }
- if err := collector.Contains(".", linux.DT_DIR); err != nil {
- t.Error(err.Error())
- }
- if err := collector.Contains("..", linux.DT_DIR); err != nil {
- t.Error(err.Error())
- }
-
- for _, d := range collector.Dirents() {
- if d.Name == "." || d.Name == ".." {
- continue
- }
- absPath := path.Join(fd.MappedName(ctx), d.Name)
- if d.Type == linux.DT_LNK {
- link, err := s.VFS.ReadlinkAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse(absPath)},
- )
- if err != nil {
- t.Errorf("vfsfs.ReadlinkAt(%v) failed: %v", absPath, err)
- } else {
- t.Logf("Skipping symlink: %s => %s", absPath, link)
- }
- continue
- }
-
- t.Logf("Opening: %s", absPath)
- child, err := s.VFS.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse(absPath)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Errorf("vfsfs.OpenAt(%v) failed: %v", absPath, err)
- continue
- }
- defer child.DecRef(ctx)
- stat, err := child.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("Stat(%v) failed: %v", absPath, err)
- }
- if got := linux.FileMode(stat.Mode).DirentType(); got != d.Type {
- t.Errorf("wrong file mode, stat: %v, dirent: %v", got, d.Type)
- }
- if d.Type == linux.DT_DIR {
- // Found another dir, let's do it again!
- iterateDir(ctx, t, s, child)
- }
- }
-}
-
-// TestTree iterates all directories and stats every file.
-func TestTree(t *testing.T) {
- s := setup(t)
- defer s.Destroy()
-
- k := kernel.KernelFromContext(s.Ctx)
-
- pop := &vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse("test-file"),
- }
- opts := &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_CREAT,
- Mode: 0777,
- }
- file, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, opts)
- if err != nil {
- t.Fatalf("failed to create test file: %v", err)
- }
- defer file.DecRef(s.Ctx)
-
- var tasks []*kernel.Task
- for i := 0; i < 5; i++ {
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(s.Ctx, fmt.Sprintf("name-%d", i), tc, s.MntNs, s.Root, s.Root)
- if err != nil {
- t.Fatalf("CreateTask(): %v", err)
- }
- // Add file to populate /proc/[pid]/fd and fdinfo directories.
- task.FDTable().NewFDVFS2(task, 0, file, kernel.FDFlags{})
- tasks = append(tasks, task)
- }
-
- ctx := tasks[0]
- fd, err := s.VFS.OpenAt(
- ctx,
- auth.CredentialsFromContext(s.Ctx),
- &vfs.PathOperation{Root: s.Root, Start: s.Root, Path: fspath.Parse("/proc")},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt(/proc) failed: %v", err)
- }
- iterateDir(ctx, t, s, fd)
- fd.DecRef(ctx)
-}
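
The offset assertions in the deleted TestTasks and TestTasksOffset above encode the layout procfs uses for directory offsets: "." and ".." come first, the static entries follow, "self" and "thread-self" occupy offsets 256 and 257, and a task directory for PID p reports a next offset of 256 + 2 + p + 1. A minimal, self-contained sketch of that arithmetic (illustrative only; the names below are assumptions, not part of this change):

// Illustrative sketch only; expectedNextOff is an assumed helper mirroring
// the offset arithmetic asserted by TestTasks/TestTasksOffset above.
package main

import "fmt"

// expectedNextOff returns the NextOff a /proc/[pid] dirent is expected to
// report: 256 reserved slots, plus 2 for "self" and "thread-self", plus the
// PID, plus 1 to point at the entry that follows.
func expectedNextOff(pid int) int64 {
	return int64(256 + 2 + pid + 1)
}

func main() {
	// /proc/1 reports 260, which is why seeking to offset 260 in
	// TestTasksOffset ("skip first process") starts listing at "2".
	fmt.Println(expectedNextOff(1)) // 260
}
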
diff --git a/pkg/sentry/fsimpl/signalfd/BUILD b/pkg/sentry/fsimpl/signalfd/BUILD
deleted file mode 100644
index adb610213..000000000
--- a/pkg/sentry/fsimpl/signalfd/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "signalfd",
- srcs = ["signalfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/kernel",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go b/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go
new file mode 100644
index 000000000..b3596a886
--- /dev/null
+++ b/pkg/sentry/fsimpl/signalfd/signalfd_state_autogen.go
@@ -0,0 +1,49 @@
+// automatically generated by stateify.
+
+package signalfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (sfd *SignalFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/signalfd.SignalFileDescription"
+}
+
+func (sfd *SignalFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "target",
+ "mask",
+ }
+}
+
+func (sfd *SignalFileDescription) beforeSave() {}
+
+func (sfd *SignalFileDescription) StateSave(stateSinkObject state.Sink) {
+ sfd.beforeSave()
+ stateSinkObject.Save(0, &sfd.vfsfd)
+ stateSinkObject.Save(1, &sfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &sfd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &sfd.NoLockFD)
+ stateSinkObject.Save(4, &sfd.target)
+ stateSinkObject.Save(5, &sfd.mask)
+}
+
+func (sfd *SignalFileDescription) afterLoad() {}
+
+func (sfd *SignalFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &sfd.vfsfd)
+ stateSourceObject.Load(1, &sfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &sfd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &sfd.NoLockFD)
+ stateSourceObject.Load(4, &sfd.target)
+ stateSourceObject.Load(5, &sfd.mask)
+}
+
+func init() {
+ state.Register((*SignalFileDescription)(nil))
+}
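
The generated file above follows the stateify protocol used by every *_state_autogen.go file in this diff: a savable type names itself, lists its fields, then saves and loads those fields by index and registers itself with the state package. A hypothetical type written against the same interface might look like this (a sketch only; the counter type is an assumption, not generated code from this change):

// Hypothetical example of the stateify pattern shown above; counter is an
// assumed type, not part of this change.
package example

import "gvisor.dev/gvisor/pkg/state"

type counter struct {
	value int64
}

func (c *counter) StateTypeName() string { return "example.counter" }

func (c *counter) StateFields() []string { return []string{"value"} }

func (c *counter) beforeSave() {}

func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.value)
}

func (c *counter) afterLoad() {}

func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.value)
}

func init() {
	state.Register((*counter)(nil))
}
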
diff --git a/pkg/sentry/fsimpl/sockfs/BUILD b/pkg/sentry/fsimpl/sockfs/BUILD
deleted file mode 100644
index 9453277b8..000000000
--- a/pkg/sentry/fsimpl/sockfs/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "sockfs",
- srcs = ["sockfs.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go b/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go
new file mode 100644
index 000000000..6a6607555
--- /dev/null
+++ b/pkg/sentry/fsimpl/sockfs/sockfs_state_autogen.go
@@ -0,0 +1,90 @@
+// automatically generated by stateify.
+
+package sockfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (fsType *filesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.filesystemType"
+}
+
+func (fsType *filesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *filesystemType) beforeSave() {}
+
+func (fsType *filesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *filesystemType) afterLoad() {}
+
+func (fsType *filesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sockfs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAttrs)
+ stateSinkObject.Save(1, &i.InodeNoopRefCount)
+ stateSinkObject.Save(2, &i.InodeNotDirectory)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+}
+
+func (i *inode) afterLoad() {}
+
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAttrs)
+ stateSourceObject.Load(1, &i.InodeNoopRefCount)
+ stateSourceObject.Load(2, &i.InodeNotDirectory)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+}
+
+func init() {
+ state.Register((*filesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*inode)(nil))
+}
diff --git a/pkg/sentry/fsimpl/sys/BUILD b/pkg/sentry/fsimpl/sys/BUILD
deleted file mode 100644
index 09043b572..000000000
--- a/pkg/sentry/fsimpl/sys/BUILD
+++ /dev/null
@@ -1,55 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dir_refs",
- out = "dir_refs.go",
- package = "sys",
- prefix = "dir",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "dir",
- },
-)
-
-go_library(
- name = "sys",
- srcs = [
- "dir_refs.go",
- "kcov.go",
- "sys.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/coverage",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/kernfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "sys_test",
- srcs = ["sys_test.go"],
- deps = [
- ":sys",
- "//pkg/abi/linux",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/sys/dir_refs.go b/pkg/sentry/fsimpl/sys/dir_refs.go
new file mode 100644
index 000000000..176a4bf98
--- /dev/null
+++ b/pkg/sentry/fsimpl/sys/dir_refs.go
@@ -0,0 +1,128 @@
+package sys
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const direnableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var dirobj *dir
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type dirRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *dirRefs) RefType() string {
+ return fmt.Sprintf("%T", dirobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *dirRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *dirRefs) LogRefs() bool {
+ return direnableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *dirRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *dirRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *dirRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *dirRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *dirRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *dirRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
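
The generated dirRefs above packs two counters into a single int64: the high 32 bits hold speculative references taken by TryIncRef, the low 32 bits hold real references, and the stored value is offset by one so the zero value already represents a single reference. A standalone sketch of that packing, with hypothetical names and without the refsvfs2 leak-check bookkeeping, shows why TryIncRef can avoid a compare-and-swap loop (assumptions noted in the comments):

// Illustrative sketch only; sketchRefs is a hypothetical stand-in for the
// generated dirRefs, keeping the same [32-bit speculative]:[32-bit real]
// layout with the real count stored as refs-1.
package main

import (
	"fmt"
	"sync/atomic"
)

const speculativeRef = 1 << 32

type sketchRefs struct {
	refCount int64
}

// tryIncRef takes a speculative reference first. If the low 32 bits are
// negative, the last real reference is already gone, so the speculative
// reference is dropped; otherwise it is converted into a real reference.
func (r *sketchRefs) tryIncRef() bool {
	if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// decRef drops a real reference; reaching -1 means the object is dead.
func (r *sketchRefs) decRef() {
	if atomic.AddInt64(&r.refCount, -1) == -1 {
		fmt.Println("last reference dropped; destroy the object")
	}
}

func main() {
	var r sketchRefs           // zero value == one real reference
	fmt.Println(r.tryIncRef()) // true: now two references
	r.decRef()                 // back to one
	r.decRef()                 // prints the destroy message
	fmt.Println(r.tryIncRef()) // false: the object is already dead
}
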
diff --git a/pkg/sentry/fsimpl/sys/sys_state_autogen.go b/pkg/sentry/fsimpl/sys/sys_state_autogen.go
new file mode 100644
index 000000000..13cbe9a90
--- /dev/null
+++ b/pkg/sentry/fsimpl/sys/sys_state_autogen.go
@@ -0,0 +1,247 @@
+// automatically generated by stateify.
+
+package sys
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (r *dirRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.dirRefs"
+}
+
+func (r *dirRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *dirRefs) beforeSave() {}
+
+func (r *dirRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *dirRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (i *kcovInode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.kcovInode"
+}
+
+func (i *kcovInode) StateFields() []string {
+ return []string{
+ "InodeAttrs",
+ "InodeNoopRefCount",
+ "InodeNotDirectory",
+ "InodeNotSymlink",
+ "implStatFS",
+ }
+}
+
+func (i *kcovInode) beforeSave() {}
+
+func (i *kcovInode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.InodeAttrs)
+ stateSinkObject.Save(1, &i.InodeNoopRefCount)
+ stateSinkObject.Save(2, &i.InodeNotDirectory)
+ stateSinkObject.Save(3, &i.InodeNotSymlink)
+ stateSinkObject.Save(4, &i.implStatFS)
+}
+
+func (i *kcovInode) afterLoad() {}
+
+func (i *kcovInode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.InodeAttrs)
+ stateSourceObject.Load(1, &i.InodeNoopRefCount)
+ stateSourceObject.Load(2, &i.InodeNotDirectory)
+ stateSourceObject.Load(3, &i.InodeNotSymlink)
+ stateSourceObject.Load(4, &i.implStatFS)
+}
+
+func (fd *kcovFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.kcovFD"
+}
+
+func (fd *kcovFD) StateFields() []string {
+ return []string{
+ "FileDescriptionDefaultImpl",
+ "NoLockFD",
+ "vfsfd",
+ "inode",
+ "kcov",
+ }
+}
+
+func (fd *kcovFD) beforeSave() {}
+
+func (fd *kcovFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(1, &fd.NoLockFD)
+ stateSinkObject.Save(2, &fd.vfsfd)
+ stateSinkObject.Save(3, &fd.inode)
+ stateSinkObject.Save(4, &fd.kcov)
+}
+
+func (fd *kcovFD) afterLoad() {}
+
+func (fd *kcovFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(1, &fd.NoLockFD)
+ stateSourceObject.Load(2, &fd.vfsfd)
+ stateSourceObject.Load(3, &fd.inode)
+ stateSourceObject.Load(4, &fd.kcov)
+}
+
+func (fsType *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.FilesystemType"
+}
+
+func (fsType *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fsType *FilesystemType) beforeSave() {}
+
+func (fsType *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fsType.beforeSave()
+}
+
+func (fsType *FilesystemType) afterLoad() {}
+
+func (fsType *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "Filesystem",
+ "devMinor",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.Filesystem)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.Filesystem)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (d *dir) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.dir"
+}
+
+func (d *dir) StateFields() []string {
+ return []string{
+ "dirRefs",
+ "InodeAlwaysValid",
+ "InodeAttrs",
+ "InodeNotSymlink",
+ "InodeDirectoryNoNewChildren",
+ "InodeTemporary",
+ "OrderedChildren",
+ "locks",
+ }
+}
+
+func (d *dir) beforeSave() {}
+
+func (d *dir) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dirRefs)
+ stateSinkObject.Save(1, &d.InodeAlwaysValid)
+ stateSinkObject.Save(2, &d.InodeAttrs)
+ stateSinkObject.Save(3, &d.InodeNotSymlink)
+ stateSinkObject.Save(4, &d.InodeDirectoryNoNewChildren)
+ stateSinkObject.Save(5, &d.InodeTemporary)
+ stateSinkObject.Save(6, &d.OrderedChildren)
+ stateSinkObject.Save(7, &d.locks)
+}
+
+func (d *dir) afterLoad() {}
+
+func (d *dir) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dirRefs)
+ stateSourceObject.Load(1, &d.InodeAlwaysValid)
+ stateSourceObject.Load(2, &d.InodeAttrs)
+ stateSourceObject.Load(3, &d.InodeNotSymlink)
+ stateSourceObject.Load(4, &d.InodeDirectoryNoNewChildren)
+ stateSourceObject.Load(5, &d.InodeTemporary)
+ stateSourceObject.Load(6, &d.OrderedChildren)
+ stateSourceObject.Load(7, &d.locks)
+}
+
+func (c *cpuFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.cpuFile"
+}
+
+func (c *cpuFile) StateFields() []string {
+ return []string{
+ "implStatFS",
+ "DynamicBytesFile",
+ "maxCores",
+ }
+}
+
+func (c *cpuFile) beforeSave() {}
+
+func (c *cpuFile) StateSave(stateSinkObject state.Sink) {
+ c.beforeSave()
+ stateSinkObject.Save(0, &c.implStatFS)
+ stateSinkObject.Save(1, &c.DynamicBytesFile)
+ stateSinkObject.Save(2, &c.maxCores)
+}
+
+func (c *cpuFile) afterLoad() {}
+
+func (c *cpuFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &c.implStatFS)
+ stateSourceObject.Load(1, &c.DynamicBytesFile)
+ stateSourceObject.Load(2, &c.maxCores)
+}
+
+func (i *implStatFS) StateTypeName() string {
+ return "pkg/sentry/fsimpl/sys.implStatFS"
+}
+
+func (i *implStatFS) StateFields() []string {
+ return []string{}
+}
+
+func (i *implStatFS) beforeSave() {}
+
+func (i *implStatFS) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+}
+
+func (i *implStatFS) afterLoad() {}
+
+func (i *implStatFS) StateLoad(stateSourceObject state.Source) {
+}
+
+func init() {
+ state.Register((*dirRefs)(nil))
+ state.Register((*kcovInode)(nil))
+ state.Register((*kcovFD)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*dir)(nil))
+ state.Register((*cpuFile)(nil))
+ state.Register((*implStatFS)(nil))
+}
diff --git a/pkg/sentry/fsimpl/sys/sys_test.go b/pkg/sentry/fsimpl/sys/sys_test.go
deleted file mode 100644
index 0a0d914cc..000000000
--- a/pkg/sentry/fsimpl/sys/sys_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sys_test
-
-import (
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/sys"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-func newTestSystem(t *testing.T) *testutil.System {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("Failed to create test kernel: %v", err)
- }
- ctx := k.SupervisorContext()
- creds := auth.CredentialsFromContext(ctx)
- k.VFS().MustRegisterFilesystemType(sys.Name, sys.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- mns, err := k.VFS().NewMountNamespace(ctx, creds, "", sys.Name, &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("Failed to create new mount namespace: %v", err)
- }
- return testutil.NewSystem(ctx, t, k.VFS(), mns)
-}
-
-func TestReadCPUFile(t *testing.T) {
- s := newTestSystem(t)
- defer s.Destroy()
- k := kernel.KernelFromContext(s.Ctx)
- maxCPUCores := k.ApplicationCores()
-
- expected := fmt.Sprintf("0-%d\n", maxCPUCores-1)
-
- for _, fname := range []string{"online", "possible", "present"} {
- pop := s.PathOpAtRoot(fmt.Sprintf("devices/system/cpu/%s", fname))
- fd, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, &vfs.OpenOptions{})
- if err != nil {
- t.Fatalf("OpenAt(pop:%+v) = %+v failed: %v", pop, fd, err)
- }
- defer fd.DecRef(s.Ctx)
- content, err := s.ReadToEnd(fd)
- if err != nil {
- t.Fatalf("Read failed: %v", err)
- }
- if diff := cmp.Diff(expected, content); diff != "" {
- t.Fatalf("Read returned unexpected data:\n--- want\n+++ got\n%v", diff)
- }
- }
-}
-
-func TestSysRootContainsExpectedEntries(t *testing.T) {
- s := newTestSystem(t)
- defer s.Destroy()
- pop := s.PathOpAtRoot("/")
- s.AssertAllDirentTypes(s.ListDirents(pop), map[string]testutil.DirentType{
- "block": linux.DT_DIR,
- "bus": linux.DT_DIR,
- "class": linux.DT_DIR,
- "dev": linux.DT_DIR,
- "devices": linux.DT_DIR,
- "firmware": linux.DT_DIR,
- "fs": linux.DT_DIR,
- "kernel": linux.DT_DIR,
- "module": linux.DT_DIR,
- "power": linux.DT_DIR,
- })
-}
diff --git a/pkg/sentry/fsimpl/testutil/BUILD b/pkg/sentry/fsimpl/testutil/BUILD
deleted file mode 100644
index 400a97996..000000000
--- a/pkg/sentry/fsimpl/testutil/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "testutil",
- testonly = 1,
- srcs = [
- "kernel.go",
- "testutil.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/cpuid",
- "//pkg/fspath",
- "//pkg/memutil",
- "//pkg/sentry/fsbridge",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/sched",
- "//pkg/sentry/limits",
- "//pkg/sentry/loader",
- "//pkg/sentry/mm",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/platform/kvm",
- "//pkg/sentry/platform/ptrace",
- "//pkg/sentry/time",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/testutil/kernel.go b/pkg/sentry/fsimpl/testutil/kernel.go
deleted file mode 100644
index 738c0c9cc..000000000
--- a/pkg/sentry/fsimpl/testutil/kernel.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "flag"
- "fmt"
- "os"
- "runtime"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/cpuid"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/memutil"
- "gvisor.dev/gvisor/pkg/sentry/fsbridge"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/sched"
- "gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/sentry/loader"
- "gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/sentry/pgalloc"
- "gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/sentry/time"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-
-	// Platforms are pluggable.
- _ "gvisor.dev/gvisor/pkg/sentry/platform/kvm"
- _ "gvisor.dev/gvisor/pkg/sentry/platform/ptrace"
-)
-
-var (
- platformFlag = flag.String("platform", "ptrace", "specify which platform to use")
-)
-
-// Boot initializes a new bare bones kernel for test.
-func Boot() (*kernel.Kernel, error) {
- platformCtr, err := platform.Lookup(*platformFlag)
- if err != nil {
- return nil, fmt.Errorf("platform not found: %v", err)
- }
- deviceFile, err := platformCtr.OpenDevice()
- if err != nil {
- return nil, fmt.Errorf("creating platform: %v", err)
- }
- plat, err := platformCtr.New(deviceFile)
- if err != nil {
- return nil, fmt.Errorf("creating platform: %v", err)
- }
-
- kernel.VFS2Enabled = true
- k := &kernel.Kernel{
- Platform: plat,
- }
-
- mf, err := createMemoryFile()
- if err != nil {
- return nil, err
- }
- k.SetMemoryFile(mf)
-
- // Pass k as the platform since it is savable, unlike the actual platform.
- vdso, err := loader.PrepareVDSO(k)
- if err != nil {
- return nil, fmt.Errorf("creating vdso: %v", err)
- }
-
- // Create timekeeper.
- tk, err := kernel.NewTimekeeper(k, vdso.ParamPage.FileRange())
- if err != nil {
- return nil, fmt.Errorf("creating timekeeper: %v", err)
- }
- tk.SetClocks(time.NewCalibratedClocks())
-
- creds := auth.NewRootCredentials(auth.NewRootUserNamespace())
-
-	// Initialize the Kernel object, which is required by the Context passed
- // to createVFS in order to mount (among other things) procfs.
- if err = k.Init(kernel.InitKernelArgs{
- ApplicationCores: uint(runtime.GOMAXPROCS(-1)),
- FeatureSet: cpuid.HostFeatureSet(),
- Timekeeper: tk,
- RootUserNamespace: creds.UserNamespace,
- Vdso: vdso,
- RootUTSNamespace: kernel.NewUTSNamespace("hostname", "domain", creds.UserNamespace),
- RootIPCNamespace: kernel.NewIPCNamespace(creds.UserNamespace),
- RootAbstractSocketNamespace: kernel.NewAbstractSocketNamespace(),
- PIDNamespace: kernel.NewRootPIDNamespace(creds.UserNamespace),
- }); err != nil {
- return nil, fmt.Errorf("initializing kernel: %v", err)
- }
-
- k.VFS().MustRegisterFilesystemType(tmpfs.Name, &tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- AllowUserList: true,
- })
-
- ls, err := limits.NewLinuxLimitSet()
- if err != nil {
- return nil, err
- }
- tg := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, ls)
- k.TestOnly_SetGlobalInit(tg)
-
- return k, nil
-}
-
-// CreateTask creates a new bare bones task for tests.
-func CreateTask(ctx context.Context, name string, tc *kernel.ThreadGroup, mntns *vfs.MountNamespace, root, cwd vfs.VirtualDentry) (*kernel.Task, error) {
- k := kernel.KernelFromContext(ctx)
- if k == nil {
- return nil, fmt.Errorf("cannot find kernel from context")
- }
-
- exe, err := newFakeExecutable(ctx, k.VFS(), auth.CredentialsFromContext(ctx), root)
- if err != nil {
- return nil, err
- }
- m := mm.NewMemoryManager(k, k, k.SleepForAddressSpaceActivation)
- m.SetExecutable(ctx, fsbridge.NewVFSFile(exe))
-
- config := &kernel.TaskConfig{
- Kernel: k,
- ThreadGroup: tc,
- TaskContext: &kernel.TaskContext{Name: name, MemoryManager: m},
- Credentials: auth.CredentialsFromContext(ctx),
- NetworkNamespace: k.RootNetworkNamespace(),
- AllowedCPUMask: sched.NewFullCPUSet(k.ApplicationCores()),
- UTSNamespace: kernel.UTSNamespaceFromContext(ctx),
- IPCNamespace: kernel.IPCNamespaceFromContext(ctx),
- AbstractSocketNamespace: kernel.NewAbstractSocketNamespace(),
- MountNamespaceVFS2: mntns,
- FSContext: kernel.NewFSContextVFS2(root, cwd, 0022),
- FDTable: k.NewFDTable(),
- }
- t, err := k.TaskSet().NewTask(ctx, config)
- if err != nil {
- config.ThreadGroup.Release(ctx)
- return nil, err
- }
- return t, nil
-}
-
-func newFakeExecutable(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, root vfs.VirtualDentry) (*vfs.FileDescription, error) {
- const name = "executable"
- pop := &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }
- opts := &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_CREAT,
- Mode: 0777,
- }
- return vfsObj.OpenAt(ctx, creds, pop, opts)
-}
-
-func createMemoryFile() (*pgalloc.MemoryFile, error) {
- const memfileName = "test-memory"
- memfd, err := memutil.CreateMemFD(memfileName, 0)
- if err != nil {
- return nil, fmt.Errorf("error creating memfd: %v", err)
- }
- memfile := os.NewFile(uintptr(memfd), memfileName)
- mf, err := pgalloc.NewMemoryFile(memfile, pgalloc.MemoryFileOpts{})
- if err != nil {
- memfile.Close()
- return nil, fmt.Errorf("error creating pgalloc.MemoryFile: %v", err)
- }
- return mf, nil
-}
diff --git a/pkg/sentry/fsimpl/testutil/testutil.go b/pkg/sentry/fsimpl/testutil/testutil.go
deleted file mode 100644
index 1a8525b06..000000000
--- a/pkg/sentry/fsimpl/testutil/testutil.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package testutil provides common test utilities for kernfs-based
-// filesystems.
-package testutil
-
-import (
- "fmt"
- "io"
- "strings"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// System represents the context for a single test.
-//
-// Test systems must be explicitly destroyed with System.Destroy.
-type System struct {
- t *testing.T
- Ctx context.Context
- Creds *auth.Credentials
- VFS *vfs.VirtualFilesystem
- Root vfs.VirtualDentry
- MntNs *vfs.MountNamespace
-}
-
-// NewSystem constructs a System.
-//
-// Precondition: Caller must hold a reference on mns, whose ownership
-// is transferred to the new System.
-func NewSystem(ctx context.Context, t *testing.T, v *vfs.VirtualFilesystem, mns *vfs.MountNamespace) *System {
- root := mns.Root()
- root.IncRef()
- s := &System{
- t: t,
- Ctx: ctx,
- Creds: auth.CredentialsFromContext(ctx),
- VFS: v,
- MntNs: mns,
- Root: root,
- }
- return s
-}
-
-// WithSubtest creates a temporary test system with a new test harness,
-// referencing all other resources from the original system. This is useful when
-// a system is reused for multiple subtests, and the T needs to change for each
-// case. Note that this is safe when test cases run in parallel, as all
-// resources referenced by the system are immutable, or handle interior
-// mutations in a thread-safe manner.
-//
-// The returned system must not outlive the original and should not be destroyed
-// via System.Destroy.
-func (s *System) WithSubtest(t *testing.T) *System {
- return &System{
- t: t,
- Ctx: s.Ctx,
- Creds: s.Creds,
- VFS: s.VFS,
- MntNs: s.MntNs,
- Root: s.Root,
- }
-}
-
-// WithTemporaryContext constructs a temporary test system with a new context
-// ctx. The temporary system borrows all resources and references from the
-// original system. The returned temporary system must not outlive the original
-// system, and should not be destroyed via System.Destroy.
-func (s *System) WithTemporaryContext(ctx context.Context) *System {
- return &System{
- t: s.t,
- Ctx: ctx,
- Creds: s.Creds,
- VFS: s.VFS,
- MntNs: s.MntNs,
- Root: s.Root,
- }
-}
-
-// Destroy releases resources associated with a test system.
-func (s *System) Destroy() {
- s.Root.DecRef(s.Ctx)
- s.MntNs.DecRef(s.Ctx) // Reference on MntNs passed to NewSystem.
-}
-
-// ReadToEnd reads the contents of fd until EOF to a string.
-func (s *System) ReadToEnd(fd *vfs.FileDescription) (string, error) {
- buf := make([]byte, usermem.PageSize)
- bufIOSeq := usermem.BytesIOSequence(buf)
- opts := vfs.ReadOptions{}
-
- var content strings.Builder
- for {
- n, err := fd.Read(s.Ctx, bufIOSeq, opts)
- if n == 0 || err != nil {
- if err == io.EOF {
- err = nil
- }
- return content.String(), err
- }
- content.Write(buf[:n])
- }
-}
-
-// PathOpAtRoot constructs a PathOperation with the given path from
-// the root of the filesystem.
-func (s *System) PathOpAtRoot(path string) *vfs.PathOperation {
- return &vfs.PathOperation{
- Root: s.Root,
- Start: s.Root,
- Path: fspath.Parse(path),
- }
-}
-
-// GetDentryOrDie attempts to resolve a dentry referred to by the
-// provided path operation. If unsuccessful, the test fails.
-func (s *System) GetDentryOrDie(pop *vfs.PathOperation) vfs.VirtualDentry {
- vd, err := s.VFS.GetDentryAt(s.Ctx, s.Creds, pop, &vfs.GetDentryOptions{})
- if err != nil {
- s.t.Fatalf("GetDentryAt(pop:%+v) failed: %v", pop, err)
- }
- return vd
-}
-
-// DirentType is an alias for values for linux_dirent64.d_type.
-type DirentType = uint8
-
-// ListDirents lists the Dirents for a directory at pop.
-func (s *System) ListDirents(pop *vfs.PathOperation) *DirentCollector {
- fd, err := s.VFS.OpenAt(s.Ctx, s.Creds, pop, &vfs.OpenOptions{Flags: linux.O_RDONLY})
- if err != nil {
- s.t.Fatalf("OpenAt for PathOperation %+v failed: %v", pop, err)
- }
- defer fd.DecRef(s.Ctx)
-
- collector := &DirentCollector{}
- if err := fd.IterDirents(s.Ctx, collector); err != nil {
- s.t.Fatalf("IterDirent failed: %v", err)
- }
- return collector
-}
-
-// AssertAllDirentTypes verifies that the set of dirents in collector contains
-// exactly the specified set of expected entries. AssertAllDirentTypes respects
-// collector.skipDots, and implicitly checks for "." and ".." accordingly.
-func (s *System) AssertAllDirentTypes(collector *DirentCollector, expected map[string]DirentType) {
- if expected == nil {
- expected = make(map[string]DirentType)
- }
- // Also implicitly check for "." and "..", if enabled.
- if !collector.skipDots {
- expected["."] = linux.DT_DIR
- expected[".."] = linux.DT_DIR
- }
-
- dentryTypes := make(map[string]DirentType)
- collector.mu.Lock()
- for _, dirent := range collector.dirents {
- dentryTypes[dirent.Name] = dirent.Type
- }
- collector.mu.Unlock()
- if diff := cmp.Diff(expected, dentryTypes); diff != "" {
- s.t.Fatalf("IterDirent had unexpected results:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-// AssertDirentOffsets verifies that collector contains at least the entries
-// specified in expected, with the given NextOff field. Entries specified in
-// expected but missing from collector result in failure. Extra entries in
-// collector are ignored. AssertDirentOffsets respects collector.skipDots, and
-// implicitly checks for "." and ".." accordingly.
-func (s *System) AssertDirentOffsets(collector *DirentCollector, expected map[string]int64) {
- // Also implicitly check for "." and "..", if enabled.
- if !collector.skipDots {
- expected["."] = 1
- expected[".."] = 2
- }
-
- dentryNextOffs := make(map[string]int64)
- collector.mu.Lock()
- for _, dirent := range collector.dirents {
- // Ignore extra entries in dentries that are not in expected.
- if _, ok := expected[dirent.Name]; ok {
- dentryNextOffs[dirent.Name] = dirent.NextOff
- }
- }
- collector.mu.Unlock()
- if diff := cmp.Diff(expected, dentryNextOffs); diff != "" {
- s.t.Fatalf("IterDirent had unexpected results:\n--- want\n+++ got\n%v", diff)
- }
-}
-
-// DirentCollector provides an implementation for vfs.IterDirentsCallback for
-// testing. It simply iterates to the end of a given directory FD and collects
-// all dirents emitted by the callback.
-type DirentCollector struct {
- mu sync.Mutex
- order []*vfs.Dirent
- dirents map[string]*vfs.Dirent
- // When the collector is used in various Assert* functions, should "." and
- // ".." be implicitly checked?
- skipDots bool
-}
-
-// SkipDotsChecks enables or disables the implicit checks on "." and ".." when
-// the collector is used in various Assert* functions. Note that "." and ".."
-// are still collected if passed to d.Handle, so the caller should only disable
-// the checks when they aren't expected.
-func (d *DirentCollector) SkipDotsChecks(value bool) {
- d.skipDots = value
-}
-
-// Handle implements vfs.IterDirentsCallback.Handle.
-func (d *DirentCollector) Handle(dirent vfs.Dirent) error {
- d.mu.Lock()
- if d.dirents == nil {
- d.dirents = make(map[string]*vfs.Dirent)
- }
- d.order = append(d.order, &dirent)
- d.dirents[dirent.Name] = &dirent
- d.mu.Unlock()
- return nil
-}
-
-// Count returns the number of dirents currently in the collector.
-func (d *DirentCollector) Count() int {
- d.mu.Lock()
- defer d.mu.Unlock()
- return len(d.dirents)
-}
-
-// Contains checks whether the collector has a dirent with the given name and
-// type.
-func (d *DirentCollector) Contains(name string, typ uint8) error {
- d.mu.Lock()
- defer d.mu.Unlock()
- dirent, ok := d.dirents[name]
- if !ok {
- return fmt.Errorf("no dirent named %q found", name)
- }
- if dirent.Type != typ {
- return fmt.Errorf("dirent named %q found, but was expecting type %s, got: %+v", name, linux.DirentType.Parse(uint64(typ)), dirent)
- }
- return nil
-}
-
-// Dirents returns all dirents discovered by this collector.
-func (d *DirentCollector) Dirents() map[string]*vfs.Dirent {
- d.mu.Lock()
- dirents := make(map[string]*vfs.Dirent)
- for n, d := range d.dirents {
- dirents[n] = d
- }
- d.mu.Unlock()
- return dirents
-}
-
-// OrderedDirents returns an ordered list of dirents as discovered by this
-// collector.
-func (d *DirentCollector) OrderedDirents() []*vfs.Dirent {
- d.mu.Lock()
- dirents := make([]*vfs.Dirent, len(d.order))
- copy(dirents, d.order)
- d.mu.Unlock()
- return dirents
-}
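
For reference, the harness deleted above was driven the same way in the sys and proc tests earlier in this diff: boot a test kernel, register and mount the filesystem under test, wrap the mount namespace in a System, then list dirents and assert on them. A condensed sketch of that pattern, using only APIs that appear in this diff (the test name is an assumption):

// Condensed usage sketch assembled from sys_test.go and tasks_test.go in this
// diff; TestListSysRoot is a hypothetical test, not part of the change.
package sys_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/fsimpl/sys"
	"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
)

func TestListSysRoot(t *testing.T) {
	k, err := testutil.Boot()
	if err != nil {
		t.Fatalf("testutil.Boot(): %v", err)
	}
	ctx := k.SupervisorContext()
	creds := auth.CredentialsFromContext(ctx)
	k.VFS().MustRegisterFilesystemType(sys.Name, sys.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
		AllowUserMount: true,
	})
	mns, err := k.VFS().NewMountNamespace(ctx, creds, "", sys.Name, &vfs.MountOptions{})
	if err != nil {
		t.Fatalf("NewMountNamespace(): %v", err)
	}

	// NewSystem takes ownership of the reference on mns; Destroy releases it
	// along with the root dentry reference.
	s := testutil.NewSystem(ctx, t, k.VFS(), mns)
	defer s.Destroy()

	collector := s.ListDirents(s.PathOpAtRoot("/"))
	if err := collector.Contains("kernel", linux.DT_DIR); err != nil {
		t.Error(err)
	}
}
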
diff --git a/pkg/sentry/fsimpl/timerfd/BUILD b/pkg/sentry/fsimpl/timerfd/BUILD
deleted file mode 100644
index fbb02a271..000000000
--- a/pkg/sentry/fsimpl/timerfd/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "timerfd",
- srcs = ["timerfd.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/context",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
diff --git a/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go b/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go
new file mode 100644
index 000000000..d3c01e9e7
--- /dev/null
+++ b/pkg/sentry/fsimpl/timerfd/timerfd_state_autogen.go
@@ -0,0 +1,52 @@
+// automatically generated by stateify.
+
+package timerfd
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (tfd *TimerFileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/timerfd.TimerFileDescription"
+}
+
+func (tfd *TimerFileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "events",
+ "timer",
+ "val",
+ }
+}
+
+func (tfd *TimerFileDescription) beforeSave() {}
+
+func (tfd *TimerFileDescription) StateSave(stateSinkObject state.Sink) {
+ tfd.beforeSave()
+ stateSinkObject.Save(0, &tfd.vfsfd)
+ stateSinkObject.Save(1, &tfd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &tfd.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &tfd.NoLockFD)
+ stateSinkObject.Save(4, &tfd.events)
+ stateSinkObject.Save(5, &tfd.timer)
+ stateSinkObject.Save(6, &tfd.val)
+}
+
+func (tfd *TimerFileDescription) afterLoad() {}
+
+func (tfd *TimerFileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &tfd.vfsfd)
+ stateSourceObject.Load(1, &tfd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &tfd.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &tfd.NoLockFD)
+ stateSourceObject.Load(4, &tfd.events)
+ stateSourceObject.Load(5, &tfd.timer)
+ stateSourceObject.Load(6, &tfd.val)
+}
+
+func init() {
+ state.Register((*TimerFileDescription)(nil))
+}
diff --git a/pkg/sentry/fsimpl/tmpfs/BUILD b/pkg/sentry/fsimpl/tmpfs/BUILD
deleted file mode 100644
index fe520b6fd..000000000
--- a/pkg/sentry/fsimpl/tmpfs/BUILD
+++ /dev/null
@@ -1,127 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "dentry_list",
- out = "dentry_list.go",
- package = "tmpfs",
- prefix = "dentry",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dentry",
- "Linker": "*dentry",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "tmpfs",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_template_instance(
- name = "inode_refs",
- out = "inode_refs.go",
- package = "tmpfs",
- prefix = "inode",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "inode",
- },
-)
-
-go_library(
- name = "tmpfs",
- srcs = [
- "dentry_list.go",
- "device_file.go",
- "directory.go",
- "filesystem.go",
- "fstree.go",
- "inode_refs.go",
- "named_pipe.go",
- "regular_file.go",
- "save_restore.go",
- "socket_file.go",
- "symlink.go",
- "tmpfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/amutex",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/fsutil",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/pipe",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/memmap",
- "//pkg/sentry/pgalloc",
- "//pkg/sentry/platform",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/uniqueid",
- "//pkg/sentry/usage",
- "//pkg/sentry/vfs",
- "//pkg/sentry/vfs/memxattr",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "benchmark_test",
- size = "small",
- srcs = ["benchmark_test.go"],
- deps = [
- ":tmpfs",
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/refs",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/tmpfs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- ],
-)
-
-go_test(
- name = "tmpfs_test",
- size = "small",
- srcs = [
- "pipe_test.go",
- "regular_file_test.go",
- "stat_test.go",
- "tmpfs_test.go",
- ],
- library = ":tmpfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go b/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
deleted file mode 100644
index 3cc63e732..000000000
--- a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package benchmark_test
-
-import (
- "fmt"
- "runtime"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/refs"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- _ "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// Differences from stat_benchmark:
-//
-// - Syscall interception, CopyInPath, copyOutStat, and overlayfs overheads are
-// not included.
-//
-// - *MountStat benchmarks use a tmpfs root mount and a tmpfs submount at /tmp.
-// Non-MountStat benchmarks use a tmpfs root mount and no submounts.
-// stat_benchmark uses a varying root mount, a tmpfs submount at /tmp, and a
-// subdirectory /tmp/<top_dir> (assuming TEST_TMPDIR == "/tmp"). Thus
-// stat_benchmark at depth 1 does a comparable amount of work to *MountStat
-// benchmarks at depth 2, and non-MountStat benchmarks at depth 3.
-var depths = []int{1, 2, 3, 8, 64, 100}
-
-const (
- mountPointName = "tmp"
- filename = "gvisor_test_temp_0_1557494568"
-)
-
-// This is copied from syscalls/linux/sys_file.go, with the dependency on
-// kernel.Task stripped out.
-func fileOpOn(ctx context.Context, mntns *fs.MountNamespace, root, wd *fs.Dirent, dirFD int32, path string, resolve bool, fn func(root *fs.Dirent, d *fs.Dirent) error) error {
- var (
- d *fs.Dirent // The file.
- rel *fs.Dirent // The relative directory for search (if required.)
- err error
- )
-
- // Extract the working directory (maybe).
- if len(path) > 0 && path[0] == '/' {
- // Absolute path; rel can be nil.
- } else if dirFD == linux.AT_FDCWD {
- // Need to reference the working directory.
- rel = wd
- } else {
- // Need to extract the given FD.
- return syserror.EBADF
- }
-
- // Lookup the node.
- remainingTraversals := uint(linux.MaxSymlinkTraversals)
- if resolve {
- d, err = mntns.FindInode(ctx, root, rel, path, &remainingTraversals)
- } else {
- d, err = mntns.FindLink(ctx, root, rel, path, &remainingTraversals)
- }
- if err != nil {
- return err
- }
-
- err = fn(root, d)
- d.DecRef(ctx)
- return err
-}
-
-func BenchmarkVFS1TmpfsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
-
- // Create VFS.
- tmpfsFS, ok := fs.FindFilesystem("tmpfs")
- if !ok {
- b.Fatalf("failed to find tmpfs filesystem type")
- }
- rootInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- mntns, err := fs.NewMountNamespace(ctx, rootInode)
- if err != nil {
- b.Fatalf("failed to create mount namespace: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- root := mntns.Root()
- defer root.DecRef(ctx)
- d := root
- d.IncRef()
- defer d.DecRef(ctx)
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- if err := d.Inode.CreateDirectory(ctx, d, name, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- next, err := d.Walk(ctx, root, name)
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- d.DecRef(ctx)
- d = next
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- file, err := d.Inode.Create(ctx, d, filename, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0644))
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- file.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- dirPath := false
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
- if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
- }
- uattr, err := d.Inode.UnstableAttr(ctx)
- if err != nil {
- return err
- }
- // Sanity check.
- if uattr.Perms.User.Execute {
- b.Fatalf("got wrong permissions (%0o)", uattr.Perms.LinuxMode())
- }
- return nil
- })
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS2TmpfsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- b.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- vd := root
- vd.IncRef()
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- pop := vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(name),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- nextVD, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- vd.DecRef(ctx)
- vd = nextVD
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(filename),
- FollowFinalSymlink: true,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: 0644,
- })
- vd.DecRef(ctx)
- vd = vfs.VirtualDentry{}
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- defer fd.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Mode&^linux.S_IFMT != 0644 {
- b.Fatalf("got wrong permissions (%0o)", stat.Mode)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS1TmpfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
-
- // Create VFS.
- tmpfsFS, ok := fs.FindFilesystem("tmpfs")
- if !ok {
- b.Fatalf("failed to find tmpfs filesystem type")
- }
- rootInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- mntns, err := fs.NewMountNamespace(ctx, rootInode)
- if err != nil {
- b.Fatalf("failed to create mount namespace: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create and mount the submount.
- root := mntns.Root()
- defer root.DecRef(ctx)
- if err := root.Inode.CreateDirectory(ctx, root, mountPointName, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create mount point: %v", err)
- }
- mountPoint, err := root.Walk(ctx, root, mountPointName)
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
- submountInode, err := tmpfsFS.Mount(ctx, "tmpfs", fs.MountSourceFlags{}, "", nil)
- if err != nil {
- b.Fatalf("failed to create tmpfs submount: %v", err)
- }
- if err := mntns.Mount(ctx, mountPoint, submountInode); err != nil {
- b.Fatalf("failed to mount tmpfs submount: %v", err)
- }
- filePathBuilder.WriteString(mountPointName)
- filePathBuilder.WriteByte('/')
-
- // Create nested directories with given depth.
- d, err := root.Walk(ctx, root, mountPointName)
- if err != nil {
- b.Fatalf("failed to walk to mount root: %v", err)
- }
- defer d.DecRef(ctx)
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- if err := d.Inode.CreateDirectory(ctx, d, name, fs.FilePermsFromMode(0755)); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- next, err := d.Walk(ctx, root, name)
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- d.DecRef(ctx)
- d = next
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- file, err := d.Inode.Create(ctx, d, filename, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0644))
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- file.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- dirPath := false
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
- if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
- }
- uattr, err := d.Inode.UnstableAttr(ctx)
- if err != nil {
- return err
- }
- // Sanity check.
- if uattr.Perms.User.Execute {
- b.Fatalf("got wrong permissions (%0o)", uattr.Perms.LinuxMode())
- }
- return nil
- })
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func BenchmarkVFS2TmpfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- b.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- b.Fatalf("failed to create tmpfs root mount: %v", err)
- }
- defer mntns.DecRef(ctx)
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
-
- // Create the mount point.
- root := mntns.Root()
- root.IncRef()
- defer root.DecRef(ctx)
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(mountPointName),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create mount point: %v", err)
- }
- // Save the mount point for later use.
- mountPoint, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
- // Create and mount the submount.
- if _, err := vfsObj.MountAt(ctx, creds, "", &pop, "tmpfs", &vfs.MountOptions{}); err != nil {
- b.Fatalf("failed to mount tmpfs submount: %v", err)
- }
- filePathBuilder.WriteString(mountPointName)
- filePathBuilder.WriteByte('/')
-
-			// Create nested directories with the given depth.
- vd, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount root: %v", err)
- }
- for i := depth; i > 0; i-- {
- name := fmt.Sprintf("%d", i)
- pop := vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(name),
- }
- if err := vfsObj.MkdirAt(ctx, creds, &pop, &vfs.MkdirOptions{
- Mode: 0755,
- }); err != nil {
- b.Fatalf("failed to create directory %q: %v", name, err)
- }
- nextVD, err := vfsObj.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to directory %q: %v", name, err)
- }
- vd.DecRef(ctx)
- vd = nextVD
- filePathBuilder.WriteString(name)
- filePathBuilder.WriteByte('/')
- }
-
- // Create the file that will be stat'd.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: vd,
- Path: fspath.Parse(filename),
- FollowFinalSymlink: true,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: 0644,
- })
- vd.DecRef(ctx)
- if err != nil {
- b.Fatalf("failed to create file %q: %v", filename, err)
- }
- fd.DecRef(ctx)
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Mode&^linux.S_IFMT != 0644 {
- b.Fatalf("got wrong permissions (%0o)", stat.Mode)
- }
- }
- // Don't include deferred cleanup in benchmark time.
- b.StopTimer()
- })
- }
-}
-
-func init() {
- // Turn off reference leak checking for a fair comparison between vfs1 and
- // vfs2.
- refs.SetLeakMode(refs.NoLeakChecking)
-}
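-
-// benchmarkPatternSketch is an illustrative, hypothetical helper (not part of
-// this file) summarizing the timing discipline used by the benchmarks above:
-// setup happens before ResetTimer, the measured loop runs b.N times, and
-// StopTimer keeps deferred cleanup out of the reported time.
-func benchmarkPatternSketch(b *testing.B, setup func(), op func() error) {
-	setup()
-	runtime.GC() // Reduce GC noise left over from setup allocations.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		if err := op(); err != nil {
-			b.Fatalf("op failed: %v", err)
-		}
-	}
-	b.StopTimer() // Don't include deferred cleanup in benchmark time.
-}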
diff --git a/pkg/sentry/fsimpl/tmpfs/dentry_list.go b/pkg/sentry/fsimpl/tmpfs/dentry_list.go
new file mode 100644
index 000000000..95e3d13d5
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/dentry_list.go
@@ -0,0 +1,193 @@
+package tmpfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type dentryElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (dentryElementMapper) linkerFor(elem *dentry) *dentry { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type dentryList struct {
+ head *dentry
+ tail *dentry
+}
+
+// Reset resets list l to the empty state.
+func (l *dentryList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+func (l *dentryList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+func (l *dentryList) Front() *dentry {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+func (l *dentryList) Back() *dentry {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+func (l *dentryList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (dentryElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *dentryList) PushFront(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ dentryElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *dentryList) PushBack(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *dentryList) PushBackList(m *dentryList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ dentryElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ dentryElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+func (l *dentryList) InsertAfter(b, e *dentry) {
+ bLinker := dentryElementMapper{}.linkerFor(b)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ dentryElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+func (l *dentryList) InsertBefore(a, e *dentry) {
+ aLinker := dentryElementMapper{}.linkerFor(a)
+ eLinker := dentryElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ dentryElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+func (l *dentryList) Remove(e *dentry) {
+ linker := dentryElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ dentryElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ dentryElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type dentryEntry struct {
+ next *dentry
+ prev *dentry
+}
+
+// Next returns the entry that follows e in the list.
+func (e *dentryEntry) Next() *dentry {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *dentryEntry) Prev() *dentry {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+func (e *dentryEntry) SetNext(elem *dentry) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+func (e *dentryEntry) SetPrev(elem *dentry) {
+ e.prev = elem
+}
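+
+// dentryListUsageSketch is an illustrative, hypothetical example (not part of
+// the generated API) showing how the intrusive list above is used. It assumes
+// that *dentry embeds dentryEntry, so the identity dentryElementMapper applies
+// and the list links live inside the dentry itself.
+func dentryListUsageSketch(d1, d2 *dentry) int {
+	var l dentryList // The zero value is an empty, ready-to-use list.
+	l.PushBack(d1)
+	l.PushBack(d2)
+	n := 0
+	for e := l.Front(); e != nil; e = e.Next() {
+		n++ // Visit each element; iteration needs no allocations.
+	}
+	l.Remove(d1) // O(1) removal; d1's links are cleared.
+	return n
+}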
diff --git a/pkg/sentry/fsimpl/tmpfs/fstree.go b/pkg/sentry/fsimpl/tmpfs/fstree.go
new file mode 100644
index 000000000..d46351488
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/fstree.go
@@ -0,0 +1,55 @@
+package tmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/sentry/vfs"
+)
+
+// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
+// either d2's parent or an ancestor of d2's parent.
+func genericIsAncestorDentry(d, d2 *dentry) bool {
+ for d2 != nil {
+ if d2.parent == d {
+ return true
+ }
+ if d2.parent == d2 {
+ return false
+ }
+ d2 = d2.parent
+ }
+ return false
+}
+
+// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
+func genericParentOrSelf(d *dentry) *dentry {
+ if d.parent != nil {
+ return d.parent
+ }
+ return d
+}
+
+// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
+func genericPrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *dentry, b *fspath.Builder) error {
+ for {
+ if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
+ return vfs.PrependPathAtVFSRootError{}
+ }
+ if mnt != nil && &d.vfsd == mnt.Root() {
+ return nil
+ }
+ if d.parent == nil {
+ return vfs.PrependPathAtNonMountRootError{}
+ }
+ b.PrependComponent(d.name)
+ d = d.parent
+ }
+}
+
+// DebugPathname returns a pathname to d relative to its filesystem root.
+// DebugPathname does not correspond to any Linux function; it's used to
+// generate dentry pathnames for debugging.
+func genericDebugPathname(d *dentry) string {
+ var b fspath.Builder
+ _ = genericPrependPath(vfs.VirtualDentry{}, nil, d, &b)
+ return b.String()
+}
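+
+// fstreeUsageSketch is an illustrative, hypothetical example (not part of the
+// generated helpers) of the parent-chain walks above: for a chain
+// root -> a -> b built in place, root is an ancestor of b but not vice versa,
+// and genericParentOrSelf(root) returns root itself since it has no parent.
+func fstreeUsageSketch() (rootIsAncestor, bIsAncestor bool, top *dentry) {
+	root := &dentry{}
+	a := &dentry{name: "a", parent: root}
+	b := &dentry{name: "b", parent: a}
+	return genericIsAncestorDentry(root, b), genericIsAncestorDentry(b, root), genericParentOrSelf(root)
+}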
diff --git a/pkg/sentry/fsimpl/tmpfs/inode_refs.go b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
new file mode 100644
index 000000000..eff30ca09
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/inode_refs.go
@@ -0,0 +1,128 @@
+package tmpfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const inodeenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var inodeobj *inode
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// Note that the number of references is actually refCount + 1 so that a default
+// zero-value Refs object contains one reference.
+//
+// +stateify savable
+type inodeRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
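+
+// splitInodeRefCount is an illustrative, hypothetical helper (not part of the
+// generated code) that decodes the packed refCount field described above: the
+// high 32 bits hold speculative references, and the low 32 bits hold real
+// references stored with a -1 bias, so the actual count is the stored value
+// plus one.
+func splitInodeRefCount(refCount int64) (speculative int32, real int64) {
+	return int32(refCount >> 32), int64(int32(refCount)) + 1
+}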
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *inodeRefs) RefType() string {
+ return fmt.Sprintf("%T", inodeobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *inodeRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *inodeRefs) LogRefs() bool {
+ return inodeenableLogging
+}
+
+// EnableLeakCheck enables reference leak checking on r.
+func (r *inodeRefs) EnableLeakCheck() {
+ refsvfs2.Register(r)
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *inodeRefs) ReadRefs() int64 {
+
+ return atomic.LoadInt64(&r.refCount) + 1
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *inodeRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ refsvfs2.LogIncRef(r, v+1)
+ if v <= 0 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *inodeRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) < 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ refsvfs2.LogTryIncRef(r, v+1)
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *inodeRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ refsvfs2.LogDecRef(r, v+1)
+ switch {
+ case v < -1:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == -1:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *inodeRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ r.EnableLeakCheck()
+ }
+}
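+
+// inodeRefsLifecycleSketch is an illustrative, hypothetical example (not part
+// of the generated code) of the reference-counting contract above: the zero
+// value already holds one reference, EnableLeakCheck opts the object into
+// leak checking, and the DecRef that drops the count to zero runs the
+// destructor.
+func inodeRefsLifecycleSketch() (destroyed bool) {
+	var r inodeRefs     // Zero value: one reference.
+	r.EnableLeakCheck() // Register with the leak checker.
+	r.IncRef()          // Two references.
+	if !r.TryIncRef() { // Three references; succeeds while the count is positive.
+		panic("TryIncRef should succeed on a live object")
+	}
+	r.DecRef(nil)
+	r.DecRef(nil)
+	r.DecRef(func() { destroyed = true }) // Last reference: destructor runs.
+	return destroyed
+}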
diff --git a/pkg/sentry/fsimpl/tmpfs/pipe_test.go b/pkg/sentry/fsimpl/tmpfs/pipe_test.go
deleted file mode 100644
index 2f856ce36..000000000
--- a/pkg/sentry/fsimpl/tmpfs/pipe_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "bytes"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const fileName = "mypipe"
-
-func TestSeparateFDs(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
-	// Open the read side. This is done concurrently because opening one
-	// end of the pipe blocks until the other end is opened.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- rfdchan := make(chan *vfs.FileDescription)
- go func() {
- openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY}
- rfd, _ := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- rfdchan <- rfd
- }()
-
- // Open the write side.
- openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY}
- wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer wfd.DecRef(ctx)
-
- rfd, ok := <-rfdchan
- if !ok {
- t.Fatalf("failed to open pipe for reading %q", fileName)
- }
- defer rfd.DecRef(ctx)
-
- const msg = "vamos azul"
- checkEmpty(ctx, t, rfd)
- checkWrite(ctx, t, wfd, msg)
- checkRead(ctx, t, rfd, msg)
-}
-
-func TestNonblockingRead(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the read side as nonblocking.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_RDONLY | linux.O_NONBLOCK}
- rfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for reading %q: %v", fileName, err)
- }
- defer rfd.DecRef(ctx)
-
- // Open the write side.
- openOpts = vfs.OpenOptions{Flags: linux.O_WRONLY}
- wfd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer wfd.DecRef(ctx)
-
- const msg = "geh blau"
- checkEmpty(ctx, t, rfd)
- checkWrite(ctx, t, wfd, msg)
- checkRead(ctx, t, rfd, msg)
-}
-
-func TestNonblockingWriteError(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the write side as nonblocking, which should return ENXIO.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY | linux.O_NONBLOCK}
- _, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != syserror.ENXIO {
- t.Fatalf("expected ENXIO, but got error: %v", err)
- }
-}
-
-func TestSingleFD(t *testing.T) {
- ctx, creds, vfsObj, root := setup(t)
- defer root.DecRef(ctx)
-
- // Open the pipe as readable and writable.
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }
- openOpts := vfs.OpenOptions{Flags: linux.O_RDWR}
- fd, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != nil {
- t.Fatalf("failed to open pipe for writing %q: %v", fileName, err)
- }
- defer fd.DecRef(ctx)
-
- const msg = "forza blu"
- checkEmpty(ctx, t, fd)
- checkWrite(ctx, t, fd, msg)
- checkRead(ctx, t, fd, msg)
-}
-
-// setup creates a VFS with a pipe in the root directory at path fileName. The
-// returned VirtualDentry must be DecRef()'d by the caller. It calls t.Fatal
-// upon failure.
-func setup(t *testing.T) (context.Context, *auth.Credentials, *vfs.VirtualFilesystem, vfs.VirtualDentry) {
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("tmpfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- t.Fatalf("failed to create tmpfs root mount: %v", err)
- }
-
- // Create the pipe.
- root := mntns.Root()
- root.IncRef()
- pop := vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- }
- mknodOpts := vfs.MknodOptions{Mode: linux.ModeNamedPipe | 0644}
- if err := vfsObj.MknodAt(ctx, creds, &pop, &mknodOpts); err != nil {
- t.Fatalf("failed to create file %q: %v", fileName, err)
- }
-
-	// Sanity check: the pipe exists and has the correct mode.
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(fileName),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- t.Fatalf("stat(%q) failed: %v", fileName, err)
- }
- if stat.Mode&^linux.S_IFMT != 0644 {
- t.Errorf("got wrong permissions (%0o)", stat.Mode)
- }
- if stat.Mode&linux.S_IFMT != linux.ModeNamedPipe {
- t.Errorf("got wrong file type (%0o)", stat.Mode)
- }
-
- return ctx, creds, vfsObj, root
-}
-
-// checkEmpty calls t.Fatal if the pipe in fd is not empty.
-func checkEmpty(ctx context.Context, t *testing.T, fd *vfs.FileDescription) {
- readData := make([]byte, 1)
- dst := usermem.BytesIOSequence(readData)
- bytesRead, err := fd.Read(ctx, dst, vfs.ReadOptions{})
- if err != syserror.ErrWouldBlock {
- t.Fatalf("expected ErrWouldBlock reading from empty pipe %q, but got: %v", fileName, err)
- }
- if bytesRead != 0 {
- t.Fatalf("expected to read 0 bytes, but got %d", bytesRead)
- }
-}
-
-// checkWrite calls t.Fatal if it fails to write all of msg to fd.
-func checkWrite(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {
- writeData := []byte(msg)
- src := usermem.BytesIOSequence(writeData)
- bytesWritten, err := fd.Write(ctx, src, vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("error writing to pipe %q: %v", fileName, err)
- }
- if bytesWritten != int64(len(writeData)) {
- t.Fatalf("expected to write %d bytes, but wrote %d", len(writeData), bytesWritten)
- }
-}
-
-// checkRead calls t.Fatal if it fails to read msg from fd.
-func checkRead(ctx context.Context, t *testing.T, fd *vfs.FileDescription, msg string) {
- readData := make([]byte, len(msg))
- dst := usermem.BytesIOSequence(readData)
- bytesRead, err := fd.Read(ctx, dst, vfs.ReadOptions{})
- if err != nil {
- t.Fatalf("error reading from pipe %q: %v", fileName, err)
- }
- if bytesRead != int64(len(msg)) {
- t.Fatalf("expected to read %d bytes, but got %d", len(msg), bytesRead)
- }
- if !bytes.Equal(readData, []byte(msg)) {
- t.Fatalf("expected to read %q from pipe, but got %q", msg, string(readData))
- }
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go b/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
deleted file mode 100644
index 146c7fdfe..000000000
--- a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "bytes"
- "fmt"
- "io"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// Test that we can write some data to a file and read it back.
-func TestSimpleWriteRead(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Write.
- data := []byte("foobarbaz")
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(len(data)); got != want {
- t.Errorf("fd.Write left offset at %d, want %d", got, want)
- }
-
- // Seek back to beginning.
- if _, err := fd.Seek(ctx, 0, linux.SEEK_SET); err != nil {
- t.Fatalf("fd.Seek failed: %v", err)
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(0); got != want {
- t.Errorf("fd.Seek(0) left offset at %d, want %d", got, want)
- }
-
- // Read.
- buf := make([]byte, len(data))
- n, err = fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.Read failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Read got short read length %d, want %d", n, len(data))
- }
- if got, want := string(buf), string(data); got != want {
-		t.Errorf("Read got %q want %q", got, want)
- }
- if got, want := fd.Impl().(*regularFileFD).off, int64(len(data)); got != want {
-		t.Errorf("fd.Read left offset at %d, want %d", got, want)
- }
-}
-
-func TestPWrite(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Fill file with 1k 'a's.
- data := bytes.Repeat([]byte{'a'}, 1000)
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
-
- // Write "gVisor is awesome" at various offsets.
- buf := []byte("gVisor is awesome")
- offsets := []int{0, 1, 2, 10, 20, 50, 100, len(data) - 100, len(data) - 1, len(data), len(data) + 1}
- for _, offset := range offsets {
- name := fmt.Sprintf("PWrite offset=%d", offset)
- t.Run(name, func(t *testing.T) {
- n, err := fd.PWrite(ctx, usermem.BytesIOSequence(buf), int64(offset), vfs.WriteOptions{})
- if err != nil {
- t.Errorf("fd.PWrite got err %v want nil", err)
- }
- if n != int64(len(buf)) {
- t.Errorf("fd.PWrite got %d bytes want %d", n, len(buf))
- }
-
- // Update data to reflect expected file contents.
- if len(data) < offset+len(buf) {
- data = append(data, make([]byte, (offset+len(buf))-len(data))...)
- }
- copy(data[offset:], buf)
-
- // Read the whole file and compare with data.
- readBuf := make([]byte, len(data))
- n, err = fd.PRead(ctx, usermem.BytesIOSequence(readBuf), 0, vfs.ReadOptions{})
- if err != nil {
- t.Fatalf("fd.PRead failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.PRead got short read length %d, want %d", n, len(data))
- }
- if got, want := string(readBuf), string(data); got != want {
-				t.Errorf("PRead got %q want %q", got, want)
- }
-
- })
- }
-}
-
-func TestLocks(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- uid1 := 123
- uid2 := 456
- if err := fd.Impl().LockBSD(ctx, uid1, lock.ReadLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
- if err := fd.Impl().LockBSD(ctx, uid2, lock.ReadLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
- if got, want := fd.Impl().LockBSD(ctx, uid2, lock.WriteLock, nil), syserror.ErrWouldBlock; got != want {
- t.Fatalf("fd.Impl().LockBSD failed: got = %v, want = %v", got, want)
- }
- if err := fd.Impl().UnlockBSD(ctx, uid1); err != nil {
- t.Fatalf("fd.Impl().UnlockBSD failed: err = %v", err)
- }
- if err := fd.Impl().LockBSD(ctx, uid2, lock.WriteLock, nil); err != nil {
- t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
- }
-
- if err := fd.Impl().LockPOSIX(ctx, uid1, lock.ReadLock, 0, 1, linux.SEEK_SET, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if err := fd.Impl().LockPOSIX(ctx, uid2, lock.ReadLock, 1, 2, linux.SEEK_SET, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if err := fd.Impl().LockPOSIX(ctx, uid1, lock.WriteLock, 0, 1, linux.SEEK_SET, nil); err != nil {
- t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
- }
- if got, want := fd.Impl().LockPOSIX(ctx, uid2, lock.ReadLock, 0, 1, linux.SEEK_SET, nil), syserror.ErrWouldBlock; got != want {
- t.Fatalf("fd.Impl().LockPOSIX failed: got = %v, want = %v", got, want)
- }
- if err := fd.Impl().UnlockPOSIX(ctx, uid1, 0, 1, linux.SEEK_SET); err != nil {
- t.Fatalf("fd.Impl().UnlockPOSIX failed: err = %v", err)
- }
-}
-
-func TestPRead(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Write 100 sequences of 'gVisor is awesome'.
-	data := bytes.Repeat([]byte("gVisor is awesome"), 100)
- n, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
- if n != int64(len(data)) {
- t.Errorf("fd.Write got short write length %d, want %d", n, len(data))
- }
-
- // Read various sizes from various offsets.
- sizes := []int{0, 1, 2, 10, 20, 50, 100, 1000}
- offsets := []int{0, 1, 2, 10, 20, 50, 100, 1000, len(data) - 100, len(data) - 1, len(data), len(data) + 1}
-
- for _, size := range sizes {
- for _, offset := range offsets {
- name := fmt.Sprintf("PRead offset=%d size=%d", offset, size)
- t.Run(name, func(t *testing.T) {
- var (
- wantRead []byte
- wantErr error
- )
- if offset < len(data) {
- wantRead = data[offset:]
- } else if size > 0 {
- wantErr = io.EOF
- }
- if offset+size < len(data) {
- wantRead = wantRead[:size]
- }
- buf := make([]byte, size)
- n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), int64(offset), vfs.ReadOptions{})
- if err != wantErr {
- t.Errorf("fd.PRead got err %v want %v", err, wantErr)
- }
- if n != int64(len(wantRead)) {
- t.Errorf("fd.PRead got %d bytes want %d", n, len(wantRead))
- }
- if got := string(buf[:n]); got != string(wantRead) {
- t.Errorf("fd.PRead got %q want %q", got, string(wantRead))
- }
- })
- }
- }
-}
-
-func TestTruncate(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- // Fill the file with some data.
-	data := bytes.Repeat([]byte("gVisor is awesome"), 100)
- written, err := fd.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- t.Fatalf("fd.Write failed: %v", err)
- }
-
-	// Size should be the same as the number of bytes written.
- sizeStatOpts := vfs.StatOptions{Mask: linux.STATX_SIZE}
- stat, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := int64(stat.Size), written; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
-
- // Truncate down.
- newSize := uint64(10)
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- // Size should be updated.
- statAfterTruncateDown, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := statAfterTruncateDown.Size, newSize; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
- // We should only read newSize worth of data.
- buf := make([]byte, 1000)
- if n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0, vfs.ReadOptions{}); err != nil && err != io.EOF {
- t.Fatalf("fd.PRead failed: %v", err)
- } else if uint64(n) != newSize {
- t.Errorf("fd.PRead got size %d, want %d", n, newSize)
- }
- // Mtime and Ctime should be bumped.
- if got := statAfterTruncateDown.Mtime.ToNsec(); got <= stat.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want > %v", got, stat.Mtime)
- }
- if got := statAfterTruncateDown.Ctime.ToNsec(); got <= stat.Ctime.ToNsec() {
- t.Errorf("fd.Stat got Ctime %v, want > %v", got, stat.Ctime)
- }
-
- // Truncate up.
- newSize = 100
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- // Size should be updated.
- statAfterTruncateUp, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
- if got, want := statAfterTruncateUp.Size, newSize; got != want {
- t.Errorf("fd.Stat got size %d, want %d", got, want)
- }
- // We should read newSize worth of data.
- buf = make([]byte, 1000)
- if n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0, vfs.ReadOptions{}); err != nil && err != io.EOF {
- t.Fatalf("fd.PRead failed: %v", err)
- } else if uint64(n) != newSize {
- t.Errorf("fd.PRead got size %d, want %d", n, newSize)
- }
-	// Bytes after offset 10 should be zero, since we previously truncated to 10.
- for i := uint64(10); i < newSize; i++ {
- if buf[i] != 0 {
- t.Errorf("fd.PRead got byte %d=%x, want 0", i, buf[i])
- break
- }
- }
- // Mtime and Ctime should be bumped.
- if got := statAfterTruncateUp.Mtime.ToNsec(); got <= statAfterTruncateDown.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want > %v", got, statAfterTruncateDown.Mtime)
- }
- if got := statAfterTruncateUp.Ctime.ToNsec(); got <= statAfterTruncateDown.Ctime.ToNsec() {
-		t.Errorf("fd.Stat got Ctime %v, want > %v", got, statAfterTruncateDown.Ctime)
- }
-
- // Truncate to the current size.
- newSize = statAfterTruncateUp.Size
- if err := fd.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: linux.STATX_SIZE,
- Size: newSize,
- },
- }); err != nil {
- t.Errorf("fd.Truncate failed: %v", err)
- }
- statAfterTruncateNoop, err := fd.Stat(ctx, sizeStatOpts)
- if err != nil {
- t.Fatalf("fd.Stat failed: %v", err)
- }
-	// Mtime and Ctime should not be bumped, since the operation is a no-op.
- if got := statAfterTruncateNoop.Mtime.ToNsec(); got != statAfterTruncateUp.Mtime.ToNsec() {
- t.Errorf("fd.Stat got Mtime %v, want %v", got, statAfterTruncateUp.Mtime)
- }
- if got := statAfterTruncateNoop.Ctime.ToNsec(); got != statAfterTruncateUp.Ctime.ToNsec() {
- t.Errorf("fd.Stat got Ctime %v, want %v", got, statAfterTruncateUp.Ctime)
- }
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/stat_test.go b/pkg/sentry/fsimpl/tmpfs/stat_test.go
deleted file mode 100644
index f7ee4aab2..000000000
--- a/pkg/sentry/fsimpl/tmpfs/stat_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "fmt"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-func TestStatAfterCreate(t *testing.T) {
- ctx := contexttest.Context(t)
- mode := linux.FileMode(0644)
-
- // Run with different file types.
- for _, typ := range []string{"file", "dir", "pipe"} {
- t.Run(fmt.Sprintf("type=%q", typ), func(t *testing.T) {
- var (
- fd *vfs.FileDescription
- cleanup func()
- err error
- )
- switch typ {
- case "file":
- fd, cleanup, err = newFileFD(ctx, mode)
- case "dir":
- fd, cleanup, err = newDirFD(ctx, mode)
- case "pipe":
- fd, cleanup, err = newPipeFD(ctx, mode)
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- got, err := fd.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Atime, Ctime, Mtime should all be current time (non-zero).
- atime, ctime, mtime := got.Atime.ToNsec(), got.Ctime.ToNsec(), got.Mtime.ToNsec()
- if atime != ctime || ctime != mtime {
- t.Errorf("got atime=%d ctime=%d mtime=%d, wanted equal values", atime, ctime, mtime)
- }
- if atime == 0 {
- t.Errorf("got atime=%d, want non-zero", atime)
- }
-
- // Btime should be 0, as it is not set by tmpfs.
- if btime := got.Btime.ToNsec(); btime != 0 {
- t.Errorf("got btime %d, want 0", got.Btime.ToNsec())
- }
-
-			// Size should be 0, except for directories, whose size is 20 per
-			// entry, counting the "." and ".." entries present in
-			// otherwise-empty directories.
- wantSize := uint64(0)
- if typ == "dir" {
- wantSize = 40
- }
- if got.Size != wantSize {
- t.Errorf("got size %d, want %d", got.Size, wantSize)
- }
-
- // Nlink should be 1 for files, 2 for dirs.
- wantNlink := uint32(1)
- if typ == "dir" {
- wantNlink = 2
- }
- if got.Nlink != wantNlink {
- t.Errorf("got nlink %d, want %d", got.Nlink, wantNlink)
- }
-
- // UID and GID are set from context creds.
- creds := auth.CredentialsFromContext(ctx)
- if got.UID != uint32(creds.EffectiveKUID) {
- t.Errorf("got uid %d, want %d", got.UID, uint32(creds.EffectiveKUID))
- }
- if got.GID != uint32(creds.EffectiveKGID) {
- t.Errorf("got gid %d, want %d", got.GID, uint32(creds.EffectiveKGID))
- }
-
- // Mode.
- wantMode := uint16(mode)
- switch typ {
- case "file":
- wantMode |= linux.S_IFREG
- case "dir":
- wantMode |= linux.S_IFDIR
- case "pipe":
- wantMode |= linux.S_IFIFO
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
-
- if got.Mode != wantMode {
- t.Errorf("got mode %x, want %x", got.Mode, wantMode)
- }
-
- // Ino.
- if got.Ino == 0 {
- t.Errorf("got ino %d, want not 0", got.Ino)
- }
- })
- }
-}
-
-func TestSetStatAtime(t *testing.T) {
- ctx := contexttest.Context(t)
- fd, cleanup, err := newFileFD(ctx, 0644)
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- allStatOptions := vfs.StatOptions{Mask: linux.STATX_ALL}
-
- // Get initial stat.
- initialStat, err := fd.Stat(ctx, allStatOptions)
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Set atime, but without the mask.
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: linux.Statx{
- Mask: 0,
- Atime: linux.NsecToStatxTimestamp(100),
- }}); err != nil {
- t.Errorf("SetStat atime without mask failed: %v", err)
- }
- // Atime should be unchanged.
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != initialStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, initialStat.Atime)
- }
-
- // Set atime, this time included in the mask.
- setStat := linux.Statx{
- Mask: linux.STATX_ATIME,
- Atime: linux.NsecToStatxTimestamp(100),
- }
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: setStat}); err != nil {
- t.Errorf("SetStat atime with mask failed: %v", err)
- }
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != setStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, setStat.Atime)
- }
-}
-
-func TestSetStat(t *testing.T) {
- ctx := contexttest.Context(t)
- mode := linux.FileMode(0644)
-
- // Run with different file types.
- for _, typ := range []string{"file", "dir", "pipe"} {
- t.Run(fmt.Sprintf("type=%q", typ), func(t *testing.T) {
- var (
- fd *vfs.FileDescription
- cleanup func()
- err error
- )
- switch typ {
- case "file":
- fd, cleanup, err = newFileFD(ctx, mode)
- case "dir":
- fd, cleanup, err = newDirFD(ctx, mode)
- case "pipe":
- fd, cleanup, err = newPipeFD(ctx, mode)
- default:
- panic(fmt.Sprintf("unknown typ %q", typ))
- }
- if err != nil {
- t.Fatal(err)
- }
- defer cleanup()
-
- allStatOptions := vfs.StatOptions{Mask: linux.STATX_ALL}
-
- // Get initial stat.
- initialStat, err := fd.Stat(ctx, allStatOptions)
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
-
- // Set atime, but without the mask.
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: linux.Statx{
- Mask: 0,
- Atime: linux.NsecToStatxTimestamp(100),
- }}); err != nil {
- t.Errorf("SetStat atime without mask failed: %v", err)
- }
- // Atime should be unchanged.
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != initialStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, initialStat.Atime)
- }
-
- // Set atime, this time included in the mask.
- setStat := linux.Statx{
- Mask: linux.STATX_ATIME,
- Atime: linux.NsecToStatxTimestamp(100),
- }
- if err := fd.SetStat(ctx, vfs.SetStatOptions{Stat: setStat}); err != nil {
- t.Errorf("SetStat atime with mask failed: %v", err)
- }
- if gotStat, err := fd.Stat(ctx, allStatOptions); err != nil {
- t.Errorf("Stat got error: %v", err)
- } else if gotStat.Atime != setStat.Atime {
- t.Errorf("Stat got atime %d, want %d", gotStat.Atime, setStat.Atime)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
new file mode 100644
index 000000000..21c53afaf
--- /dev/null
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs_state_autogen.go
@@ -0,0 +1,556 @@
+// automatically generated by stateify.
+
+package tmpfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (l *dentryList) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentryList"
+}
+
+func (l *dentryList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *dentryList) beforeSave() {}
+
+func (l *dentryList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *dentryList) afterLoad() {}
+
+func (l *dentryList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *dentryEntry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentryEntry"
+}
+
+func (e *dentryEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *dentryEntry) beforeSave() {}
+
+func (e *dentryEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *dentryEntry) afterLoad() {}
+
+func (e *dentryEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (d *deviceFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.deviceFile"
+}
+
+func (d *deviceFile) StateFields() []string {
+ return []string{
+ "inode",
+ "kind",
+ "major",
+ "minor",
+ }
+}
+
+func (d *deviceFile) beforeSave() {}
+
+func (d *deviceFile) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.inode)
+ stateSinkObject.Save(1, &d.kind)
+ stateSinkObject.Save(2, &d.major)
+ stateSinkObject.Save(3, &d.minor)
+}
+
+func (d *deviceFile) afterLoad() {}
+
+func (d *deviceFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.inode)
+ stateSourceObject.Load(1, &d.kind)
+ stateSourceObject.Load(2, &d.major)
+ stateSourceObject.Load(3, &d.minor)
+}
+
+func (dir *directory) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.directory"
+}
+
+func (dir *directory) StateFields() []string {
+ return []string{
+ "dentry",
+ "inode",
+ "childMap",
+ "numChildren",
+ "childList",
+ }
+}
+
+func (dir *directory) beforeSave() {}
+
+func (dir *directory) StateSave(stateSinkObject state.Sink) {
+ dir.beforeSave()
+ stateSinkObject.Save(0, &dir.dentry)
+ stateSinkObject.Save(1, &dir.inode)
+ stateSinkObject.Save(2, &dir.childMap)
+ stateSinkObject.Save(3, &dir.numChildren)
+ stateSinkObject.Save(4, &dir.childList)
+}
+
+func (dir *directory) afterLoad() {}
+
+func (dir *directory) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &dir.dentry)
+ stateSourceObject.Load(1, &dir.inode)
+ stateSourceObject.Load(2, &dir.childMap)
+ stateSourceObject.Load(3, &dir.numChildren)
+ stateSourceObject.Load(4, &dir.childList)
+}
+
+func (fd *directoryFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.directoryFD"
+}
+
+func (fd *directoryFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "DirectoryFileDescriptionDefaultImpl",
+ "iter",
+ "off",
+ }
+}
+
+func (fd *directoryFD) beforeSave() {}
+
+func (fd *directoryFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.iter)
+ stateSinkObject.Save(3, &fd.off)
+}
+
+func (fd *directoryFD) afterLoad() {}
+
+func (fd *directoryFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.DirectoryFileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.iter)
+ stateSourceObject.Load(3, &fd.off)
+}
+
+func (r *inodeRefs) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.inodeRefs"
+}
+
+func (r *inodeRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *inodeRefs) beforeSave() {}
+
+func (r *inodeRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+func (r *inodeRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (n *namedPipe) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.namedPipe"
+}
+
+func (n *namedPipe) StateFields() []string {
+ return []string{
+ "inode",
+ "pipe",
+ }
+}
+
+func (n *namedPipe) beforeSave() {}
+
+func (n *namedPipe) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+ stateSinkObject.Save(0, &n.inode)
+ stateSinkObject.Save(1, &n.pipe)
+}
+
+func (n *namedPipe) afterLoad() {}
+
+func (n *namedPipe) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &n.inode)
+ stateSourceObject.Load(1, &n.pipe)
+}
+
+func (rf *regularFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.regularFile"
+}
+
+func (rf *regularFile) StateFields() []string {
+ return []string{
+ "inode",
+ "memoryUsageKind",
+ "mappings",
+ "writableMappingPages",
+ "data",
+ "seals",
+ "size",
+ }
+}
+
+func (rf *regularFile) beforeSave() {}
+
+func (rf *regularFile) StateSave(stateSinkObject state.Sink) {
+ rf.beforeSave()
+ stateSinkObject.Save(0, &rf.inode)
+ stateSinkObject.Save(1, &rf.memoryUsageKind)
+ stateSinkObject.Save(2, &rf.mappings)
+ stateSinkObject.Save(3, &rf.writableMappingPages)
+ stateSinkObject.Save(4, &rf.data)
+ stateSinkObject.Save(5, &rf.seals)
+ stateSinkObject.Save(6, &rf.size)
+}
+
+func (rf *regularFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rf.inode)
+ stateSourceObject.Load(1, &rf.memoryUsageKind)
+ stateSourceObject.Load(2, &rf.mappings)
+ stateSourceObject.Load(3, &rf.writableMappingPages)
+ stateSourceObject.Load(4, &rf.data)
+ stateSourceObject.Load(5, &rf.seals)
+ stateSourceObject.Load(6, &rf.size)
+ stateSourceObject.AfterLoad(rf.afterLoad)
+}
+
+func (fd *regularFileFD) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.regularFileFD"
+}
+
+func (fd *regularFileFD) StateFields() []string {
+ return []string{
+ "fileDescription",
+ "off",
+ }
+}
+
+func (fd *regularFileFD) beforeSave() {}
+
+func (fd *regularFileFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.fileDescription)
+ stateSinkObject.Save(1, &fd.off)
+}
+
+func (fd *regularFileFD) afterLoad() {}
+
+func (fd *regularFileFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.fileDescription)
+ stateSourceObject.Load(1, &fd.off)
+}
+
+func (s *socketFile) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.socketFile"
+}
+
+func (s *socketFile) StateFields() []string {
+ return []string{
+ "inode",
+ "ep",
+ }
+}
+
+func (s *socketFile) beforeSave() {}
+
+func (s *socketFile) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.inode)
+ stateSinkObject.Save(1, &s.ep)
+}
+
+func (s *socketFile) afterLoad() {}
+
+func (s *socketFile) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.inode)
+ stateSourceObject.Load(1, &s.ep)
+}
+
+func (s *symlink) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.symlink"
+}
+
+func (s *symlink) StateFields() []string {
+ return []string{
+ "inode",
+ "target",
+ }
+}
+
+func (s *symlink) beforeSave() {}
+
+func (s *symlink) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.inode)
+ stateSinkObject.Save(1, &s.target)
+}
+
+func (s *symlink) afterLoad() {}
+
+func (s *symlink) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.inode)
+ stateSourceObject.Load(1, &s.target)
+}
+
+func (fstype *FilesystemType) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.FilesystemType"
+}
+
+func (fstype *FilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (fstype *FilesystemType) beforeSave() {}
+
+func (fstype *FilesystemType) StateSave(stateSinkObject state.Sink) {
+ fstype.beforeSave()
+}
+
+func (fstype *FilesystemType) afterLoad() {}
+
+func (fstype *FilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *filesystem) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.filesystem"
+}
+
+func (fs *filesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "mfp",
+ "clock",
+ "devMinor",
+ "nextInoMinusOne",
+ "root",
+ }
+}
+
+func (fs *filesystem) beforeSave() {}
+
+func (fs *filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.mfp)
+ stateSinkObject.Save(2, &fs.clock)
+ stateSinkObject.Save(3, &fs.devMinor)
+ stateSinkObject.Save(4, &fs.nextInoMinusOne)
+ stateSinkObject.Save(5, &fs.root)
+}
+
+func (fs *filesystem) afterLoad() {}
+
+func (fs *filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.mfp)
+ stateSourceObject.Load(2, &fs.clock)
+ stateSourceObject.Load(3, &fs.devMinor)
+ stateSourceObject.Load(4, &fs.nextInoMinusOne)
+ stateSourceObject.Load(5, &fs.root)
+}
+
+func (f *FilesystemOpts) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.FilesystemOpts"
+}
+
+func (f *FilesystemOpts) StateFields() []string {
+ return []string{
+ "RootFileType",
+ "RootSymlinkTarget",
+ "FilesystemType",
+ }
+}
+
+func (f *FilesystemOpts) beforeSave() {}
+
+func (f *FilesystemOpts) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.RootFileType)
+ stateSinkObject.Save(1, &f.RootSymlinkTarget)
+ stateSinkObject.Save(2, &f.FilesystemType)
+}
+
+func (f *FilesystemOpts) afterLoad() {}
+
+func (f *FilesystemOpts) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.RootFileType)
+ stateSourceObject.Load(1, &f.RootSymlinkTarget)
+ stateSourceObject.Load(2, &f.FilesystemType)
+}
+
+func (d *dentry) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.dentry"
+}
+
+func (d *dentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "parent",
+ "name",
+ "dentryEntry",
+ "inode",
+ }
+}
+
+func (d *dentry) beforeSave() {}
+
+func (d *dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.parent)
+ stateSinkObject.Save(2, &d.name)
+ stateSinkObject.Save(3, &d.dentryEntry)
+ stateSinkObject.Save(4, &d.inode)
+}
+
+func (d *dentry) afterLoad() {}
+
+func (d *dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.parent)
+ stateSourceObject.Load(2, &d.name)
+ stateSourceObject.Load(3, &d.dentryEntry)
+ stateSourceObject.Load(4, &d.inode)
+}
+
+func (i *inode) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.inode"
+}
+
+func (i *inode) StateFields() []string {
+ return []string{
+ "fs",
+ "refs",
+ "xattrs",
+ "mode",
+ "nlink",
+ "uid",
+ "gid",
+ "ino",
+ "atime",
+ "ctime",
+ "mtime",
+ "locks",
+ "watches",
+ "impl",
+ }
+}
+
+func (i *inode) beforeSave() {}
+
+func (i *inode) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.fs)
+ stateSinkObject.Save(1, &i.refs)
+ stateSinkObject.Save(2, &i.xattrs)
+ stateSinkObject.Save(3, &i.mode)
+ stateSinkObject.Save(4, &i.nlink)
+ stateSinkObject.Save(5, &i.uid)
+ stateSinkObject.Save(6, &i.gid)
+ stateSinkObject.Save(7, &i.ino)
+ stateSinkObject.Save(8, &i.atime)
+ stateSinkObject.Save(9, &i.ctime)
+ stateSinkObject.Save(10, &i.mtime)
+ stateSinkObject.Save(11, &i.locks)
+ stateSinkObject.Save(12, &i.watches)
+ stateSinkObject.Save(13, &i.impl)
+}
+
+func (i *inode) afterLoad() {}
+
+func (i *inode) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.fs)
+ stateSourceObject.Load(1, &i.refs)
+ stateSourceObject.Load(2, &i.xattrs)
+ stateSourceObject.Load(3, &i.mode)
+ stateSourceObject.Load(4, &i.nlink)
+ stateSourceObject.Load(5, &i.uid)
+ stateSourceObject.Load(6, &i.gid)
+ stateSourceObject.Load(7, &i.ino)
+ stateSourceObject.Load(8, &i.atime)
+ stateSourceObject.Load(9, &i.ctime)
+ stateSourceObject.Load(10, &i.mtime)
+ stateSourceObject.Load(11, &i.locks)
+ stateSourceObject.Load(12, &i.watches)
+ stateSourceObject.Load(13, &i.impl)
+}
+
+func (fd *fileDescription) StateTypeName() string {
+ return "pkg/sentry/fsimpl/tmpfs.fileDescription"
+}
+
+func (fd *fileDescription) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "LockFD",
+ }
+}
+
+func (fd *fileDescription) beforeSave() {}
+
+func (fd *fileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.LockFD)
+}
+
+func (fd *fileDescription) afterLoad() {}
+
+func (fd *fileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.LockFD)
+}
+
+func init() {
+ state.Register((*dentryList)(nil))
+ state.Register((*dentryEntry)(nil))
+ state.Register((*deviceFile)(nil))
+ state.Register((*directory)(nil))
+ state.Register((*directoryFD)(nil))
+ state.Register((*inodeRefs)(nil))
+ state.Register((*namedPipe)(nil))
+ state.Register((*regularFile)(nil))
+ state.Register((*regularFileFD)(nil))
+ state.Register((*socketFile)(nil))
+ state.Register((*symlink)(nil))
+ state.Register((*FilesystemType)(nil))
+ state.Register((*filesystem)(nil))
+ state.Register((*FilesystemOpts)(nil))
+ state.Register((*dentry)(nil))
+ state.Register((*inode)(nil))
+ state.Register((*fileDescription)(nil))
+}
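+
+// exampleSavable is an illustrative, hypothetical type (not generated and not
+// part of this package's saved state) showing the contract that stateify
+// emits above: a savable type names itself and its fields, saves and loads
+// them by index, and would also be passed to state.Register in init.
+type exampleSavable struct {
+	count int64
+}
+
+func (e *exampleSavable) StateTypeName() string {
+	return "pkg/sentry/fsimpl/tmpfs.exampleSavable"
+}
+
+func (e *exampleSavable) StateFields() []string {
+	return []string{
+		"count",
+	}
+}
+
+func (e *exampleSavable) beforeSave() {}
+
+func (e *exampleSavable) StateSave(stateSinkObject state.Sink) {
+	e.beforeSave()
+	stateSinkObject.Save(0, &e.count)
+}
+
+func (e *exampleSavable) afterLoad() {}
+
+func (e *exampleSavable) StateLoad(stateSourceObject state.Source) {
+	stateSourceObject.Load(0, &e.count)
+}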
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go b/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go
deleted file mode 100644
index fc5323abc..000000000
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tmpfs
-
-import (
- "fmt"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// nextFileID is used to generate unique file names.
-var nextFileID int64
-
-// newTmpfsRoot creates a new tmpfs mount and returns the root. If the returned
-// error is nil, cleanup should be called when the root is no longer needed.
-func newTmpfsRoot(ctx context.Context) (*vfs.VirtualFilesystem, vfs.VirtualDentry, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
-
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("VFS init: %v", err)
- }
-
- vfsObj.MustRegisterFilesystemType("tmpfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, "", "tmpfs", &vfs.MountOptions{})
- if err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("failed to create tmpfs root mount: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
- return vfsObj, root, func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- }, nil
-}
-
-// newFileFD creates a new file in a new tmpfs mount, and returns the FD. If
-// the returned err is nil, then cleanup should be called when the FD is no
-// longer needed.
-func newFileFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- filename := fmt.Sprintf("tmpfs-test-file-%d", atomic.AddInt64(&nextFileID, 1))
-
- // Create the file that will be written/read.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: linux.ModeRegular | mode,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create file %q: %v", filename, err)
- }
-
- return fd, cleanup, nil
-}
-
-// newDirFD is like newFileFD, but for directories.
-func newDirFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- dirname := fmt.Sprintf("tmpfs-test-dir-%d", atomic.AddInt64(&nextFileID, 1))
-
- // Create the dir.
- if err := vfsObj.MkdirAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(dirname),
- }, &vfs.MkdirOptions{
- Mode: linux.ModeDirectory | mode,
- }); err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create directory %q: %v", dirname, err)
- }
-
- // Open the dir and return it.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(dirname),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY | linux.O_DIRECTORY,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to open directory %q: %v", dirname, err)
- }
-
- return fd, cleanup, nil
-}
-
-// newPipeFD is like newFileFD, but for pipes.
-func newPipeFD(ctx context.Context, mode linux.FileMode) (*vfs.FileDescription, func(), error) {
- creds := auth.CredentialsFromContext(ctx)
- vfsObj, root, cleanup, err := newTmpfsRoot(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- name := fmt.Sprintf("tmpfs-test-%d", atomic.AddInt64(&nextFileID, 1))
-
- if err := vfsObj.MknodAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }, &vfs.MknodOptions{
- Mode: linux.ModeNamedPipe | mode,
- }); err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to create pipe %q: %v", name, err)
- }
-
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(name),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- cleanup()
- return nil, nil, fmt.Errorf("failed to open pipe %q: %v", name, err)
- }
-
- return fd, cleanup, nil
-}
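
The helpers deleted above build file descriptions against a throwaway tmpfs mount. The sketch below shows how a test might have driven newFileFD, assuming the contexttest, usermem, and vfs APIs in use at this revision; TestSketchReadWrite and its buffer contents are illustrative only and were not part of the deleted file.

package tmpfs

import (
	"bytes"
	"testing"

	"gvisor.dev/gvisor/pkg/sentry/contexttest"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
	"gvisor.dev/gvisor/pkg/usermem"
)

// TestSketchReadWrite writes a buffer through a tmpfs FD created by
// newFileFD and reads it back at offset 0.
func TestSketchReadWrite(t *testing.T) {
	ctx := contexttest.Context(t)
	fd, cleanup, err := newFileFD(ctx, 0644)
	if err != nil {
		t.Fatalf("newFileFD: %v", err)
	}
	defer cleanup()

	want := []byte("hello tmpfs")
	if _, err := fd.Write(ctx, usermem.BytesIOSequence(want), vfs.WriteOptions{}); err != nil {
		t.Fatalf("Write: %v", err)
	}

	got := make([]byte, len(want))
	if _, err := fd.PRead(ctx, usermem.BytesIOSequence(got), 0, vfs.ReadOptions{}); err != nil {
		t.Fatalf("PRead: %v", err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("read back %q, want %q", got, want)
	}
}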
diff --git a/pkg/sentry/fsimpl/verity/BUILD b/pkg/sentry/fsimpl/verity/BUILD
deleted file mode 100644
index e265be0ee..000000000
--- a/pkg/sentry/fsimpl/verity/BUILD
+++ /dev/null
@@ -1,51 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-go_library(
- name = "verity",
- srcs = [
- "filesystem.go",
- "save_restore.go",
- "verity.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/marshal/primitive",
- "//pkg/merkletree",
- "//pkg/refsvfs2",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
-
-go_test(
- name = "verity_test",
- srcs = [
- "verity_test.go",
- ],
- library = ":verity",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/arch",
- "//pkg/sentry/fsimpl/testutil",
- "//pkg/sentry/fsimpl/tmpfs",
- "//pkg/sentry/kernel",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/fsimpl/verity/filesystem.go b/pkg/sentry/fsimpl/verity/filesystem.go
deleted file mode 100644
index 2f6050cfd..000000000
--- a/pkg/sentry/fsimpl/verity/filesystem.go
+++ /dev/null
@@ -1,1046 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verity
-
-import (
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/merkletree"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// Sync implements vfs.FilesystemImpl.Sync.
-func (fs *filesystem) Sync(ctx context.Context) error {
- // All files should be read-only.
- return nil
-}
-
-var dentrySlicePool = sync.Pool{
- New: func() interface{} {
- ds := make([]*dentry, 0, 4) // arbitrary non-zero initial capacity
- return &ds
- },
-}
-
-func appendDentry(ds *[]*dentry, d *dentry) *[]*dentry {
- if ds == nil {
- ds = dentrySlicePool.Get().(*[]*dentry)
- }
- *ds = append(*ds, d)
- return ds
-}
-
-// Preconditions: ds != nil.
-func putDentrySlice(ds *[]*dentry) {
- // Allow dentries to be GC'd.
- for i := range *ds {
- (*ds)[i] = nil
- }
- *ds = (*ds)[:0]
- dentrySlicePool.Put(ds)
-}
-
-// renameMuRUnlockAndCheckDrop calls fs.renameMu.RUnlock(), then calls
-// dentry.checkDropLocked on all dentries in *ds with fs.renameMu locked for
-// writing.
-//
-// ds is a pointer-to-pointer since defer evaluates its arguments immediately,
-// but dentry slices are allocated lazily, and it's much easier to say "defer
-// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
-// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
-func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
- fs.renameMu.RUnlock()
- if *ds == nil {
- return
- }
- if len(**ds) != 0 {
- fs.renameMu.Lock()
- for _, d := range **ds {
- d.checkDropLocked(ctx)
- }
- fs.renameMu.Unlock()
- }
- putDentrySlice(*ds)
-}
-
-func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
- if *ds == nil {
- fs.renameMu.Unlock()
- return
- }
- for _, d := range **ds {
- d.checkDropLocked(ctx)
- }
- fs.renameMu.Unlock()
- putDentrySlice(*ds)
-}
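
The pointer-to-pointer signature exists because defer evaluates its arguments at the defer statement, while the dentry slice is allocated lazily during the walk. The standalone sketch below (plain Go, not gVisor code; report and walk are made-up names) shows the same idiom in isolation.

package main

import "fmt"

// report stands in for renameMuRUnlockAndCheckDrop: it must observe the
// slice pointer as it is when the deferred call runs, not when the defer
// statement is evaluated.
func report(ds **[]string) {
	if *ds == nil {
		fmt.Println("nothing collected")
		return
	}
	fmt.Println("collected:", **ds)
}

func walk() {
	var ds *[]string
	// Deferring report(ds) would capture nil here; passing &ds lets the
	// deferred call see the slice assigned below.
	defer report(&ds)
	collected := []string{"a", "b"}
	ds = &collected
}

func main() {
	walk() // prints: collected: [a b]
}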
-
-// stepLocked resolves rp.Component() to an existing file, starting from the
-// given directory.
-//
-// Dentries which may have a reference count of zero, and which therefore
-// should be dropped once traversal is complete, are appended to ds.
-//
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
-// !rp.Done().
-func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, error) {
- if !d.isDir() {
- return nil, syserror.ENOTDIR
- }
-
- if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
-
-afterSymlink:
- name := rp.Component()
- if name == "." {
- rp.Advance()
- return d, nil
- }
- if name == ".." {
- if isRoot, err := rp.CheckRoot(ctx, &d.vfsd); err != nil {
- return nil, err
- } else if isRoot || d.parent == nil {
- rp.Advance()
- return d, nil
- }
- if err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil {
- return nil, err
- }
- rp.Advance()
- return d.parent, nil
- }
- child, err := fs.getChildLocked(ctx, d, name, ds)
- if err != nil {
- return nil, err
- }
- if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
- return nil, err
- }
- if child.isSymlink() && mayFollowSymlinks && rp.ShouldFollowSymlink() {
- target, err := child.readlink(ctx)
- if err != nil {
- return nil, err
- }
- if err := rp.HandleSymlink(target); err != nil {
- return nil, err
- }
- goto afterSymlink // don't check the current directory again
- }
- rp.Advance()
- return child, nil
-}
-
-// verifyChild verifies the hash of child against the already verified hash of
-// the parent to ensure the child is expected. verifyChild triggers a sentry
-// panic if unexpected modifications to the file system are detected. In
-// noCrashOnVerificationFailure mode it returns a syserror instead.
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
-// TODO(b/166474175): Investigate all possible errors returned in this
-// function, and make sure we differentiate all errors that indicate unexpected
-// modifications to the file system from the ones that are not harmful.
-func (fs *filesystem) verifyChild(ctx context.Context, parent *dentry, child *dentry) (*dentry, error) {
- vfsObj := fs.vfsfs.VirtualFilesystem()
-
- // Get the path to the child dentry. This is only used to provide path
- // information in failure case.
- childPath, err := vfsObj.PathnameWithDeleted(ctx, child.fs.rootDentry.lowerVD, child.lowerVD)
- if err != nil {
- return nil, err
- }
-
- fs.verityMu.RLock()
- defer fs.verityMu.RUnlock()
- // Read the offset of the child from the extended attributes of the
- // corresponding Merkle tree file.
- // This is the offset of the hash for child in its parent's Merkle tree
- // file.
- off, err := vfsObj.GetXattrAt(ctx, fs.creds, &vfs.PathOperation{
- Root: child.lowerMerkleVD,
- Start: child.lowerMerkleVD,
- }, &vfs.GetXattrOptions{
- Name: merkleOffsetInParentXattr,
- Size: sizeOfStringInt32,
- })
-
- // The Merkle tree file for the child should have been created and
- // contains the expected xattrs. If the file or the xattr does not
- // exist, it indicates unexpected modifications to the file system.
- if err == syserror.ENOENT || err == syserror.ENODATA {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for %s: %v", merkleOffsetInParentXattr, childPath, err))
- }
- if err != nil {
- return nil, err
- }
- // The offset xattr should be an integer. If it's not, it indicates
- // unexpected modifications to the file system.
- offset, err := strconv.Atoi(off)
- if err != nil {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s for %s to int: %v", merkleOffsetInParentXattr, childPath, err))
- }
-
- // Open parent Merkle tree file to read and verify child's hash.
- parentMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: parent.lowerMerkleVD,
- Start: parent.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
-
- // The parent Merkle tree file should have been created. If it's
- // missing, it indicates an unexpected modification to the file system.
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to open parent Merkle file for %s: %v", childPath, err))
- }
- if err != nil {
- return nil, err
- }
-
- // dataSize is the size of raw data for the Merkle tree. For a file,
- // dataSize is the size of the whole file. For a directory, dataSize is
- // the size of all its children's hashes.
- dataSize, err := parentMerkleFD.GetXattr(ctx, &vfs.GetXattrOptions{
- Name: merkleSizeXattr,
- Size: sizeOfStringInt32,
- })
-
- // The Merkle tree file for the child should have been created and
- // contains the expected xattrs. If the file or the xattr does not
- // exist, it indicates unexpected modifications to the file system.
- if err == syserror.ENOENT || err == syserror.ENODATA {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for %s: %v", merkleSizeXattr, childPath, err))
- }
- if err != nil {
- return nil, err
- }
-
- // The dataSize xattr should be an integer. If it's not, it indicates
- // unexpected modifications to the file system.
- parentSize, err := strconv.Atoi(dataSize)
- if err != nil {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s for %s to int: %v", merkleSizeXattr, childPath, err))
- }
-
- fdReader := vfs.FileReadWriteSeeker{
- FD: parentMerkleFD,
- Ctx: ctx,
- }
-
- parentStat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
- Root: parent.lowerVD,
- Start: parent.lowerVD,
- }, &vfs.StatOptions{})
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to get parent stat for %s: %v", childPath, err))
- }
- if err != nil {
- return nil, err
- }
-
- // Since we are verifying against a directory Merkle tree, buf should
- // contain the hash of the children in the parent Merkle tree when
- // Verify returns with success.
- var buf bytes.Buffer
- if _, err := merkletree.Verify(&merkletree.VerifyParams{
- Out: &buf,
- File: &fdReader,
- Tree: &fdReader,
- Size: int64(parentSize),
- Name: parent.name,
- Mode: uint32(parentStat.Mode),
- UID: parentStat.UID,
- GID: parentStat.GID,
- //TODO(b/156980949): Support passing other hash algorithms.
- HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- ReadOffset: int64(offset),
- ReadSize: int64(merkletree.DigestSize(linux.FS_VERITY_HASH_ALG_SHA256)),
- Expected: parent.hash,
- DataAndTreeInSameFile: true,
- }); err != nil && err != io.EOF {
- return nil, alertIntegrityViolation(fmt.Sprintf("Verification for %s failed: %v", childPath, err))
- }
-
- // Cache child hash when it's verified the first time.
- if len(child.hash) == 0 {
- child.hash = buf.Bytes()
- }
- return child, nil
-}
-
-// verifyStat verifies the stat against the verified hash. The mode/uid/gid of
-// the file are cached after verification.
-func (fs *filesystem) verifyStat(ctx context.Context, d *dentry, stat linux.Statx) error {
- vfsObj := fs.vfsfs.VirtualFilesystem()
-
- // Get the path to the child dentry. This is only used to provide path
- // information in failure case.
- childPath, err := vfsObj.PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.lowerVD)
- if err != nil {
- return err
- }
-
- fs.verityMu.RLock()
- defer fs.verityMu.RUnlock()
-
- fd, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: d.lowerMerkleVD,
- Start: d.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err == syserror.ENOENT {
- return alertIntegrityViolation(fmt.Sprintf("Failed to open merkle file for %s: %v", childPath, err))
- }
- if err != nil {
- return err
- }
-
- merkleSize, err := fd.GetXattr(ctx, &vfs.GetXattrOptions{
- Name: merkleSizeXattr,
- Size: sizeOfStringInt32,
- })
-
- if err == syserror.ENODATA {
- return alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", merkleSizeXattr, childPath, err))
- }
- if err != nil {
- return err
- }
-
- size, err := strconv.Atoi(merkleSize)
- if err != nil {
- return alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s for %s to int: %v", merkleSizeXattr, childPath, err))
- }
-
- fdReader := vfs.FileReadWriteSeeker{
- FD: fd,
- Ctx: ctx,
- }
-
- var buf bytes.Buffer
- params := &merkletree.VerifyParams{
- Out: &buf,
- Tree: &fdReader,
- Size: int64(size),
- Name: d.name,
- Mode: uint32(stat.Mode),
- UID: stat.UID,
- GID: stat.GID,
- //TODO(b/156980949): Support passing other hash algorithms.
- HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- ReadOffset: 0,
- // Set read size to 0 so only the metadata is verified.
- ReadSize: 0,
- Expected: d.hash,
- DataAndTreeInSameFile: false,
- }
- if atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFDIR {
- params.DataAndTreeInSameFile = true
- }
-
- if _, err := merkletree.Verify(params); err != nil && err != io.EOF {
- return alertIntegrityViolation(fmt.Sprintf("Verification stat for %s failed: %v", childPath, err))
- }
- d.mode = uint32(stat.Mode)
- d.uid = stat.UID
- d.gid = stat.GID
- d.size = uint32(size)
- return nil
-}
-
-// Preconditions: fs.renameMu must be locked. d.dirMu must be locked.
-func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
- if child, ok := parent.children[name]; ok {
- // If verity is enabled on child, we should check again whether
- // the file and the corresponding Merkle tree are as expected,
- // in order to catch deletion/renaming after the last time it's
- // accessed.
- if child.verityEnabled() {
- vfsObj := fs.vfsfs.VirtualFilesystem()
- // Get the path to the child dentry. This is only used
- // to provide path information in failure case.
- path, err := vfsObj.PathnameWithDeleted(ctx, child.fs.rootDentry.lowerVD, child.lowerVD)
- if err != nil {
- return nil, err
- }
-
- childVD, err := parent.getLowerAt(ctx, vfsObj, name)
- if err == syserror.ENOENT {
- // The file was previously accessed. If the
- // file does not exist now, it indicates an
- // unexpected modification to the file system.
- return nil, alertIntegrityViolation(fmt.Sprintf("Target file %s is expected but missing", path))
- }
- if err != nil {
- return nil, err
- }
- defer childVD.DecRef(ctx)
-
- childMerkleVD, err := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
- // The Merkle tree file was previously accessed. If it
- // does not exist now, it indicates an unexpected
- // modification to the file system.
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("Expected Merkle file for target %s but none found", path))
- }
- if err != nil {
- return nil, err
- }
-
- defer childMerkleVD.DecRef(ctx)
- }
-
- // If enabling verification on files/directories is not allowed
- // during runtime, all cached children are already verified. If
- // runtime enable is allowed and the parent directory is
- // enabled, we should verify the child hash here because it may
- // have been cached before it was enabled.
- if fs.allowRuntimeEnable {
- if parent.verityEnabled() {
- if _, err := fs.verifyChild(ctx, parent, child); err != nil {
- return nil, err
- }
- }
- if child.verityEnabled() {
- vfsObj := fs.vfsfs.VirtualFilesystem()
- mask := uint32(linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID)
- stat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
- Root: child.lowerVD,
- Start: child.lowerVD,
- }, &vfs.StatOptions{
- Mask: mask,
- })
- if err != nil {
- return nil, err
- }
- if err := fs.verifyStat(ctx, child, stat); err != nil {
- return nil, err
- }
- }
- }
- return child, nil
- }
- child, err := fs.lookupAndVerifyLocked(ctx, parent, name)
- if err != nil {
- return nil, err
- }
- if parent.children == nil {
- parent.children = make(map[string]*dentry)
- }
- parent.children[name] = child
- // child's refcount is initially 0, so it may be dropped after traversal.
- *ds = appendDentry(*ds, child)
- return child, nil
-}
-
-// Preconditions: fs.renameMu must be locked. parent.dirMu must be locked.
-func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry, name string) (*dentry, error) {
- vfsObj := fs.vfsfs.VirtualFilesystem()
-
- childVD, childErr := parent.getLowerAt(ctx, vfsObj, name)
- // We will handle ENOENT separately, as it may indicate unexpected
- // modifications to the file system, and may cause a sentry panic.
- if childErr != nil && childErr != syserror.ENOENT {
- return nil, childErr
- }
-
- // The dentry needs to be cleaned up if any error occurs. IncRef will be
- // called if a verity child dentry is successfully created.
- if childErr == nil {
- defer childVD.DecRef(ctx)
- }
-
- childMerkleVD, childMerkleErr := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
- // We will handle ENOENT separately, as it may indicate unexpected
- // modifications to the file system, and may cause a sentry panic.
- if childMerkleErr != nil && childMerkleErr != syserror.ENOENT {
- return nil, childMerkleErr
- }
-
- // The dentry needs to be cleaned up if any error occurs. IncRef will be
- // called if a verity child dentry is successfully created.
- if childMerkleErr == nil {
- defer childMerkleVD.DecRef(ctx)
- }
-
- // Get the path to the parent dentry. This is only used to provide path
- // information in failure case.
- parentPath, err := vfsObj.PathnameWithDeleted(ctx, parent.fs.rootDentry.lowerVD, parent.lowerVD)
- if err != nil {
- return nil, err
- }
-
- // TODO(b/166474175): Investigate all possible errors of childErr and
- // childMerkleErr, and make sure we differentiate all errors that
- // indicate unexpected modifications to the file system from the ones
- // that are not harmful.
- if childErr == syserror.ENOENT && childMerkleErr == nil {
- // Failed to get child file/directory dentry. However the
- // corresponding Merkle tree is found. This indicates an
- // unexpected modification to the file system that
- // removed/renamed the child.
- return nil, alertIntegrityViolation(fmt.Sprintf("Target file %s is expected but missing", parentPath+"/"+name))
- } else if childErr == nil && childMerkleErr == syserror.ENOENT {
- // If in allowRuntimeEnable mode, and the Merkle tree file is
- // not created yet, we create an empty Merkle tree file, so that
- // if the file is enabled through ioctl, we have the Merkle tree
- // file open and ready to use.
- // This may cause empty and unused Merkle tree files in
- // allowRuntimeEnable mode, if they are never enabled. This
- // does not affect verification, as we rely on cached hash to
- // decide whether to perform verification, not the existence of
- // the Merkle tree file. Also, those Merkle tree files are
- // always hidden and cannot be accessed by verity fs users.
- if fs.allowRuntimeEnable {
- childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: parent.lowerVD,
- Start: parent.lowerVD,
- Path: fspath.Parse(merklePrefix + name),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT,
- Mode: 0644,
- })
- if err != nil {
- return nil, err
- }
- childMerkleFD.DecRef(ctx)
- childMerkleVD, err = parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
- if err != nil {
- return nil, err
- }
- } else {
- // Runtime enable is not allowed, so a missing Merkle tree
- // file indicates an unexpected modification to the file
- // system that removed/renamed it.
- return nil, alertIntegrityViolation(fmt.Sprintf("Expected Merkle file for target %s but none found", parentPath+"/"+name))
- }
- } else if childErr == syserror.ENOENT && childMerkleErr == syserror.ENOENT {
- // Both the child and the corresponding Merkle tree are missing.
- // This could be an unexpected modification or due to an
- // incorrect parameter.
- // TODO(b/167752508): Investigate possible ways to differentiate
- // cases that both files are deleted from cases that they never
- // exist in the file system.
- return nil, alertIntegrityViolation(fmt.Sprintf("Failed to find file %s", parentPath+"/"+name))
- }
-
- mask := uint32(linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID)
- stat, err := vfsObj.StatAt(ctx, fs.creds, &vfs.PathOperation{
- Root: childVD,
- Start: childVD,
- }, &vfs.StatOptions{
- Mask: mask,
- })
- if err != nil {
- return nil, err
- }
-
- child := fs.newDentry()
- child.lowerVD = childVD
- child.lowerMerkleVD = childMerkleVD
-
- // Increase the reference for both childVD and childMerkleVD as they are
- // held by child. If this function fails and the child is destroyed, the
- // references will be decreased in destroyLocked.
- childVD.IncRef()
- childMerkleVD.IncRef()
-
- parent.IncRef()
- child.parent = parent
- child.name = name
-
- child.mode = uint32(stat.Mode)
- child.uid = stat.UID
- child.gid = stat.GID
-
- // Verify child hash. This should always be performed unless in
- // allowRuntimeEnable mode and the parent directory hasn't been enabled
- // yet.
- if parent.verityEnabled() {
- if _, err := fs.verifyChild(ctx, parent, child); err != nil {
- child.destroyLocked(ctx)
- return nil, err
- }
- }
- if child.verityEnabled() {
- if err := fs.verifyStat(ctx, child, stat); err != nil {
- child.destroyLocked(ctx)
- return nil, err
- }
- }
-
- return child, nil
-}
-
-// walkParentDirLocked resolves all but the last path component of rp to an
-// existing directory, starting from the given directory (which is usually
-// rp.Start().Impl().(*dentry)). It does not check that the returned directory
-// is searchable by the provider of rp.
-//
-// Preconditions: fs.renameMu must be locked. !rp.Done().
-func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, ds **[]*dentry) (*dentry, error) {
- for !rp.Final() {
- d.dirMu.Lock()
- next, err := fs.stepLocked(ctx, rp, d, true /* mayFollowSymlinks */, ds)
- d.dirMu.Unlock()
- if err != nil {
- return nil, err
- }
- d = next
- }
- if !d.isDir() {
- return nil, syserror.ENOTDIR
- }
- return d, nil
-}
-
-// resolveLocked resolves rp to an existing file.
-//
-// Preconditions: fs.renameMu must be locked.
-func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath, ds **[]*dentry) (*dentry, error) {
- d := rp.Start().Impl().(*dentry)
- for !rp.Done() {
- d.dirMu.Lock()
- next, err := fs.stepLocked(ctx, rp, d, true /* mayFollowSymlinks */, ds)
- d.dirMu.Unlock()
- if err != nil {
- return nil, err
- }
- d = next
- }
- if rp.MustBeDir() && !d.isDir() {
- return nil, syserror.ENOTDIR
- }
- return d, nil
-}
-
-// AccessAt implements vfs.Filesystem.Impl.AccessAt.
-func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
- // Verity file system is read-only.
- if ats&vfs.MayWrite != 0 {
- return syserror.EROFS
- }
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return err
- }
- return d.checkPermissions(creds, ats)
-}
-
-// GetDentryAt implements vfs.FilesystemImpl.GetDentryAt.
-func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return nil, err
- }
- if opts.CheckSearchable {
- if !d.isDir() {
- return nil, syserror.ENOTDIR
- }
- if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
- }
- d.IncRef()
- return &d.vfsd, nil
-}
-
-// GetParentDentryAt implements vfs.FilesystemImpl.GetParentDentryAt.
-func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- start := rp.Start().Impl().(*dentry)
- d, err := fs.walkParentDirLocked(ctx, rp, start, &ds)
- if err != nil {
- return nil, err
- }
- d.IncRef()
- return &d.vfsd, nil
-}
-
-// LinkAt implements vfs.FilesystemImpl.LinkAt.
-func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
-func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// MknodAt implements vfs.FilesystemImpl.MknodAt.
-func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// OpenAt implements vfs.FilesystemImpl.OpenAt.
-func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- // Verity fs is read-only.
- if opts.Flags&(linux.O_WRONLY|linux.O_CREAT) != 0 {
- return nil, syserror.EROFS
- }
-
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
-
- start := rp.Start().Impl().(*dentry)
- if rp.Done() {
- return start.openLocked(ctx, rp, &opts)
- }
-
-afterTrailingSymlink:
- parent, err := fs.walkParentDirLocked(ctx, rp, start, &ds)
- if err != nil {
- return nil, err
- }
-
- // Check for search permission in the parent directory.
- if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
-
- // Open existing child or follow symlink.
- parent.dirMu.Lock()
- child, err := fs.stepLocked(ctx, rp, parent, false /*mayFollowSymlinks*/, &ds)
- parent.dirMu.Unlock()
- if err != nil {
- return nil, err
- }
- if child.isSymlink() && rp.ShouldFollowSymlink() {
- target, err := child.readlink(ctx)
- if err != nil {
- return nil, err
- }
- if err := rp.HandleSymlink(target); err != nil {
- return nil, err
- }
- start = parent
- goto afterTrailingSymlink
- }
- return child.openLocked(ctx, rp, &opts)
-}
-
-// Preconditions: fs.renameMu must be locked.
-func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
- // Users should not open the Merkle tree files. Those are for verity fs
- // use only.
- if strings.Contains(d.name, merklePrefix) {
- return nil, syserror.EPERM
- }
- ats := vfs.AccessTypesForOpenFlags(opts)
- if err := d.checkPermissions(rp.Credentials(), ats); err != nil {
- return nil, err
- }
-
- // Verity fs is read-only.
- if ats&vfs.MayWrite != 0 {
- return nil, syserror.EROFS
- }
-
- // Get the path to the target file. This is only used to provide path
- // information in failure case.
- path, err := d.fs.vfsfs.VirtualFilesystem().PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.lowerVD)
- if err != nil {
- return nil, err
- }
-
- // Open the file in the underlying file system.
- lowerFD, err := rp.VirtualFilesystem().OpenAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- }, opts)
-
- // The file should exist, as we succeeded in finding its dentry. If it's
- // missing, it indicates an unexpected modification to the file system.
- if err != nil {
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("File %s expected but not found", path))
- }
- return nil, err
- }
-
- // lowerFD needs to be cleaned up if any error occurs. IncRef will be
- // called if a verity FD is successfully created.
- defer lowerFD.DecRef(ctx)
-
- // Open the Merkle tree file corresponding to the current file/directory
- // to be used later for verifying Read/Walk.
- merkleReader, err := rp.VirtualFilesystem().OpenAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerMerkleVD,
- Start: d.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
-
- // The Merkle tree file should exist, as we succeeded in finding its
- // dentry. If it's missing, it indicates an unexpected modification to
- // the file system.
- if err != nil {
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", path))
- }
- return nil, err
- }
-
- // merkleReader needs to be cleaned up if any error occurs. IncRef will
- // be called if a verity FD is successfully created.
- defer merkleReader.DecRef(ctx)
-
- lowerFlags := lowerFD.StatusFlags()
- lowerFDOpts := lowerFD.Options()
- var merkleWriter *vfs.FileDescription
- var parentMerkleWriter *vfs.FileDescription
-
- // Only open the Merkle tree files for write if in allowRuntimeEnable
- // mode.
- if d.fs.allowRuntimeEnable {
- merkleWriter, err = rp.VirtualFilesystem().OpenAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerMerkleVD,
- Start: d.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_WRONLY | linux.O_APPEND,
- })
- if err != nil {
- if err == syserror.ENOENT {
- return nil, alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", path))
- }
- return nil, err
- }
- // merkleWriter is cleaned up if any error occurs. IncRef will
- // be called if a verity FD is created successfully.
- defer merkleWriter.DecRef(ctx)
-
- if d.parent != nil {
- parentMerkleWriter, err = rp.VirtualFilesystem().OpenAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.parent.lowerMerkleVD,
- Start: d.parent.lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_WRONLY | linux.O_APPEND,
- })
- if err != nil {
- if err == syserror.ENOENT {
- parentPath, _ := d.fs.vfsfs.VirtualFilesystem().PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.parent.lowerVD)
- return nil, alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", parentPath))
- }
- return nil, err
- }
- // parentMerkleWriter is cleaned up if any error occurs. IncRef
- // will be called if a verity FD is created successfully.
- defer parentMerkleWriter.DecRef(ctx)
- }
- }
-
- fd := &fileDescription{
- d: d,
- lowerFD: lowerFD,
- merkleReader: merkleReader,
- merkleWriter: merkleWriter,
- parentMerkleWriter: parentMerkleWriter,
- isDir: d.isDir(),
- }
-
- if err := fd.vfsfd.Init(fd, lowerFlags, rp.Mount(), &d.vfsd, &lowerFDOpts); err != nil {
- return nil, err
- }
- lowerFD.IncRef()
- merkleReader.IncRef()
- if merkleWriter != nil {
- merkleWriter.IncRef()
- }
- if parentMerkleWriter != nil {
- parentMerkleWriter.IncRef()
- }
- return &fd.vfsfd, err
-}
-
-// ReadlinkAt implements vfs.FilesystemImpl.ReadlinkAt.
-func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return "", err
- }
- //TODO(b/162787271): Provide integrity check for ReadlinkAt.
- return fs.vfsfs.VirtualFilesystem().ReadlinkAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- })
-}
-
-// RenameAt implements vfs.FilesystemImpl.RenameAt.
-func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
-func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
-func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// StatAt implements vfs.FilesystemImpl.StatAt.
-// TODO(b/170157489): Investigate whether stats other than Mode/UID/GID should
-// be verified.
-func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return linux.Statx{}, err
- }
-
- var stat linux.Statx
- stat, err = fs.vfsfs.VirtualFilesystem().StatAt(ctx, fs.creds, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- }, &opts)
- if err != nil {
- return linux.Statx{}, err
- }
- if d.verityEnabled() {
- if err := fs.verifyStat(ctx, d, stat); err != nil {
- return linux.Statx{}, err
- }
- }
- return stat, nil
-}
-
-// StatFSAt implements vfs.FilesystemImpl.StatFSAt.
-func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linux.Statfs, error) {
- // TODO(b/159261227): Implement StatFSAt.
- return linux.Statfs{}, nil
-}
-
-// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
-func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
-func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
-func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- if _, err := fs.resolveLocked(ctx, rp, &ds); err != nil {
- return nil, err
- }
- return nil, syserror.ECONNREFUSED
-}
-
-// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
-func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return nil, err
- }
- lowerVD := d.lowerVD
- return fs.vfsfs.VirtualFilesystem().ListXattrAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- }, size)
-}
-
-// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
-func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
- var ds *[]*dentry
- fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
- d, err := fs.resolveLocked(ctx, rp, &ds)
- if err != nil {
- return "", err
- }
- lowerVD := d.lowerVD
- return fs.vfsfs.VirtualFilesystem().GetXattrAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- }, &opts)
-}
-
-// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
-func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
-func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {
- // Verity file system is read-only.
- return syserror.EROFS
-}
-
-// PrependPath implements vfs.FilesystemImpl.PrependPath.
-func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDentry, b *fspath.Builder) error {
- fs.renameMu.RLock()
- defer fs.renameMu.RUnlock()
- mnt := vd.Mount()
- d := vd.Dentry().Impl().(*dentry)
- for {
- if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
- return vfs.PrependPathAtVFSRootError{}
- }
- if &d.vfsd == mnt.Root() {
- return nil
- }
- if d.parent == nil {
- return vfs.PrependPathAtNonMountRootError{}
- }
- b.PrependComponent(d.name)
- d = d.parent
- }
-}
diff --git a/pkg/sentry/fsimpl/verity/save_restore.go b/pkg/sentry/fsimpl/verity/save_restore.go
deleted file mode 100644
index 46b064342..000000000
--- a/pkg/sentry/fsimpl/verity/save_restore.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verity
-
-import (
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/refsvfs2"
-)
-
-func (d *dentry) afterLoad() {
- if atomic.LoadInt64(&d.refs) != -1 {
- refsvfs2.Register(d)
- }
-}
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
deleted file mode 100644
index de92878fd..000000000
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ /dev/null
@@ -1,906 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package verity provides a filesystem implementation that is a wrapper of
-// another file system.
-// The verity file system provides integrity checks for the underlying file
-// system by verifying path traversals and each read.
-// The verity file system is read-only, except for one case: when
-// allowRuntimeEnable is true, additional Merkle files can be generated using
-// the FS_IOC_ENABLE_VERITY ioctl.
-package verity
-
-import (
- "fmt"
- "math"
- "strconv"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/merkletree"
- "gvisor.dev/gvisor/pkg/refsvfs2"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- // Name is the default filesystem name.
- Name = "verity"
-
- // merklePrefix is the prefix of the Merkle tree files. For example, the Merkle
- // tree file for "/foo" is "/.merkle.verity.foo".
- merklePrefix = ".merkle.verity."
-
- // merkleOffsetInParentXattr is the extended attribute name specifying the
- // offset of the child hash in its parent's Merkle tree.
- merkleOffsetInParentXattr = "user.merkle.offset"
-
- // merkleSizeXattr is the extended attribute name specifying the size of data
- // hashed by the corresponding Merkle tree. For a regular file, this is the
- // file size. For a directory, this is the size of all its children's hashes.
- merkleSizeXattr = "user.merkle.size"
-
- // sizeOfStringInt32 is the size for a 32 bit integer stored as string in
- // extended attributes. The maximum value of a 32 bit integer has 10 digits.
- sizeOfStringInt32 = 10
-)
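
These constants fix both the Merkle file naming scheme and the string encoding used for integer xattr values. The standalone sketch below (not part of the package) shows how they combine; the file name "bar" and size 4096 are arbitrary examples.

package main

import (
	"fmt"
	"strconv"
)

const (
	merklePrefix      = ".merkle.verity."
	sizeOfStringInt32 = 10
)

func main() {
	// The Merkle tree file for "bar" lives next to it as merklePrefix+"bar".
	name := "bar"
	fmt.Println(merklePrefix + name) // .merkle.verity.bar

	// A 32-bit size is stored in an xattr as a decimal string of at most
	// sizeOfStringInt32 (10) characters...
	size := int32(4096)
	encoded := strconv.Itoa(int(size))
	fmt.Println(len(encoded) <= sizeOfStringInt32) // true

	// ...and read back with strconv.Atoi, as verifyChild and verifyStat do.
	decoded, err := strconv.Atoi(encoded)
	fmt.Println(decoded, err) // 4096 <nil>
}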
-
-var (
- // noCrashOnVerificationFailure indicates whether the sandbox should panic
- // whenever verification fails. If true, an error is returned instead of
- // panicking. This should only be set for tests.
- //
- // TODO(b/165661693): Decide whether to panic or return error based on this
- // flag.
- noCrashOnVerificationFailure bool
-
- // verityMu synchronizes concurrent operations that enable verity and perform
- // verification checks.
- verityMu sync.RWMutex
-)
-
-// FilesystemType implements vfs.FilesystemType.
-//
-// +stateify savable
-type FilesystemType struct{}
-
-// filesystem implements vfs.FilesystemImpl.
-//
-// +stateify savable
-type filesystem struct {
- vfsfs vfs.Filesystem
-
- // creds is a copy of the filesystem's creator's credentials, which are
- // used for accesses to the underlying file system. creds is immutable.
- creds *auth.Credentials
-
- // allowRuntimeEnable is true if using ioctl with FS_IOC_ENABLE_VERITY
- // to build Merkle trees in the verity file system is allowed. If this
- // is false, no new Merkle trees can be built, and only the files that
- // had Merkle trees before startup (e.g. from a host filesystem mounted
- // with gofer fs) can be verified.
- allowRuntimeEnable bool
-
- // lowerMount is the underlying file system mount.
- lowerMount *vfs.Mount
-
- // rootDentry is the mount root Dentry for this file system, which
- // stores the root hash of the whole file system in bytes.
- rootDentry *dentry
-
- // renameMu synchronizes renaming with non-renaming operations in order
- // to ensure consistent lock ordering between dentry.dirMu in different
- // dentries.
- renameMu sync.RWMutex `state:"nosave"`
-
- // verityMu synchronizes enabling verity files, protecting files or
- // directories from being enabled by different threads simultaneously.
- // It also ensures that verity does not access files that are being
- // enabled.
- //
- // Also, a directory's Merkle tree depends on the generated trees of
- // its children, so they shouldn't be enabled at the same time. This
- // lock is for the whole file system to ensure that no more than one
- // file is enabled at the same time.
- verityMu sync.RWMutex
-}
-
-// InternalFilesystemOptions may be passed as
-// vfs.GetFilesystemOptions.InternalData to FilesystemType.GetFilesystem.
-//
-// +stateify savable
-type InternalFilesystemOptions struct {
- // RootMerkleFileName is the name of the verity root Merkle tree file.
- RootMerkleFileName string
-
- // LowerName is the name of the filesystem wrapped by verity fs.
- LowerName string
-
- // RootHash is the root hash of the overall verity file system.
- RootHash []byte
-
- // AllowRuntimeEnable specifies whether the verity file system allows
- // enabling verification for files (i.e. building Merkle trees) during
- // runtime.
- AllowRuntimeEnable bool
-
- // LowerGetFSOptions is the file system option for the lower layer file
- // system wrapped by verity file system.
- LowerGetFSOptions vfs.GetFilesystemOptions
-
- // NoCrashOnVerificationFailure indicates whether the sandbox should
- // panic whenever verification fails. If true, an error is returned
- // instead of panicking. This should only be set for tests.
- NoCrashOnVerificationFailure bool
-}
-
-// Name implements vfs.FilesystemType.Name.
-func (FilesystemType) Name() string {
- return Name
-}
-
-// Release implements vfs.FilesystemType.Release.
-func (FilesystemType) Release(ctx context.Context) {}
-
-// alertIntegrityViolation alerts a violation of integrity, which usually means
-// unexpected modification to the file system is detected. In
-// noCrashOnVerificationFailure mode, it returns EIO; otherwise it panics.
-func alertIntegrityViolation(msg string) error {
- if noCrashOnVerificationFailure {
- return syserror.EIO
- }
- panic(msg)
-}
-
-// GetFilesystem implements vfs.FilesystemType.GetFilesystem.
-func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- iopts, ok := opts.InternalData.(InternalFilesystemOptions)
- if !ok {
- ctx.Warningf("verity.FilesystemType.GetFilesystem: missing verity configs")
- return nil, nil, syserror.EINVAL
- }
- noCrashOnVerificationFailure = iopts.NoCrashOnVerificationFailure
-
- // Mount the lower file system. The lower file system is wrapped inside
- // verity, and should not be exposed or connected.
- mopts := &vfs.MountOptions{
- GetFilesystemOptions: iopts.LowerGetFSOptions,
- InternalMount: true,
- }
- mnt, err := vfsObj.MountDisconnected(ctx, creds, "", iopts.LowerName, mopts)
- if err != nil {
- return nil, nil, err
- }
-
- fs := &filesystem{
- creds: creds.Fork(),
- lowerMount: mnt,
- allowRuntimeEnable: iopts.AllowRuntimeEnable,
- }
- fs.vfsfs.Init(vfsObj, &fstype, fs)
-
- // Construct the root dentry.
- d := fs.newDentry()
- d.refs = 1
- lowerVD := vfs.MakeVirtualDentry(mnt, mnt.Root())
- lowerVD.IncRef()
- d.lowerVD = lowerVD
-
- rootMerkleName := merklePrefix + iopts.RootMerkleFileName
-
- lowerMerkleVD, err := vfsObj.GetDentryAt(ctx, fs.creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- Path: fspath.Parse(rootMerkleName),
- }, &vfs.GetDentryOptions{})
-
- // If runtime enable is allowed, the root Merkle tree may be absent. We
- // should create the tree file.
- if err == syserror.ENOENT && fs.allowRuntimeEnable {
- lowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- Path: fspath.Parse(rootMerkleName),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT,
- Mode: 0644,
- })
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- d.DecRef(ctx)
- return nil, nil, err
- }
- lowerMerkleFD.DecRef(ctx)
- lowerMerkleVD, err = vfsObj.GetDentryAt(ctx, fs.creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- Path: fspath.Parse(rootMerkleName),
- }, &vfs.GetDentryOptions{})
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- d.DecRef(ctx)
- return nil, nil, err
- }
- } else if err != nil {
- // Failed to get dentry for the root Merkle file. This
- // indicates an unexpected modification that removed/renamed
- // the root Merkle file, or that it was never generated.
- fs.vfsfs.DecRef(ctx)
- d.DecRef(ctx)
- return nil, nil, alertIntegrityViolation("Failed to find root Merkle file")
- }
- d.lowerMerkleVD = lowerMerkleVD
-
- // Get metadata from the underlying file system.
- const statMask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID
- stat, err := vfsObj.StatAt(ctx, creds, &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- }, &vfs.StatOptions{
- Mask: statMask,
- })
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- d.DecRef(ctx)
- return nil, nil, err
- }
-
- d.mode = uint32(stat.Mode)
- d.uid = stat.UID
- d.gid = stat.GID
- d.hash = make([]byte, len(iopts.RootHash))
-
- if !fs.allowRuntimeEnable {
- if err := fs.verifyStat(ctx, d, stat); err != nil {
- return nil, nil, err
- }
- }
-
- copy(d.hash, iopts.RootHash)
- d.vfsd.Init(d)
-
- fs.rootDentry = d
-
- return &fs.vfsfs, &d.vfsd, nil
-}
-
-// Release implements vfs.FilesystemImpl.Release.
-func (fs *filesystem) Release(ctx context.Context) {
- fs.lowerMount.DecRef(ctx)
-}
-
-// dentry implements vfs.DentryImpl.
-//
-// +stateify savable
-type dentry struct {
- vfsd vfs.Dentry
-
- refs int64
-
- // fs is the owning filesystem. fs is immutable.
- fs *filesystem
-
- // mode, uid, gid and size are the file mode, owner, group, and size of
- // the file in the underlying file system.
- mode uint32
- uid uint32
- gid uint32
- size uint32
-
- // parent is the dentry corresponding to this dentry's parent directory.
- // name is this dentry's name in parent. If this dentry is a filesystem
- // root, parent is nil and name is the empty string. parent and name are
- // protected by fs.renameMu.
- parent *dentry
- name string
-
- // If this dentry represents a directory, children maps the names of
- // children for which dentries have been instantiated to those dentries,
- // and dirents (if not nil) is a cache of dirents as returned by
- // directoryFDs representing this directory. children is protected by
- // dirMu.
- dirMu sync.Mutex `state:"nosave"`
- children map[string]*dentry
-
- // lowerVD is the VirtualDentry in the underlying file system.
- lowerVD vfs.VirtualDentry
-
- // lowerMerkleVD is the VirtualDentry of the corresponding Merkle tree
- // in the underlying file system.
- lowerMerkleVD vfs.VirtualDentry
-
- // hash is the calculated hash for the current file or directory.
- hash []byte
-}
-
-// newDentry creates a new dentry representing the given verity file. The
-// dentry initially has no references; it is the caller's responsibility to set
-// the dentry's reference count and/or call dentry.destroy() as appropriate.
-// The dentry is initially invalid in that it contains no underlying dentries;
-// the caller is responsible for setting them.
-func (fs *filesystem) newDentry() *dentry {
- d := &dentry{
- fs: fs,
- }
- d.vfsd.Init(d)
- refsvfs2.Register(d)
- return d
-}
-
-// IncRef implements vfs.DentryImpl.IncRef.
-func (d *dentry) IncRef() {
- r := atomic.AddInt64(&d.refs, 1)
- refsvfs2.LogIncRef(d, r)
-}
-
-// TryIncRef implements vfs.DentryImpl.TryIncRef.
-func (d *dentry) TryIncRef() bool {
- for {
- r := atomic.LoadInt64(&d.refs)
- if r <= 0 {
- return false
- }
- if atomic.CompareAndSwapInt64(&d.refs, r, r+1) {
- refsvfs2.LogTryIncRef(d, r+1)
- return true
- }
- }
-}
-
-// DecRef implements vfs.DentryImpl.DecRef.
-func (d *dentry) DecRef(ctx context.Context) {
- r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
- if r == 0 {
- d.fs.renameMu.Lock()
- d.checkDropLocked(ctx)
- d.fs.renameMu.Unlock()
- } else if r < 0 {
- panic("verity.dentry.DecRef() called without holding a reference")
- }
-}
-
-func (d *dentry) decRefLocked(ctx context.Context) {
- r := atomic.AddInt64(&d.refs, -1)
- refsvfs2.LogDecRef(d, r)
- if r == 0 {
- d.checkDropLocked(ctx)
- } else if r < 0 {
- panic("verity.dentry.decRefLocked() called without holding a reference")
- }
-}
-
-// checkDropLocked should be called after d's reference count becomes 0 or
-// after it has been deleted.
-func (d *dentry) checkDropLocked(ctx context.Context) {
- // Dentries with a positive reference count must be retained. Dentries
- // with a negative reference count have already been destroyed.
- if atomic.LoadInt64(&d.refs) != 0 {
- return
- }
- // Refs is still zero; destroy it.
- d.destroyLocked(ctx)
- return
-}
-
-// destroyLocked destroys the dentry.
-//
-// Preconditions: d.fs.renameMu must be locked for writing. d.refs == 0.
-func (d *dentry) destroyLocked(ctx context.Context) {
- switch atomic.LoadInt64(&d.refs) {
- case 0:
- // Mark the dentry destroyed.
- atomic.StoreInt64(&d.refs, -1)
- case -1:
- panic("verity.dentry.destroyLocked() called on already destroyed dentry")
- default:
- panic("verity.dentry.destroyLocked() called with references on the dentry")
- }
-
- if d.lowerVD.Ok() {
- d.lowerVD.DecRef(ctx)
- }
- if d.lowerMerkleVD.Ok() {
- d.lowerMerkleVD.DecRef(ctx)
- }
- if d.parent != nil {
- d.parent.dirMu.Lock()
- if !d.vfsd.IsDead() {
- delete(d.parent.children, d.name)
- }
- d.parent.dirMu.Unlock()
- d.parent.decRefLocked(ctx)
- }
- refsvfs2.Unregister(d)
-}
-
-// RefType implements refsvfs2.CheckedObject.Type.
-func (d *dentry) RefType() string {
- return "verity.dentry"
-}
-
-// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
-func (d *dentry) LeakMessage() string {
- return fmt.Sprintf("[verity.dentry %p] reference count of %d instead of -1", d, atomic.LoadInt64(&d.refs))
-}
-
-// LogRefs implements refsvfs2.CheckedObject.LogRefs.
-//
-// This should only be set to true for debugging purposes, as it can generate an
-// extremely large amount of output and drastically degrade performance.
-func (d *dentry) LogRefs() bool {
- return false
-}
-
-// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
-func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {
- //TODO(b/159261227): Implement InotifyWithParent.
-}
-
-// Watches implements vfs.DentryImpl.Watches.
-func (d *dentry) Watches() *vfs.Watches {
- //TODO(b/159261227): Implement Watches.
- return nil
-}
-
-// OnZeroWatches implements vfs.DentryImpl.OnZeroWatches.
-func (d *dentry) OnZeroWatches(context.Context) {
- //TODO(b/159261227): Implement OnZeroWatches.
-}
-
-func (d *dentry) isSymlink() bool {
- return atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFLNK
-}
-
-func (d *dentry) isDir() bool {
- return atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFDIR
-}
-
-func (d *dentry) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
- return vfs.GenericCheckPermissions(creds, ats, linux.FileMode(atomic.LoadUint32(&d.mode)), auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid)))
-}
-
-// verityEnabled checks whether the file is enabled with verity features. It
-// should always be true if runtime enable is not allowed. In runtime enable
-// mode, it returns true if the target has been enabled with
-// ioctl(FS_IOC_ENABLE_VERITY).
-func (d *dentry) verityEnabled() bool {
- return !d.fs.allowRuntimeEnable || len(d.hash) != 0
-}
-
-// getLowerAt returns the dentry in the underlying file system, which is
-// represented by filename relative to d.
-func (d *dentry) getLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, filename string) (vfs.VirtualDentry, error) {
- return vfsObj.GetDentryAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- Path: fspath.Parse(filename),
- }, &vfs.GetDentryOptions{})
-}
-
-func (d *dentry) readlink(ctx context.Context) (string, error) {
- return d.fs.vfsfs.VirtualFilesystem().ReadlinkAt(ctx, d.fs.creds, &vfs.PathOperation{
- Root: d.lowerVD,
- Start: d.lowerVD,
- })
-}
-
-// fileDescription implements vfs.FileDescriptionImpl for verity fds.
-// fileDescription is a wrapper around the underlying lowerFD, and supports
-// building Merkle trees through the Linux fs-verity API to verify contents
-// read from lowerFD.
-//
-// +stateify savable
-type fileDescription struct {
- vfsfd vfs.FileDescription
- vfs.FileDescriptionDefaultImpl
- vfs.LockFD
-
- // d is the corresponding dentry to the fileDescription.
- d *dentry
-
-	// isDir specifies whether the fileDescription points to a directory.
- isDir bool
-
- // lowerFD is the FileDescription corresponding to the file in the
- // underlying file system.
- lowerFD *vfs.FileDescription
-
- // merkleReader is the read-only FileDescription corresponding to the
- // Merkle tree file in the underlying file system.
- merkleReader *vfs.FileDescription
-
- // merkleWriter is the FileDescription corresponding to the Merkle tree
- // file in the underlying file system for writing. This should only be
- // used when allowRuntimeEnable is set to true.
- merkleWriter *vfs.FileDescription
-
- // parentMerkleWriter is the FileDescription of the Merkle tree for the
- // directory that contains the current file/directory. This is only used
- // if allowRuntimeEnable is set to true.
- parentMerkleWriter *vfs.FileDescription
-
- // off is the file offset. off is protected by mu.
- mu sync.Mutex `state:"nosave"`
- off int64
-}
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *fileDescription) Release(ctx context.Context) {
- fd.lowerFD.DecRef(ctx)
- fd.merkleReader.DecRef(ctx)
- if fd.merkleWriter != nil {
- fd.merkleWriter.DecRef(ctx)
- }
- if fd.parentMerkleWriter != nil {
- fd.parentMerkleWriter.DecRef(ctx)
- }
-}
-
-// Stat implements vfs.FileDescriptionImpl.Stat.
-func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
- // TODO(b/162788573): Add integrity check for metadata.
- stat, err := fd.lowerFD.Stat(ctx, opts)
- if err != nil {
- return linux.Statx{}, err
- }
- if fd.d.verityEnabled() {
- if err := fd.d.fs.verifyStat(ctx, fd.d, stat); err != nil {
- return linux.Statx{}, err
- }
- }
- return stat, nil
-}
-
-// SetStat implements vfs.FileDescriptionImpl.SetStat.
-func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
- // Verity files are read-only.
- return syserror.EPERM
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *fileDescription) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- fd.mu.Lock()
- defer fd.mu.Unlock()
- n := int64(0)
- switch whence {
- case linux.SEEK_SET:
- // use offset as specified
- case linux.SEEK_CUR:
- n = fd.off
- case linux.SEEK_END:
- n = int64(fd.d.size)
- default:
- return 0, syserror.EINVAL
- }
- if offset > math.MaxInt64-n {
- return 0, syserror.EINVAL
- }
- offset += n
- if offset < 0 {
- return 0, syserror.EINVAL
- }
- fd.off = offset
- return offset, nil
-}
-
-// generateMerkle generates a Merkle tree file for fd. If fd points to a file
-// /foo/bar, a Merkle tree file /foo/.merkle.verity.bar is generated. The hash
-// of the generated Merkle tree and the size of the hashed data are returned.
-// If fd points to a regular file, the data is the content of the file. If fd
-// points to a directory, the data is the hashes of its children, which have
-// already been written to the Merkle tree file.
-func (fd *fileDescription) generateMerkle(ctx context.Context) ([]byte, uint64, error) {
- fdReader := vfs.FileReadWriteSeeker{
- FD: fd.lowerFD,
- Ctx: ctx,
- }
- merkleReader := vfs.FileReadWriteSeeker{
- FD: fd.merkleReader,
- Ctx: ctx,
- }
- merkleWriter := vfs.FileReadWriteSeeker{
- FD: fd.merkleWriter,
- Ctx: ctx,
- }
- params := &merkletree.GenerateParams{
- TreeReader: &merkleReader,
- TreeWriter: &merkleWriter,
- //TODO(b/156980949): Support passing other hash algorithms.
- HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- }
-
- switch atomic.LoadUint32(&fd.d.mode) & linux.S_IFMT {
- case linux.S_IFREG:
- // For a regular file, generate a Merkle tree based on its
- // content.
- var err error
- stat, err := fd.lowerFD.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- return nil, 0, err
- }
-
- params.File = &fdReader
- params.Size = int64(stat.Size)
- params.Name = fd.d.name
- params.Mode = uint32(stat.Mode)
- params.UID = stat.UID
- params.GID = stat.GID
- params.DataAndTreeInSameFile = false
- case linux.S_IFDIR:
-		// For a directory, generate a Merkle tree based on the hashes
-		// of its children that have already been written to the Merkle
-		// tree file.
- merkleStat, err := fd.merkleReader.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- return nil, 0, err
- }
-
- params.Size = int64(merkleStat.Size)
-
- stat, err := fd.lowerFD.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- return nil, 0, err
- }
-
- params.File = &merkleReader
- params.Name = fd.d.name
- params.Mode = uint32(stat.Mode)
- params.UID = stat.UID
- params.GID = stat.GID
- params.DataAndTreeInSameFile = true
- default:
- // TODO(b/167728857): Investigate whether and how we should
- // enable other types of file.
- return nil, 0, syserror.EINVAL
- }
- hash, err := merkletree.Generate(params)
- return hash, uint64(params.Size), err
-}
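-
-// Editor's note: an illustrative aside, not part of the original file. Two
-// conventions that generateMerkle and its callers rely on, both visible
-// elsewhere in this diff: the Merkle tree file is a sibling of the data file
-// whose name is prefixed with the package's merklePrefix constant, and the
-// returned data size is recorded by enableVerity as a decimal string in the
-// merkleSizeXattr xattr, which PRead later parses back with strconv.Atoi.
-// Roughly:
-//
-//	merkleName := merklePrefix + fd.d.name       // "/foo/bar" -> "/foo/.merkle.verity.bar"
-//	sizeValue := strconv.Itoa(int(dataSize))     // stored in merkleSizeXattr, e.g. "4096"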
-
-// enableVerity enables verity features on fd by generating a Merkle tree file
-// and storing its hash in its parent directory's Merkle tree.
-func (fd *fileDescription) enableVerity(ctx context.Context, uio usermem.IO) (uintptr, error) {
- if !fd.d.fs.allowRuntimeEnable {
- return 0, syserror.EPERM
- }
-
- fd.d.fs.verityMu.Lock()
- defer fd.d.fs.verityMu.Unlock()
-
- // In allowRuntimeEnable mode, the underlying fd and read/write fd for
- // the Merkle tree file should have all been initialized. For any file
- // or directory other than the root, the parent Merkle tree file should
- // have also been initialized.
- if fd.lowerFD == nil || fd.merkleReader == nil || fd.merkleWriter == nil || (fd.parentMerkleWriter == nil && fd.d != fd.d.fs.rootDentry) {
- return 0, alertIntegrityViolation("Unexpected verity fd: missing expected underlying fds")
- }
-
- hash, dataSize, err := fd.generateMerkle(ctx)
- if err != nil {
- return 0, err
- }
-
- if fd.parentMerkleWriter != nil {
- stat, err := fd.parentMerkleWriter.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- return 0, err
- }
-
- // Write the hash of fd to the parent directory's Merkle tree
- // file, as it should be part of the parent Merkle tree data.
- // parentMerkleWriter is open with O_APPEND, so it should write
- // directly to the end of the file.
- if _, err = fd.parentMerkleWriter.Write(ctx, usermem.BytesIOSequence(hash), vfs.WriteOptions{}); err != nil {
- return 0, err
- }
-
- // Record the offset of the hash of fd in parent directory's
- // Merkle tree file.
- if err := fd.merkleWriter.SetXattr(ctx, &vfs.SetXattrOptions{
- Name: merkleOffsetInParentXattr,
- Value: strconv.Itoa(int(stat.Size)),
- }); err != nil {
- return 0, err
- }
- }
-
- // Record the size of the data being hashed for fd.
- if err := fd.merkleWriter.SetXattr(ctx, &vfs.SetXattrOptions{
- Name: merkleSizeXattr,
- Value: strconv.Itoa(int(dataSize)),
- }); err != nil {
- return 0, err
- }
- fd.d.hash = append(fd.d.hash, hash...)
- return 0, nil
-}
-
-// measureVerity returns the hash of fd by copying it out to verityDigest.
-func (fd *fileDescription) measureVerity(ctx context.Context, uio usermem.IO, verityDigest usermem.Addr) (uintptr, error) {
- t := kernel.TaskFromContext(ctx)
- if t == nil {
- return 0, syserror.EINVAL
- }
- var metadata linux.DigestMetadata
-
- // If allowRuntimeEnable is true, an empty fd.d.hash indicates that
- // verity is not enabled for the file. If allowRuntimeEnable is false,
- // this is an integrity violation because all files should have verity
- // enabled, in which case fd.d.hash should be set.
- if len(fd.d.hash) == 0 {
- if fd.d.fs.allowRuntimeEnable {
- return 0, syserror.ENODATA
- }
- return 0, alertIntegrityViolation("Ioctl measureVerity: no hash found")
- }
-
- // The first part of VerityDigest is the metadata.
- if _, err := metadata.CopyIn(t, verityDigest); err != nil {
- return 0, err
- }
- if metadata.DigestSize < uint16(len(fd.d.hash)) {
- return 0, syserror.EOVERFLOW
- }
-
- // Populate the output digest size, since DigestSize is both input and
- // output.
- metadata.DigestSize = uint16(len(fd.d.hash))
-
- // First copy the metadata.
- if _, err := metadata.CopyOut(t, verityDigest); err != nil {
- return 0, err
- }
-
- // Now copy the root hash bytes to the memory after metadata.
- _, err := t.CopyOutBytes(usermem.Addr(uintptr(verityDigest)+linux.SizeOfDigestMetadata), fd.d.hash)
- return 0, err
-}
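-
-// Editor's note: an illustrative sketch, not part of the original file, of the
-// buffer layout that measureVerity produces at verityDigest: a
-// linux.DigestMetadata header followed immediately by the raw digest bytes.
-// Assuming buf is a hypothetical []byte holding the copied-out result, and
-// assuming the go_marshal-generated UnmarshalBytes helper is available:
-//
-//	var meta linux.DigestMetadata
-//	meta.UnmarshalBytes(buf[:linux.SizeOfDigestMetadata])
-//	digest := buf[linux.SizeOfDigestMetadata:][:meta.DigestSize]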
-
-func (fd *fileDescription) verityFlags(ctx context.Context, uio usermem.IO, flags usermem.Addr) (uintptr, error) {
- f := int32(0)
-
- // All enabled files should store a hash. This flag is not settable via
- // FS_IOC_SETFLAGS.
- if len(fd.d.hash) != 0 {
- f |= linux.FS_VERITY_FL
- }
-
- t := kernel.TaskFromContext(ctx)
- if t == nil {
- return 0, syserror.EINVAL
- }
- _, err := primitive.CopyInt32Out(t, flags, f)
- return 0, err
-}
-
-// Ioctl implements vfs.FileDescriptionImpl.Ioctl.
-func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {
- switch cmd := args[1].Uint(); cmd {
- case linux.FS_IOC_ENABLE_VERITY:
- return fd.enableVerity(ctx, uio)
- case linux.FS_IOC_MEASURE_VERITY:
- return fd.measureVerity(ctx, uio, args[2].Pointer())
- case linux.FS_IOC_GETFLAGS:
- return fd.verityFlags(ctx, uio, args[2].Pointer())
- default:
- // TODO(b/169682228): Investigate which ioctl commands should
- // be allowed.
- return 0, syserror.ENOSYS
- }
-}
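-
-// Editor's note: an illustrative sketch, not part of the original file, of how
-// a caller reaches enableVerity through this dispatcher; it mirrors the usage
-// in verity_test.go later in this diff, where fd is a *vfs.FileDescription
-// backed by this implementation:
-//
-//	var args arch.SyscallArguments
-//	args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
-//	if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
-//		// handle the error
-//	}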
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *fileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- // Implement Read with PRead by setting offset.
- fd.mu.Lock()
- n, err := fd.PRead(ctx, dst, fd.off, opts)
- fd.off += n
- fd.mu.Unlock()
- return n, err
-}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- // No need to verify if the file is not enabled yet in
- // allowRuntimeEnable mode.
- if !fd.d.verityEnabled() {
- return fd.lowerFD.PRead(ctx, dst, offset, opts)
- }
-
- fd.d.fs.verityMu.RLock()
- defer fd.d.fs.verityMu.RUnlock()
- // dataSize is the size of the whole file.
- dataSize, err := fd.merkleReader.GetXattr(ctx, &vfs.GetXattrOptions{
- Name: merkleSizeXattr,
- Size: sizeOfStringInt32,
- })
-
-	// The Merkle tree file for the child should have been created and
-	// should contain the expected xattrs. If the xattr does not exist, it
-	// indicates unexpected modifications to the file system.
- if err == syserror.ENODATA {
- return 0, alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", merkleSizeXattr, err))
- }
- if err != nil {
- return 0, err
- }
-
- // The dataSize xattr should be an integer. If it's not, it indicates
- // unexpected modifications to the file system.
- size, err := strconv.Atoi(dataSize)
- if err != nil {
- return 0, alertIntegrityViolation(fmt.Sprintf("Failed to convert xattr %s to int: %v", merkleSizeXattr, err))
- }
-
- dataReader := vfs.FileReadWriteSeeker{
- FD: fd.lowerFD,
- Ctx: ctx,
- }
-
- merkleReader := vfs.FileReadWriteSeeker{
- FD: fd.merkleReader,
- Ctx: ctx,
- }
-
- n, err := merkletree.Verify(&merkletree.VerifyParams{
- Out: dst.Writer(ctx),
- File: &dataReader,
- Tree: &merkleReader,
- Size: int64(size),
- Name: fd.d.name,
- Mode: fd.d.mode,
- UID: fd.d.uid,
- GID: fd.d.gid,
- //TODO(b/156980949): Support passing other hash algorithms.
- HashAlgorithms: linux.FS_VERITY_HASH_ALG_SHA256,
- ReadOffset: offset,
- ReadSize: dst.NumBytes(),
- Expected: fd.d.hash,
- DataAndTreeInSameFile: false,
- })
- if err != nil {
- return 0, alertIntegrityViolation(fmt.Sprintf("Verification failed: %v", err))
- }
- return n, err
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *fileDescription) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EROFS
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *fileDescription) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EROFS
-}
-
-// LockPOSIX implements vfs.FileDescriptionImpl.LockPOSIX.
-func (fd *fileDescription) LockPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, start, length uint64, whence int16, block fslock.Blocker) error {
- return fd.lowerFD.LockPOSIX(ctx, uid, t, start, length, whence, block)
-}
-
-// UnlockPOSIX implements vfs.FileDescriptionImpl.UnlockPOSIX.
-func (fd *fileDescription) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, start, length uint64, whence int16) error {
- return fd.lowerFD.UnlockPOSIX(ctx, uid, start, length, whence)
-}
diff --git a/pkg/sentry/fsimpl/verity/verity_test.go b/pkg/sentry/fsimpl/verity/verity_test.go
deleted file mode 100644
index c647cbfd3..000000000
--- a/pkg/sentry/fsimpl/verity/verity_test.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verity
-
-import (
- "fmt"
- "io"
- "math/rand"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// rootMerkleFilename is the name of the root Merkle tree file.
-const rootMerkleFilename = "root.verity"
-
-// maxDataSize is the maximum size of the data written to a test file.
-const maxDataSize = 100000
-
-// newVerityRoot creates a new verity mount, and returns the root. The
-// underlying file system is tmpfs. Cleanup of the root and mount namespace is
-// registered via t.Cleanup, so callers do not need to release them explicitly.
-func newVerityRoot(t *testing.T) (*vfs.VirtualFilesystem, vfs.VirtualDentry, *kernel.Task, error) {
- k, err := testutil.Boot()
- if err != nil {
- t.Fatalf("testutil.Boot: %v", err)
- }
-
- ctx := k.SupervisorContext()
-
- rand.Seed(time.Now().UnixNano())
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("VFS init: %v", err)
- }
-
- vfsObj.MustRegisterFilesystemType("verity", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- vfsObj.MustRegisterFilesystemType("tmpfs", tmpfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
-
- mntns, err := vfsObj.NewMountNamespace(ctx, auth.CredentialsFromContext(ctx), "", "verity", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: InternalFilesystemOptions{
- RootMerkleFileName: rootMerkleFilename,
- LowerName: "tmpfs",
- AllowRuntimeEnable: true,
- NoCrashOnVerificationFailure: true,
- },
- },
- })
- if err != nil {
- return nil, vfs.VirtualDentry{}, nil, fmt.Errorf("NewMountNamespace: %v", err)
- }
- root := mntns.Root()
- root.IncRef()
-
- // Use lowerRoot in the task as we modify the lower file system
- // directly in many tests.
- lowerRoot := root.Dentry().Impl().(*dentry).lowerVD
- tc := k.NewThreadGroup(nil, k.RootPIDNamespace(), kernel.NewSignalHandlers(), linux.SIGCHLD, k.GlobalInit().Limits())
- task, err := testutil.CreateTask(ctx, "name", tc, mntns, lowerRoot, lowerRoot)
- if err != nil {
- t.Fatalf("testutil.CreateTask: %v", err)
- }
-
- t.Helper()
- t.Cleanup(func() {
- root.DecRef(ctx)
- mntns.DecRef(ctx)
- })
- return vfsObj, root, task, nil
-}
-
-// newFileFD creates a new file in the verity mount, fills it with randomly
-// generated data, and returns an FD for the verity file along with the size
-// of the data written.
-func newFileFD(ctx context.Context, vfsObj *vfs.VirtualFilesystem, root vfs.VirtualDentry, filePath string, mode linux.FileMode) (*vfs.FileDescription, int, error) {
- creds := auth.CredentialsFromContext(ctx)
- lowerRoot := root.Dentry().Impl().(*dentry).lowerVD
-
- // Create the file in the underlying file system.
- lowerFD, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: lowerRoot,
- Start: lowerRoot,
- Path: fspath.Parse(filePath),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_CREAT | linux.O_EXCL,
- Mode: linux.ModeRegular | mode,
- })
- if err != nil {
- return nil, 0, err
- }
-
- // Generate random data to be written to the file.
- dataSize := rand.Intn(maxDataSize) + 1
- data := make([]byte, dataSize)
- rand.Read(data)
-
- // Write directly to the underlying FD, since verity FD is read-only.
- n, err := lowerFD.Write(ctx, usermem.BytesIOSequence(data), vfs.WriteOptions{})
- if err != nil {
- return nil, 0, err
- }
-
- if n != int64(len(data)) {
- return nil, 0, fmt.Errorf("lowerFD.Write got write length %d, want %d", n, len(data))
- }
-
- lowerFD.DecRef(ctx)
-
- // Now open the verity file descriptor.
- fd, err := vfsObj.OpenAt(ctx, creds, &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filePath),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- Mode: linux.ModeRegular | mode,
- })
- return fd, dataSize, err
-}
-
-// corruptRandomBit randomly flips a bit in the file represented by fd.
-func corruptRandomBit(ctx context.Context, fd *vfs.FileDescription, size int) error {
- // Flip a random bit in the underlying file.
- randomPos := int64(rand.Intn(size))
- byteToModify := make([]byte, 1)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.ReadOptions{}); err != nil {
- return fmt.Errorf("lowerFD.PRead: %v", err)
- }
- byteToModify[0] ^= 1
- if _, err := fd.PWrite(ctx, usermem.BytesIOSequence(byteToModify), randomPos, vfs.WriteOptions{}); err != nil {
- return fmt.Errorf("lowerFD.PWrite: %v", err)
- }
- return nil
-}
-
-// TestOpen ensures that when a file is created, the corresponding Merkle tree
-// file and the root Merkle tree file exist.
-func TestOpen(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- if _, _, err := newFileFD(ctx, vfsObj, root, filename, 0644); err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Ensure that the corresponding Merkle tree file is created.
- lowerRoot := root.Dentry().Impl().(*dentry).lowerVD
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: lowerRoot,
- Start: lowerRoot,
- Path: fspath.Parse(merklePrefix + filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- }); err != nil {
- t.Errorf("OpenAt Merkle tree file %s: %v", merklePrefix+filename, err)
- }
-
-	// Ensure the root Merkle tree file is created.
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: lowerRoot,
- Start: lowerRoot,
- Path: fspath.Parse(merklePrefix + rootMerkleFilename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- }); err != nil {
- t.Errorf("OpenAt root Merkle tree file %s: %v", merklePrefix+rootMerkleFilename, err)
- }
-}
-
-// TestPReadUnmodifiedFileSucceeds ensures that pread from an untouched verity
-// file succeeds after enabling verity for it.
-func TestPReadUnmodifiedFileSucceeds(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm a normal read succeeds.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- buf := make([]byte, size)
- n, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.PRead: %v", err)
- }
-
- if n != int64(size) {
- t.Errorf("fd.PRead got read length %d, want %d", n, size)
- }
-}
-
-// TestReadUnmodifiedFileSucceeds ensures that read from an untouched verity
-// file succeeds after enabling verity for it.
-func TestReadUnmodifiedFileSucceeds(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file and confirm a normal read succeeds.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- buf := make([]byte, size)
- n, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{})
- if err != nil && err != io.EOF {
- t.Fatalf("fd.Read: %v", err)
- }
-
- if n != int64(size) {
-		t.Errorf("fd.Read got read length %d, want %d", n, size)
- }
-}
-
-// TestReopenUnmodifiedFileSucceeds ensures that reopening an untouched verity
-// file succeeds after enabling verity for it.
-func TestReopenUnmodifiedFileSucceeds(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
-	// Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- // Ensure reopening the verity enabled file succeeds.
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- Mode: linux.ModeRegular,
- }); err != nil {
- t.Errorf("reopen enabled file failed: %v", err)
- }
-}
-
-// TestPReadModifiedFileFails ensures that pread from a modified verity file
-// fails.
-func TestPReadModifiedFileFails(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- // Open a new lowerFD that's read/writable.
- lowerVD := fd.Impl().(*fileDescription).d.lowerVD
-
- lowerFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- if err := corruptRandomBit(ctx, lowerFD, size); err != nil {
- t.Fatalf("corruptRandomBit: %v", err)
- }
-
- // Confirm that read from the modified file fails.
- buf := make([]byte, size)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.PRead succeeded, expected failure")
- }
-}
-
-// TestReadModifiedFileFails ensures that read from a modified verity file
-// fails.
-func TestReadModifiedFileFails(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- // Open a new lowerFD that's read/writable.
- lowerVD := fd.Impl().(*fileDescription).d.lowerVD
-
- lowerFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: lowerVD,
- Start: lowerVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- if err := corruptRandomBit(ctx, lowerFD, size); err != nil {
- t.Fatalf("corruptRandomBit: %v", err)
- }
-
- // Confirm that read from the modified file fails.
- buf := make([]byte, size)
- if _, err := fd.Read(ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.Read succeeded, expected failure")
- }
-}
-
-// TestModifiedMerkleFails ensures that read from a verity file fails if the
-// corresponding Merkle tree file is modified.
-func TestModifiedMerkleFails(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, size, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- // Open a new lowerMerkleFD that's read/writable.
- lowerMerkleVD := fd.Impl().(*fileDescription).d.lowerMerkleVD
-
- lowerMerkleFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: lowerMerkleVD,
- Start: lowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- // Flip a random bit in the Merkle tree file.
- stat, err := lowerMerkleFD.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Fatalf("stat: %v", err)
- }
- merkleSize := int(stat.Size)
- if err := corruptRandomBit(ctx, lowerMerkleFD, merkleSize); err != nil {
- t.Fatalf("corruptRandomBit: %v", err)
- }
-
- // Confirm that read from a file with modified Merkle tree fails.
- buf := make([]byte, size)
- if _, err := fd.PRead(ctx, usermem.BytesIOSequence(buf), 0 /* offset */, vfs.ReadOptions{}); err == nil {
- t.Fatalf("fd.PRead succeeded with modified Merkle file")
- }
-}
-
-// TestModifiedParentMerkleFails ensures that opening a verity enabled file in
-// a verity enabled directory fails if the hashes related to the target file
-// in the parent Merkle tree file are modified.
-func TestModifiedParentMerkleFails(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- // Enable verity on the parent directory.
- parentFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- })
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- if _, err := parentFD.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
-	// Open a new parentLowerMerkleFD that's read/writable.
- parentLowerMerkleVD := fd.Impl().(*fileDescription).d.parent.lowerMerkleVD
-
- parentLowerMerkleFD, err := vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: parentLowerMerkleVD,
- Start: parentLowerMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR,
- })
- if err != nil {
- t.Fatalf("OpenAt: %v", err)
- }
-
- // Flip a random bit in the parent Merkle tree file.
- // This parent directory contains only one child, so any random
- // modification in the parent Merkle tree should cause verification
- // failure when opening the child file.
- stat, err := parentLowerMerkleFD.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Fatalf("stat: %v", err)
- }
- parentMerkleSize := int(stat.Size)
- if err := corruptRandomBit(ctx, parentLowerMerkleFD, parentMerkleSize); err != nil {
- t.Fatalf("corruptRandomBit: %v", err)
- }
-
- parentLowerMerkleFD.DecRef(ctx)
-
- // Ensure reopening the verity enabled file fails.
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- Mode: linux.ModeRegular,
- }); err == nil {
- t.Errorf("OpenAt file with modified parent Merkle succeeded")
- }
-}
-
-// TestUnmodifiedStatSucceeds ensures that stat of an untouched verity file
-// succeeds after enabling verity for it.
-func TestUnmodifiedStatSucceeds(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
-	// Enable verity on the file and confirm stat succeeds.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("fd.Ioctl: %v", err)
- }
-
- if _, err := fd.Stat(ctx, vfs.StatOptions{}); err != nil {
- t.Errorf("fd.Stat: %v", err)
- }
-}
-
-// TestModifiedStatFails checks that stat fails for a file whose underlying
-// stat has been modified.
-func TestModifiedStatFails(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("fd.Ioctl: %v", err)
- }
-
- lowerFD := fd.Impl().(*fileDescription).lowerFD
- // Change the stat of the underlying file, and check that stat fails.
- if err := lowerFD.SetStat(ctx, vfs.SetStatOptions{
- Stat: linux.Statx{
- Mask: uint32(linux.STATX_MODE),
- Mode: 0777,
- },
- }); err != nil {
- t.Fatalf("lowerFD.SetStat: %v", err)
- }
-
- if _, err := fd.Stat(ctx, vfs.StatOptions{}); err == nil {
- t.Errorf("fd.Stat succeeded when it should fail")
- }
-}
-
-// TestOpenDeletedFileFails ensures that opening a deleted or renamed verity
-// enabled file, or the corresponding Merkle tree file, fails with a
-// verification error.
-func TestOpenDeletedFileFails(t *testing.T) {
- testCases := []struct {
-		// Tests removing files if remove is true. Otherwise tests
-		// renaming files.
- remove bool
- // The original file is removed/renamed if changeFile is true.
- changeFile bool
- // The Merkle tree file is removed/renamed if changeMerkleFile
- // is true.
- changeMerkleFile bool
- }{
- {
- remove: true,
- changeFile: true,
- changeMerkleFile: false,
- },
- {
- remove: true,
- changeFile: false,
- changeMerkleFile: true,
- },
- {
- remove: false,
- changeFile: true,
- changeMerkleFile: false,
- },
- {
- remove: false,
-			changeFile:       false,
-			changeMerkleFile: true,
- },
- }
- for _, tc := range testCases {
-		t.Run(fmt.Sprintf("remove:%t changeFile:%t changeMerkleFile:%t", tc.remove, tc.changeFile, tc.changeMerkleFile), func(t *testing.T) {
- vfsObj, root, ctx, err := newVerityRoot(t)
- if err != nil {
- t.Fatalf("newVerityRoot: %v", err)
- }
-
- filename := "verity-test-file"
- fd, _, err := newFileFD(ctx, vfsObj, root, filename, 0644)
- if err != nil {
- t.Fatalf("newFileFD: %v", err)
- }
-
- // Enable verity on the file.
- var args arch.SyscallArguments
- args[1] = arch.SyscallArgument{Value: linux.FS_IOC_ENABLE_VERITY}
- if _, err := fd.Ioctl(ctx, nil /* uio */, args); err != nil {
- t.Fatalf("Ioctl: %v", err)
- }
-
- rootLowerVD := root.Dentry().Impl().(*dentry).lowerVD
- if tc.remove {
- if tc.changeFile {
- if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(filename),
- }); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := vfsObj.UnlinkAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + filename),
- }); err != nil {
- t.Fatalf("UnlinkAt: %v", err)
- }
- }
- } else {
- newFilename := "renamed-test-file"
- if tc.changeFile {
- if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(filename),
- }, &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(newFilename),
- }, &vfs.RenameOptions{}); err != nil {
- t.Fatalf("RenameAt: %v", err)
- }
- }
- if tc.changeMerkleFile {
- if err := vfsObj.RenameAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + filename),
- }, &vfs.PathOperation{
- Root: rootLowerVD,
- Start: rootLowerVD,
- Path: fspath.Parse(merklePrefix + newFilename),
- }, &vfs.RenameOptions{}); err != nil {
-						t.Fatalf("RenameAt: %v", err)
- }
- }
- }
-
- // Ensure reopening the verity enabled file fails.
- if _, err = vfsObj.OpenAt(ctx, auth.CredentialsFromContext(ctx), &vfs.PathOperation{
- Root: root,
- Start: root,
- Path: fspath.Parse(filename),
- }, &vfs.OpenOptions{
- Flags: linux.O_RDONLY,
- Mode: linux.ModeRegular,
- }); err != syserror.EIO {
- t.Errorf("got OpenAt error: %v, expected EIO", err)
- }
- })
- }
-}