Diffstat (limited to 'pkg/sentry/vfs')
-rw-r--r--  pkg/sentry/vfs/BUILD                                141
-rw-r--r--  pkg/sentry/vfs/README.md                            186
-rw-r--r--  pkg/sentry/vfs/epoll_interest_list.go               221
-rw-r--r--  pkg/sentry/vfs/event_list.go                        221
-rw-r--r--  pkg/sentry/vfs/file_description_impl_util_test.go   224
-rw-r--r--  pkg/sentry/vfs/file_description_refs.go             140
-rw-r--r--  pkg/sentry/vfs/filesystem_refs.go                   140
-rw-r--r--  pkg/sentry/vfs/g3doc/inotify.md                     210
-rw-r--r--  pkg/sentry/vfs/genericfstree/BUILD                   16
-rw-r--r--  pkg/sentry/vfs/genericfstree/genericfstree.go        92
-rw-r--r--  pkg/sentry/vfs/memxattr/BUILD                        16
-rw-r--r--  pkg/sentry/vfs/memxattr/memxattr_state_autogen.go    36
-rw-r--r--  pkg/sentry/vfs/mount_namespace_refs.go              140
-rw-r--r--  pkg/sentry/vfs/mount_test.go                        467
-rw-r--r--  pkg/sentry/vfs/vfs_state_autogen.go                2052
-rw-r--r--  pkg/sentry/vfs/vfs_unsafe_state_autogen.go            3
16 files changed, 2953 insertions, 1352 deletions
diff --git a/pkg/sentry/vfs/BUILD b/pkg/sentry/vfs/BUILD
deleted file mode 100644
index ac60fe8bf..000000000
--- a/pkg/sentry/vfs/BUILD
+++ /dev/null
@@ -1,141 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-licenses(["notice"])
-
-go_template_instance(
- name = "epoll_interest_list",
- out = "epoll_interest_list.go",
- package = "vfs",
- prefix = "epollInterest",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*epollInterest",
- "Linker": "*epollInterest",
- },
-)
-
-go_template_instance(
- name = "event_list",
- out = "event_list.go",
- package = "vfs",
- prefix = "event",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*Event",
- "Linker": "*Event",
- },
-)
-
-go_template_instance(
- name = "file_description_refs",
- out = "file_description_refs.go",
- package = "vfs",
- prefix = "FileDescription",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "FileDescription",
- },
-)
-
-go_template_instance(
- name = "mount_namespace_refs",
- out = "mount_namespace_refs.go",
- package = "vfs",
- prefix = "MountNamespace",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "MountNamespace",
- },
-)
-
-go_template_instance(
- name = "filesystem_refs",
- out = "filesystem_refs.go",
- package = "vfs",
- prefix = "Filesystem",
- template = "//pkg/refsvfs2:refs_template",
- types = {
- "T": "Filesystem",
- },
-)
-
-go_library(
- name = "vfs",
- srcs = [
- "anonfs.go",
- "context.go",
- "debug.go",
- "dentry.go",
- "device.go",
- "epoll.go",
- "epoll_interest_list.go",
- "event_list.go",
- "file_description.go",
- "file_description_impl_util.go",
- "file_description_refs.go",
- "filesystem.go",
- "filesystem_impl_util.go",
- "filesystem_refs.go",
- "filesystem_type.go",
- "inotify.go",
- "lock.go",
- "mount.go",
- "mount_namespace_refs.go",
- "mount_unsafe.go",
- "opath.go",
- "options.go",
- "pathname.go",
- "permissions.go",
- "resolving_path.go",
- "save_restore.go",
- "vfs.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fd",
- "//pkg/fdnotifier",
- "//pkg/fspath",
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/log",
- "//pkg/refs",
- "//pkg/refsvfs2",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsmetric",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- "//pkg/sentry/limits",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/uniqueid",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-go_test(
- name = "vfs_test",
- size = "small",
- srcs = [
- "file_description_impl_util_test.go",
- "mount_test.go",
- ],
- library = ":vfs",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- ],
-)
diff --git a/pkg/sentry/vfs/README.md b/pkg/sentry/vfs/README.md
deleted file mode 100644
index 5aad31b78..000000000
--- a/pkg/sentry/vfs/README.md
+++ /dev/null
@@ -1,186 +0,0 @@
-# The gVisor Virtual Filesystem
-
-THIS PACKAGE IS CURRENTLY EXPERIMENTAL AND NOT READY OR ENABLED FOR PRODUCTION
-USE. For the filesystem implementation currently used by gVisor, see the `fs`
-package.
-
-## Implementation Notes
-
-### Reference Counting
-
-Filesystem, Dentry, Mount, MountNamespace, and FileDescription are all
-reference-counted. Mount and MountNamespace are exclusively VFS-managed; when
-their reference count reaches zero, VFS releases their resources. Filesystem and
-FileDescription management is shared between VFS and filesystem implementations;
-when their reference count reaches zero, VFS notifies the implementation by
-calling `FilesystemImpl.Release()` or `FileDescriptionImpl.Release()`
-respectively and then releases VFS-owned resources. Dentries are exclusively
-managed by filesystem implementations; reference count changes are abstracted
-through DentryImpl, which should release resources when reference count reaches
-zero.
-
-Filesystem references are held by:
-
-- Mount: Each referenced Mount holds a reference on the mounted Filesystem.
-
-Dentry references are held by:
-
-- FileDescription: Each referenced FileDescription holds a reference on the
- Dentry through which it was opened, via `FileDescription.vd.dentry`.
-
-- Mount: Each referenced Mount holds a reference on its mount point and on the
- mounted filesystem root. The mount point is mutable (`mount(MS_MOVE)`).
-
-Mount references are held by:
-
-- FileDescription: Each referenced FileDescription holds a reference on the
- Mount on which it was opened, via `FileDescription.vd.mount`.
-
-- Mount: Each referenced Mount holds a reference on its parent, which is the
- mount containing its mount point.
-
-- VirtualFilesystem: A reference is held on each Mount that has been connected
- to a mount point, but not yet umounted.
-
-MountNamespace and FileDescription references are held by users of VFS. The
-expectation is that each `kernel.Task` holds a reference on its corresponding
-MountNamespace, and each file descriptor holds a reference on its represented
-FileDescription.
-
-Notes:
-
-- Dentries do not hold a reference on their owning Filesystem. Instead, all
- uses of a Dentry occur in the context of a Mount, which holds a reference on
- the relevant Filesystem (see e.g. the VirtualDentry type). As a corollary,
- when releasing references on both a Dentry and its corresponding Mount, the
- Dentry's reference must be released first (because releasing the Mount's
- reference may release the last reference on the Filesystem, whose state may
- be required to release the Dentry reference).
-
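As a minimal sketch of the ordering requirement in the note above (assuming this code sits inside the `vfs` package and uses the `Dentry.DecRef` and `Mount.DecRef` methods it provides), releasing a dentry/mount pair looks like:

```go
// releasePair is a sketch, not part of this package: it drops a Dentry
// reference and a Mount reference in the order the note above requires.
func releasePair(ctx context.Context, d *Dentry, mnt *Mount) {
	// Release the Dentry first; doing so may need Filesystem state, which
	// the Mount still keeps alive at this point.
	d.DecRef(ctx)
	// Only now release the Mount, which may drop the last reference on the
	// Filesystem.
	mnt.DecRef(ctx)
}
```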
-### The Inheritance Pattern
-
-Filesystem, Dentry, and FileDescription are all concepts featuring both state
-that must be shared between VFS and filesystem implementations, and operations
-that are implementation-defined. To facilitate this, each of these three
-concepts follows the same pattern, shown below for Dentry:
-
-```go
-// Dentry represents a node in a filesystem tree.
-type Dentry struct {
- // VFS-required dentry state.
- parent *Dentry
- // ...
-
- // impl is the DentryImpl associated with this Dentry. impl is immutable.
- // This should be the last field in Dentry.
- impl DentryImpl
-}
-
-// Init must be called before first use of d.
-func (d *Dentry) Init(impl DentryImpl) {
- d.impl = impl
-}
-
-// Impl returns the DentryImpl associated with d.
-func (d *Dentry) Impl() DentryImpl {
- return d.impl
-}
-
-// DentryImpl contains implementation-specific details of a Dentry.
-// Implementations of DentryImpl should contain their associated Dentry by
-// value as their first field.
-type DentryImpl interface {
- // VFS-required implementation-defined dentry operations.
- IncRef()
- // ...
-}
-```
-
-This construction, which is essentially a type-safe analogue to Linux's
-`container_of` pattern, has the following properties:
-
-- VFS works almost exclusively with pointers to Dentry rather than DentryImpl
- interface objects, such as in the type of `Dentry.parent`. This avoids
- interface method calls (which are somewhat expensive to perform, and defeat
- inlining and escape analysis), reduces the size of VFS types (since an
- interface object is two pointers in size), and allows pointers to be loaded
- and stored atomically using `sync/atomic`. Implementation-defined behavior
- is accessed via `Dentry.impl` when required.
-
-- Filesystem implementations can access the implementation-defined state
- associated with objects of VFS types by type-asserting or type-switching
- (e.g. `Dentry.Impl().(*myDentry)`). Type assertions to a concrete type
- require only an equality comparison of the interface object's type pointer
- to a static constant, and are consequently very fast.
-
-- Filesystem implementations can access the VFS state associated with objects
- of implementation-defined types directly.
-
-- VFS and implementation-defined state for a given type occupy the same
- object, minimizing memory allocations and maximizing memory locality. `impl`
- is the last field in `Dentry`, and `Dentry` is the first field in
- `DentryImpl` implementations, for similar reasons: this tends to cause
- fetching of the `Dentry.impl` interface object to also fetch `DentryImpl`
- fields, either because they are in the same cache line or via next-line
- prefetching.
-
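To make the pattern concrete, here is a short sketch of the implementation side, continuing the simplified `Dentry`/`DentryImpl` definitions above; `myDentry` and its fields are hypothetical:

```go
// myDentry is a hypothetical DentryImpl. The Dentry is its first field, by
// value, matching the layout rationale above.
type myDentry struct {
	dentry Dentry

	// Implementation-defined state.
	name string
}

// IncRef implements DentryImpl.IncRef (other methods elided).
func (d *myDentry) IncRef() { /* ... */ }

// newMyDentry ties the two halves together: after Init, d.dentry.Impl()
// returns d.
func newMyDentry(name string) *myDentry {
	d := &myDentry{name: name}
	d.dentry.Init(d)
	return d
}

// VFS hands back *Dentry; the implementation recovers its own state with a
// cheap concrete-type assertion.
func myDentryOf(vfsd *Dentry) *myDentry {
	return vfsd.Impl().(*myDentry)
}
```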
-## Future Work
-
-- Most `mount(2)` features, and unmounting, are incomplete.
-
-- VFS1 filesystems are not directly compatible with VFS2. It may be possible
- to implement shims that implement `vfs.FilesystemImpl` for
- `fs.MountNamespace`, `vfs.DentryImpl` for `fs.Dirent`, and
- `vfs.FileDescriptionImpl` for `fs.File`, which may be adequate for
- filesystems that are not performance-critical (e.g. sysfs); however, it is
- not clear that this will be less effort than simply porting the filesystems
- in question. Practically speaking, the following filesystems will probably
- need to be ported or made compatible through a shim to evaluate filesystem
- performance on realistic workloads:
-
- - devfs/procfs/sysfs, which will realistically be necessary to execute
- most applications. (Note that procfs and sysfs do not support hard
- links, so they do not require the complexity of separate inode objects.
- Also note that Linux's /dev is actually a variant of tmpfs called
- devtmpfs.)
-
- - tmpfs. This should be relatively straightforward: copy/paste memfs,
- store regular file contents in pgalloc-allocated memory instead of
- `[]byte`, and add support for file timestamps. (In fact, it probably
- makes more sense to convert memfs to tmpfs and not keep the former.)
-
- - A remote filesystem, either lisafs (if it is ready by the time that
- other benchmarking prerequisites are) or v9fs (aka 9P, aka gofers).
-
- - epoll files.
-
- Filesystems that will need to be ported before switching to VFS2, but can
- probably be skipped for early testing:
-
- - overlayfs, which is needed for (at least) synthetic mount points.
-
- - Support for host ttys.
-
- - timerfd files.
-
- Filesystems that can be probably dropped:
-
- - ashmem, which is far too incomplete to use.
-
- - binder, which is similarly far too incomplete to use.
-
-- Save/restore. For instance, it is unclear if the current implementation of
- the `state` package supports the inheritance pattern described above.
-
-- Many features that were previously implemented by VFS must now be
- implemented by individual filesystems (though, in most cases, this should
- consist of calls to hooks or libraries provided by `vfs` or other packages).
- This includes, but is not necessarily limited to:
-
- - Block and character device special files
-
- - Inotify
-
- - File locking
-
- - `O_ASYNC`
diff --git a/pkg/sentry/vfs/epoll_interest_list.go b/pkg/sentry/vfs/epoll_interest_list.go
new file mode 100644
index 000000000..e75ea361b
--- /dev/null
+++ b/pkg/sentry/vfs/epoll_interest_list.go
@@ -0,0 +1,221 @@
+package vfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type epollInterestElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (epollInterestElementMapper) linkerFor(elem *epollInterest) *epollInterest { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type epollInterestList struct {
+ head *epollInterest
+ tail *epollInterest
+}
+
+// Reset resets list l to the empty state.
+func (l *epollInterestList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *epollInterestList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *epollInterestList) Front() *epollInterest {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *epollInterestList) Back() *epollInterest {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *epollInterestList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (epollInterestElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *epollInterestList) PushFront(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ epollInterestElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *epollInterestList) PushBack(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ epollInterestElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *epollInterestList) PushBackList(m *epollInterestList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ epollInterestElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ epollInterestElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *epollInterestList) InsertAfter(b, e *epollInterest) {
+ bLinker := epollInterestElementMapper{}.linkerFor(b)
+ eLinker := epollInterestElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ epollInterestElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *epollInterestList) InsertBefore(a, e *epollInterest) {
+ aLinker := epollInterestElementMapper{}.linkerFor(a)
+ eLinker := epollInterestElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ epollInterestElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *epollInterestList) Remove(e *epollInterest) {
+ linker := epollInterestElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ epollInterestElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ epollInterestElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type epollInterestEntry struct {
+ next *epollInterest
+ prev *epollInterest
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) Next() *epollInterest {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) Prev() *epollInterest {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) SetNext(elem *epollInterest) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *epollInterestEntry) SetPrev(elem *epollInterest) {
+ e.prev = elem
+}
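For context, a short sketch of how a generated list like the one above is consumed. It assumes, as the `Linker = *epollInterest` instantiation requires, that `epollInterest` embeds `epollInterestEntry` (that embedding lives in `epoll.go`, which is not part of this diff):

```go
// drainReady is illustrative only: it walks and empties a ready-list using
// the generated API above. Iteration and removal are O(1) per element and
// allocation-free.
func drainReady(ready *epollInterestList) []*epollInterest {
	var out []*epollInterest
	for epi := ready.Front(); epi != nil; {
		next := epi.Next() // Capture before Remove clears the links.
		ready.Remove(epi)
		out = append(out, epi)
		epi = next
	}
	return out
}
```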
diff --git a/pkg/sentry/vfs/event_list.go b/pkg/sentry/vfs/event_list.go
new file mode 100644
index 000000000..c0946b585
--- /dev/null
+++ b/pkg/sentry/vfs/event_list.go
@@ -0,0 +1,221 @@
+package vfs
+
+// ElementMapper provides an identity mapping by default.
+//
+// This can be replaced to provide a struct that maps elements to linker
+// objects, if they are not the same. An ElementMapper is not typically
+// required if: Linker is left as is, Element is left as is, or Linker and
+// Element are the same type.
+type eventElementMapper struct{}
+
+// linkerFor maps an Element to a Linker.
+//
+// This default implementation should be inlined.
+//
+//go:nosplit
+func (eventElementMapper) linkerFor(elem *Event) *Event { return elem }
+
+// List is an intrusive list. Entries can be added to or removed from the list
+// in O(1) time and with no additional memory allocations.
+//
+// The zero value for List is an empty list ready to use.
+//
+// To iterate over a list (where l is a List):
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.
+// }
+//
+// +stateify savable
+type eventList struct {
+ head *Event
+ tail *Event
+}
+
+// Reset resets list l to the empty state.
+func (l *eventList) Reset() {
+ l.head = nil
+ l.tail = nil
+}
+
+// Empty returns true iff the list is empty.
+//
+//go:nosplit
+func (l *eventList) Empty() bool {
+ return l.head == nil
+}
+
+// Front returns the first element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Front() *Event {
+ return l.head
+}
+
+// Back returns the last element of list l or nil.
+//
+//go:nosplit
+func (l *eventList) Back() *Event {
+ return l.tail
+}
+
+// Len returns the number of elements in the list.
+//
+// NOTE: This is an O(n) operation.
+//
+//go:nosplit
+func (l *eventList) Len() (count int) {
+ for e := l.Front(); e != nil; e = (eventElementMapper{}.linkerFor(e)).Next() {
+ count++
+ }
+ return count
+}
+
+// PushFront inserts the element e at the front of list l.
+//
+//go:nosplit
+func (l *eventList) PushFront(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(l.head)
+ linker.SetPrev(nil)
+ if l.head != nil {
+ eventElementMapper{}.linkerFor(l.head).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+
+ l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+//
+//go:nosplit
+func (l *eventList) PushBack(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ linker.SetNext(nil)
+ linker.SetPrev(l.tail)
+ if l.tail != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(e)
+ } else {
+ l.head = e
+ }
+
+ l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+//
+//go:nosplit
+func (l *eventList) PushBackList(m *eventList) {
+ if l.head == nil {
+ l.head = m.head
+ l.tail = m.tail
+ } else if m.head != nil {
+ eventElementMapper{}.linkerFor(l.tail).SetNext(m.head)
+ eventElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
+
+ l.tail = m.tail
+ }
+ m.head = nil
+ m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+//go:nosplit
+func (l *eventList) InsertAfter(b, e *Event) {
+ bLinker := eventElementMapper{}.linkerFor(b)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ a := bLinker.Next()
+
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ bLinker.SetNext(e)
+
+ if a != nil {
+ eventElementMapper{}.linkerFor(a).SetPrev(e)
+ } else {
+ l.tail = e
+ }
+}
+
+// InsertBefore inserts e before a.
+//
+//go:nosplit
+func (l *eventList) InsertBefore(a, e *Event) {
+ aLinker := eventElementMapper{}.linkerFor(a)
+ eLinker := eventElementMapper{}.linkerFor(e)
+
+ b := aLinker.Prev()
+ eLinker.SetNext(a)
+ eLinker.SetPrev(b)
+ aLinker.SetPrev(e)
+
+ if b != nil {
+ eventElementMapper{}.linkerFor(b).SetNext(e)
+ } else {
+ l.head = e
+ }
+}
+
+// Remove removes e from l.
+//
+//go:nosplit
+func (l *eventList) Remove(e *Event) {
+ linker := eventElementMapper{}.linkerFor(e)
+ prev := linker.Prev()
+ next := linker.Next()
+
+ if prev != nil {
+ eventElementMapper{}.linkerFor(prev).SetNext(next)
+ } else if l.head == e {
+ l.head = next
+ }
+
+ if next != nil {
+ eventElementMapper{}.linkerFor(next).SetPrev(prev)
+ } else if l.tail == e {
+ l.tail = prev
+ }
+
+ linker.SetNext(nil)
+ linker.SetPrev(nil)
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+//
+// +stateify savable
+type eventEntry struct {
+ next *Event
+ prev *Event
+}
+
+// Next returns the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Next() *Event {
+ return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) Prev() *Event {
+ return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetNext(elem *Event) {
+ e.next = elem
+}
+
+// SetPrev assigns 'entry' as the entry that precedes e in the list.
+//
+//go:nosplit
+func (e *eventEntry) SetPrev(elem *Event) {
+ e.prev = elem
+}
diff --git a/pkg/sentry/vfs/file_description_impl_util_test.go b/pkg/sentry/vfs/file_description_impl_util_test.go
deleted file mode 100644
index 1cd607c0a..000000000
--- a/pkg/sentry/vfs/file_description_impl_util_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package vfs
-
-import (
- "bytes"
- "fmt"
- "io"
- "sync/atomic"
- "testing"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// fileDescription is the common fd struct which a filesystem implementation
-// embeds in all of its file description implementations as required.
-type fileDescription struct {
- vfsfd FileDescription
- FileDescriptionDefaultImpl
- NoLockFD
-}
-
-// genCount contains the number of times its DynamicBytesSource.Generate()
-// implementation has been called.
-type genCount struct {
- count uint64 // accessed using atomic memory ops
-}
-
-// Generate implements DynamicBytesSource.Generate.
-func (g *genCount) Generate(ctx context.Context, buf *bytes.Buffer) error {
- fmt.Fprintf(buf, "%d", atomic.AddUint64(&g.count, 1))
- return nil
-}
-
-type storeData struct {
- data string
-}
-
-var _ WritableDynamicBytesSource = (*storeData)(nil)
-
-// Generate implements DynamicBytesSource.
-func (d *storeData) Generate(ctx context.Context, buf *bytes.Buffer) error {
- buf.WriteString(d.data)
- return nil
-}
-
-// Generate implements WritableDynamicBytesSource.
-func (d *storeData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
- buf := make([]byte, src.NumBytes())
- n, err := src.CopyIn(ctx, buf)
- if err != nil {
- return 0, err
- }
-
- d.data = string(buf[:n])
- return 0, nil
-}
-
-// testFD is a read-only FileDescriptionImpl representing a regular file.
-type testFD struct {
- fileDescription
- DynamicBytesFileDescriptionImpl
-
- data DynamicBytesSource
-}
-
-func newTestFD(ctx context.Context, vfsObj *VirtualFilesystem, statusFlags uint32, data DynamicBytesSource) *FileDescription {
- vd := vfsObj.NewAnonVirtualDentry("genCountFD")
- defer vd.DecRef(ctx)
- var fd testFD
- fd.vfsfd.Init(&fd, statusFlags, vd.Mount(), vd.Dentry(), &FileDescriptionOptions{})
- fd.DynamicBytesFileDescriptionImpl.SetDataSource(data)
- return &fd.vfsfd
-}
-
-// Release implements FileDescriptionImpl.Release.
-func (fd *testFD) Release(context.Context) {
-}
-
-// Stat implements FileDescriptionImpl.Stat.
-func (fd *testFD) Stat(ctx context.Context, opts StatOptions) (linux.Statx, error) {
- // Note that Statx.Mask == 0 in the return value.
- return linux.Statx{}, nil
-}
-
-// SetStat implements FileDescriptionImpl.SetStat.
-func (fd *testFD) SetStat(ctx context.Context, opts SetStatOptions) error {
- return syserror.EPERM
-}
-
-func TestGenCountFD(t *testing.T) {
- ctx := contexttest.Context(t)
-
- vfsObj := &VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- fd := newTestFD(ctx, vfsObj, linux.O_RDWR, &genCount{})
- defer fd.DecRef(ctx)
-
- // The first read causes Generate to be called to fill the FD's buffer.
- buf := make([]byte, 2)
- ioseq := usermem.BytesIOSequence(buf)
- n, err := fd.Read(ctx, ioseq, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("first Read: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('1'); buf[0] != want {
- t.Errorf("first Read: got byte %c, wanted %c", buf[0], want)
- }
-
- // A second read without seeking is still at EOF.
- n, err = fd.Read(ctx, ioseq, ReadOptions{})
- if n != 0 || err != io.EOF {
- t.Fatalf("second Read: got (%d, %v), wanted (0, EOF)", n, err)
- }
-
- // Seeking to the beginning of the file causes it to be regenerated.
- n, err = fd.Seek(ctx, 0, linux.SEEK_SET)
- if n != 0 || err != nil {
- t.Fatalf("Seek: got (%d, %v), wanted (0, nil)", n, err)
- }
- n, err = fd.Read(ctx, ioseq, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("Read after Seek: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('2'); buf[0] != want {
- t.Errorf("Read after Seek: got byte %c, wanted %c", buf[0], want)
- }
-
- // PRead at the beginning of the file also causes it to be regenerated.
- n, err = fd.PRead(ctx, ioseq, 0, ReadOptions{})
- if n != 1 || (err != nil && err != io.EOF) {
- t.Fatalf("PRead: got (%d, %v), wanted (1, nil or EOF)", n, err)
- }
- if want := byte('3'); buf[0] != want {
- t.Errorf("PRead: got byte %c, wanted %c", buf[0], want)
- }
-
- // Write and PWrite fails.
- if _, err := fd.Write(ctx, ioseq, WriteOptions{}); err != syserror.EIO {
- t.Errorf("Write: got err %v, wanted %v", err, syserror.EIO)
- }
- if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); err != syserror.EIO {
- t.Errorf("Write: got err %v, wanted %v", err, syserror.EIO)
- }
-}
-
-func TestWritable(t *testing.T) {
- ctx := contexttest.Context(t)
-
- vfsObj := &VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- fd := newTestFD(ctx, vfsObj, linux.O_RDWR, &storeData{data: "init"})
- defer fd.DecRef(ctx)
-
- buf := make([]byte, 10)
- ioseq := usermem.BytesIOSequence(buf)
- if n, err := fd.Read(ctx, ioseq, ReadOptions{}); n != 4 && err != io.EOF {
- t.Fatalf("Read: got (%v, %v), wanted (4, EOF)", n, err)
- }
- if want := "init"; want == string(buf) {
- t.Fatalf("Read: got %v, wanted %v", string(buf), want)
- }
-
- // Test PWrite.
- want := "write"
- writeIOSeq := usermem.BytesIOSequence([]byte(want))
- if n, err := fd.PWrite(ctx, writeIOSeq, 0, WriteOptions{}); int(n) != len(want) && err != nil {
- t.Errorf("PWrite: got err (%v, %v), wanted (%v, nil)", n, err, len(want))
- }
- if n, err := fd.PRead(ctx, ioseq, 0, ReadOptions{}); int(n) != len(want) && err != io.EOF {
- t.Fatalf("PRead: got (%v, %v), wanted (%v, EOF)", n, err, len(want))
- }
- if want == string(buf) {
- t.Fatalf("PRead: got %v, wanted %v", string(buf), want)
- }
-
- // Test Seek to 0 followed by Write.
- want = "write2"
- writeIOSeq = usermem.BytesIOSequence([]byte(want))
- if n, err := fd.Seek(ctx, 0, linux.SEEK_SET); n != 0 && err != nil {
- t.Errorf("Seek: got err (%v, %v), wanted (0, nil)", n, err)
- }
- if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); int(n) != len(want) && err != nil {
- t.Errorf("Write: got err (%v, %v), wanted (%v, nil)", n, err, len(want))
- }
- if n, err := fd.PRead(ctx, ioseq, 0, ReadOptions{}); int(n) != len(want) && err != io.EOF {
- t.Fatalf("PRead: got (%v, %v), wanted (%v, EOF)", n, err, len(want))
- }
- if want == string(buf) {
- t.Fatalf("PRead: got %v, wanted %v", string(buf), want)
- }
-
- // Test failure if offset != 0.
- if n, err := fd.Seek(ctx, 1, linux.SEEK_SET); n != 0 && err != nil {
- t.Errorf("Seek: got err (%v, %v), wanted (0, nil)", n, err)
- }
- if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); n != 0 && err != syserror.EINVAL {
- t.Errorf("Write: got err (%v, %v), wanted (0, EINVAL)", n, err)
- }
- if n, err := fd.PWrite(ctx, writeIOSeq, 2, WriteOptions{}); n != 0 && err != syserror.EINVAL {
- t.Errorf("PWrite: got err (%v, %v), wanted (0, EINVAL)", n, err)
- }
-}
diff --git a/pkg/sentry/vfs/file_description_refs.go b/pkg/sentry/vfs/file_description_refs.go
new file mode 100644
index 000000000..4f3531d75
--- /dev/null
+++ b/pkg/sentry/vfs/file_description_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FileDescriptionenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var FileDescriptionobj *FileDescription
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FileDescriptionRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FileDescriptionRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FileDescriptionRefs) RefType() string {
+ return fmt.Sprintf("%T", FileDescriptionobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FileDescriptionRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FileDescriptionRefs) LogRefs() bool {
+ return FileDescriptionenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FileDescriptionRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FileDescriptionRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FileDescriptionRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FileDescriptionRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FileDescriptionenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FileDescriptionRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
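As a usage sketch for the template above: an object embeds the generated Refs type, calls `InitRefs` once, and hands `DecRef` a destroy callback that runs when the last real reference is dropped. The `exampleCounted` type below is hypothetical; in this package, `FileDescription` itself plays this role.

```go
// exampleCounted is an illustrative embedder of FileDescriptionRefs.
type exampleCounted struct {
	FileDescriptionRefs

	// release stands in for whatever must be torn down exactly once.
	release func()
}

func newExampleCounted(release func()) *exampleCounted {
	c := &exampleCounted{release: release}
	c.InitRefs() // Starts at one reference and registers for leak checking.
	return c
}

// DecRef drops a reference; the callback runs only when the count hits zero.
func (c *exampleCounted) DecRef() {
	c.FileDescriptionRefs.DecRef(func() {
		if c.release != nil {
			c.release()
		}
	})
}
```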
diff --git a/pkg/sentry/vfs/filesystem_refs.go b/pkg/sentry/vfs/filesystem_refs.go
new file mode 100644
index 000000000..a16815fda
--- /dev/null
+++ b/pkg/sentry/vfs/filesystem_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const FilesystemenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var Filesystemobj *Filesystem
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type FilesystemRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *FilesystemRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *FilesystemRefs) RefType() string {
+ return fmt.Sprintf("%T", Filesystemobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *FilesystemRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *FilesystemRefs) LogRefs() bool {
+ return FilesystemenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *FilesystemRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *FilesystemRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if FilesystemenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *FilesystemRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if FilesystemenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *FilesystemRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if FilesystemenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *FilesystemRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
diff --git a/pkg/sentry/vfs/g3doc/inotify.md b/pkg/sentry/vfs/g3doc/inotify.md
deleted file mode 100644
index 833db213f..000000000
--- a/pkg/sentry/vfs/g3doc/inotify.md
+++ /dev/null
@@ -1,210 +0,0 @@
-# Inotify
-
-Inotify is a mechanism for monitoring filesystem events in Linux--see
-inotify(7). An inotify instance can be used to monitor files and directories for
-modifications, creation/deletion, etc. The inotify API consists of system calls
-that create inotify instances (inotify_init/inotify_init1) and add/remove
-watches on files to an instance (inotify_add_watch/inotify_rm_watch). Events are
-generated from various places in the sentry, including the syscall layer, the
-vfs layer, the process fd table, and within each filesystem implementation. This
-document outlines the implementation details of inotify in VFS2.
-
-## Inotify Objects
-
-Inotify data structures are implemented in the vfs package.
-
-### vfs.Inotify
-
-Inotify instances are represented by vfs.Inotify objects, which implement
-vfs.FileDescriptionImpl. As in Linux, inotify fds are backed by a
-pseudo-filesystem (anonfs). Each inotify instance receives events from a set of
-vfs.Watch objects, which can be modified with inotify_add_watch(2) and
-inotify_rm_watch(2). An application can retrieve events by reading the inotify
-fd.
-
-### vfs.Watches
-
-The set of all watches held on a single file (i.e., the watch target) is stored
-in vfs.Watches. Each watch will belong to a different inotify instance (an
-instance can only have one watch on any watch target). The watches are stored in
-a map indexed by their vfs.Inotify owner’s id. Hard links and file descriptions
-to a single file will all share the same vfs.Watches (with the exception of the
-gofer filesystem, described in a later section). Activity on the target causes
-its vfs.Watches to generate notifications on its watches’ inotify instances.
-
-### vfs.Watch
-
-A single watch, owned by one inotify instance and applied to one watch target.
-Both the vfs.Inotify owner and vfs.Watches on the target will hold a vfs.Watch,
-which leads to some complicated locking behavior (see Lock Ordering). Whenever a
-watch is notified of an event on its target, it will queue events to its inotify
-instance for delivery to the user.
-
-### vfs.Event
-
-vfs.Event is a simple struct encapsulating all the fields for an inotify event.
-It is generated by vfs.Watches and forwarded to the watches' owners. It is
-serialized to the user during read(2) syscalls on the associated fs.Inotify's
-fd.
-
-## Lock Ordering
-
-There are three locks related to the inotify implementation:
-
-Inotify.mu: the inotify instance lock. Inotify.evMu: the inotify event queue
-lock. Watches.mu: the watch set lock, used to protect the collection of watches
-on a target.
-
-The correct lock ordering for inotify code is:
-
-Inotify.mu -> Watches.mu -> Inotify.evMu.
-
-Note that we use a distinct lock to protect the inotify event queue. If we
-simply used Inotify.mu, we could simultaneously have locks being acquired in the
-order of Inotify.mu -> Watches.mu and Watches.mu -> Inotify.mu, which would
-cause deadlocks. For instance, adding a watch to an inotify instance would
-require locking Inotify.mu, and then adding the same watch to the target would
-cause Watches.mu to be held. At the same time, generating an event on the target
-would require Watches.mu to be held before iterating through each watch, and
-then notifying the owner of each watch would cause Inotify.mu to be held.
-
-See the vfs package comment to understand how inotify locks fit into the overall
-ordering of filesystem locks.
-
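The ordering above can be summarized with a self-contained toy; the types below only stand in for the real Inotify and Watches structures, and the lock fields mirror the names used in this document:

```go
package inotifyordering

import "sync"

// Toy stand-ins for the three locks named above.
type inotifyToy struct {
	mu   sync.Mutex // Inotify.mu: instance lock.
	evMu sync.Mutex // Inotify.evMu: event queue lock.
}

type watchesToy struct {
	mu sync.Mutex // Watches.mu: per-target watch set lock.
}

// addWatch takes Inotify.mu, then Watches.mu, matching the documented order.
func addWatch(i *inotifyToy, target *watchesToy) {
	i.mu.Lock()
	defer i.mu.Unlock()
	target.mu.Lock()
	defer target.mu.Unlock()
	// ... record the watch on both sides ...
}

// notify takes Watches.mu, then each owner's evMu. This stays consistent with
// the total order because evMu is always acquired last and Inotify.mu is
// never acquired after either of the other two.
func notify(target *watchesToy, owners []*inotifyToy) {
	target.mu.Lock()
	defer target.mu.Unlock()
	for _, i := range owners {
		i.evMu.Lock()
		// ... queue the event for this owner ...
		i.evMu.Unlock()
	}
}
```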
-## Watch Targets in Different Filesystem Implementations
-
-In Linux, watches reside on inodes at the virtual filesystem layer. As a result,
-all hard links and file descriptions on a single file will all share the same
-watch set. In VFS2, there is no common inode structure across filesystem types
-(some may not even have inodes), so we have to plumb inotify support through
-each specific filesystem implementation. Some of the technical considerations
-are outlined below.
-
-### Tmpfs
-
-For filesystems with inodes, like tmpfs, the design is quite similar to that of
-Linux, where watches reside on the inode.
-
-### Pseudo-filesystems
-
-Technically, because inotify is implemented at the vfs layer in Linux,
-pseudo-filesystems on top of kernfs support inotify passively. However, watches
-can only track explicit filesystem operations like read/write, open/close,
-mknod, etc., so watches on a target like /proc/self/fd will not generate events
-every time a new fd is added or removed. As of this writing, we leave inotify
-unimplemented in kernfs and anonfs; it does not seem particularly useful.
-
-### Gofer Filesystem (fsimpl/gofer)
-
-The gofer filesystem has several traits that make it difficult to support
-inotify:
-
-* **There are no inodes.** A file is represented as a dentry that holds an
- unopened p9 file (and possibly an open FID), through which the Sentry
- interacts with the gofer.
- * *Solution:* Because there is no inode structure stored in the sandbox,
- inotify watches must be held on the dentry. For the purposes of inotify,
- we assume that every dentry corresponds to a unique inode, which may
- cause unexpected behavior in the presence of hard links, where multiple
- dentries should share the same set of watches. Indeed, it is impossible
- for us to be absolutely sure whether dentries correspond to the same
- file or not, due to the following point:
-* **The Sentry cannot always be aware of hard links on the remote
- filesystem.** There is no way for us to confirm whether two files on the
- remote filesystem are actually links to the same inode. QIDs and inodes are
- not always 1:1. The assumption that dentries and inodes are 1:1 is
- inevitably broken if there are remote hard links that we cannot detect.
- * *Solution:* this is an issue with gofer fs in general, not only inotify,
- and we will have to live with it.
-* **Dentries can be cached, and then evicted.** Dentry lifetime does not
- correspond to file lifetime. Because gofer fs is not entirely in-memory, the
- absence of a dentry does not mean that the corresponding file does not
- exist, nor does a dentry reaching zero references mean that the
- corresponding file no longer exists. When a dentry reaches zero references,
- it will be cached, in case the file at that path is needed again in the
- future. However, the dentry may be evicted from the cache, which will cause
- a new dentry to be created next time the same file path is used. The
- existing watches will be lost.
- * *Solution:* When a dentry reaches zero references, do not cache it if it
- has any watches, so we can avoid eviction/destruction. Note that if the
- dentry was deleted or invalidated (d.vfsd.IsDead()), we should still
- destroy it along with its watches. Additionally, when a dentry’s last
- watch is removed, we cache it if it also has zero references. This way,
- the dentry can eventually be evicted from memory if it is no longer
- needed.
-* **Dentries can be invalidated.** Another issue with dentry lifetime is that
- the remote file at the file path represented may change from underneath the
- dentry. In this case, the next time that the dentry is used, it will be
- invalidated and a new dentry will replace it. In this case, it is not clear
- what should be done with the watches on the old dentry.
- * *Solution:* Silently destroy the watches when invalidation occurs. We
- have no way of knowing exactly what happened, when it happens. Inotify
- instances on NFS files in Linux probably behave in a similar fashion,
- since inotify is implemented at the vfs layer and is not aware of the
- complexities of remote file systems.
- * An alternative would be to issue some kind of event upon invalidation,
- e.g. a delete event, but this has several issues:
- * We cannot discern whether the remote file was invalidated because it was
- moved, deleted, etc. This information is crucial, because these cases
- should result in different events. Furthermore, the watches should only
- be destroyed if the file has been deleted.
- * Moreover, the mechanism for detecting whether the underlying file has
- changed is to check whether a new QID is given by the gofer. This may
- result in false positives, e.g. suppose that the server closed and
- re-opened the same file, which may result in a new QID.
- * Finally, the time of the event may be completely different from the time
- of the file modification, since a dentry is not immediately notified
- when the underlying file has changed. It would be quite unexpected to
- receive the notification when invalidation was triggered, i.e. the next
- time the file was accessed within the sandbox, because then the
- read/write/etc. operation on the file would not result in the expected
- event.
- * Another point in favor of the first solution: inotify in Linux can
- already be lossy on local filesystems (one of the sacrifices made so
- that filesystem performance isn’t killed), and it is lossy on NFS for
- similar reasons to gofer fs. Therefore, it is better for inotify to be
- silent than to emit incorrect notifications.
-* **There may be external users of the remote filesystem.** We can only track
- operations performed on the file within the sandbox. This is sufficient
- under InteropModeExclusive, but whenever there are external users, the set
- of actions we are aware of is incomplete.
- * *Solution:* We could either return an error or just issue a warning when
- inotify is used without InteropModeExclusive. Although faulty, VFS1
- allows it when the filesystem is shared, and Linux does the same for
- remote filesystems (as mentioned above, inotify sits at the vfs level).
-
-## Dentry Interface
-
-For events that must be generated above the vfs layer, we provide the following
-DentryImpl methods to allow interactions with targets on any FilesystemImpl:
-
-* **InotifyWithParent()** generates events on the dentry’s watches as well as
- its parent’s.
-* **Watches()** retrieves the watch set of the target represented by the
- dentry. This is used to access and modify watches on a target.
-* **OnZeroWatches()** performs cleanup tasks after the last watch is removed
- from a dentry. This is needed by gofer fs, which must allow a watched dentry
- to be cached once it has no more watches. Most implementations can just do
- nothing. Note that OnZeroWatches() must be called after all inotify locks
- are released to preserve lock ordering, since it may acquire
- FilesystemImpl-specific locks.
-
-## IN_EXCL_UNLINK
-
-There are several options that can be set for a watch, specified as part of the
-mask in inotify_add_watch(2). In particular, IN_EXCL_UNLINK requires some
-additional support in each filesystem.
-
-A watch with IN_EXCL_UNLINK will not generate events for its target if it
-corresponds to a path that was unlinked. For instance, if an fd is opened on
-“foo/bar” and “foo/bar” is subsequently unlinked, any reads/writes/etc. on the
-fd will be ignored by watches on “foo” or “foo/bar” with IN_EXCL_UNLINK. This
-requires each DentryImpl to keep track of whether it has been unlinked, in order
-to determine whether events should be sent to watches with IN_EXCL_UNLINK.
-
-## IN_ONESHOT
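A tiny sketch of the gating this implies; the `watchToy` type is hypothetical, and the `IN_EXCL_UNLINK` value matches Linux's `include/uapi/linux/inotify.h`:

```go
// watchToy models just enough of a watch to show IN_EXCL_UNLINK filtering.
type watchToy struct {
	mask uint32 // Flags passed to inotify_add_watch(2), possibly IN_EXCL_UNLINK.
}

const inExclUnlink = 0x04000000 // Linux IN_EXCL_UNLINK.

// shouldNotify reports whether an event on the target should reach this
// watch, given whether the path the target was opened through has been
// unlinked (the per-dentry state described above).
func shouldNotify(w *watchToy, targetUnlinked bool) bool {
	return !(targetUnlinked && w.mask&inExclUnlink != 0)
}
```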
-
-One-shot watches expire after generating a single event. When an event occurs,
-all one-shot watches on the target that successfully generated an event are
-removed. Lock ordering can cause the management of one-shot watches to be quite
-expensive; see Watches.Notify() for more information.
diff --git a/pkg/sentry/vfs/genericfstree/BUILD b/pkg/sentry/vfs/genericfstree/BUILD
deleted file mode 100644
index d8fd92677..000000000
--- a/pkg/sentry/vfs/genericfstree/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics:defs.bzl", "go_template")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-go_template(
- name = "generic_fstree",
- srcs = [
- "genericfstree.go",
- ],
- types = [
- "Dentry",
- ],
-)
diff --git a/pkg/sentry/vfs/genericfstree/genericfstree.go b/pkg/sentry/vfs/genericfstree/genericfstree.go
deleted file mode 100644
index ba6e6ed49..000000000
--- a/pkg/sentry/vfs/genericfstree/genericfstree.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package genericfstree provides tools for implementing vfs.FilesystemImpls
-// where a single statically-determined lock or set of locks is sufficient to
-// ensure that a Dentry's name and parent are contextually immutable.
-//
-// Clients using this package must use the go_template_instance rule in
-// tools/go_generics/defs.bzl to create an instantiation of this template
-// package, providing types to use in place of Dentry.
-package genericfstree
-
-import (
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// Dentry is a required type parameter that is a struct with the given fields.
-//
-// +stateify savable
-type Dentry struct {
- // vfsd is the embedded vfs.Dentry corresponding to this vfs.DentryImpl.
- vfsd vfs.Dentry
-
- // parent is the parent of this Dentry in the filesystem's tree. If this
- // Dentry is a filesystem root, parent is nil.
- parent *Dentry
-
- // name is the name of this Dentry in its parent. If this Dentry is a
- // filesystem root, name is unspecified.
- name string
-}
-
-// IsAncestorDentry returns true if d is an ancestor of d2; that is, d is
-// either d2's parent or an ancestor of d2's parent.
-func IsAncestorDentry(d, d2 *Dentry) bool {
- for d2 != nil { // Stop at root, where d2.parent == nil.
- if d2.parent == d {
- return true
- }
- if d2.parent == d2 {
- return false
- }
- d2 = d2.parent
- }
- return false
-}
-
-// ParentOrSelf returns d.parent. If d.parent is nil, ParentOrSelf returns d.
-func ParentOrSelf(d *Dentry) *Dentry {
- if d.parent != nil {
- return d.parent
- }
- return d
-}
-
-// PrependPath is a generic implementation of FilesystemImpl.PrependPath().
-func PrependPath(vfsroot vfs.VirtualDentry, mnt *vfs.Mount, d *Dentry, b *fspath.Builder) error {
- for {
- if mnt == vfsroot.Mount() && &d.vfsd == vfsroot.Dentry() {
- return vfs.PrependPathAtVFSRootError{}
- }
- if mnt != nil && &d.vfsd == mnt.Root() {
- return nil
- }
- if d.parent == nil {
- return vfs.PrependPathAtNonMountRootError{}
- }
- b.PrependComponent(d.name)
- d = d.parent
- }
-}
-
-// DebugPathname returns a pathname to d relative to its filesystem root.
-// DebugPathname does not correspond to any Linux function; it's used to
-// generate dentry pathnames for debugging.
-func DebugPathname(d *Dentry) string {
- var b fspath.Builder
- _ = PrependPath(vfs.VirtualDentry{}, nil, d, &b)
- return b.String()
-}
diff --git a/pkg/sentry/vfs/memxattr/BUILD b/pkg/sentry/vfs/memxattr/BUILD
deleted file mode 100644
index ea82f4987..000000000
--- a/pkg/sentry/vfs/memxattr/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "memxattr",
- srcs = ["xattr.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go b/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go
new file mode 100644
index 000000000..f75223725
--- /dev/null
+++ b/pkg/sentry/vfs/memxattr/memxattr_state_autogen.go
@@ -0,0 +1,36 @@
+// automatically generated by stateify.
+
+package memxattr
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (x *SimpleExtendedAttributes) StateTypeName() string {
+ return "pkg/sentry/vfs/memxattr.SimpleExtendedAttributes"
+}
+
+func (x *SimpleExtendedAttributes) StateFields() []string {
+ return []string{
+ "xattrs",
+ }
+}
+
+func (x *SimpleExtendedAttributes) beforeSave() {}
+
+// +checklocksignore
+func (x *SimpleExtendedAttributes) StateSave(stateSinkObject state.Sink) {
+ x.beforeSave()
+ stateSinkObject.Save(0, &x.xattrs)
+}
+
+func (x *SimpleExtendedAttributes) afterLoad() {}
+
+// +checklocksignore
+func (x *SimpleExtendedAttributes) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &x.xattrs)
+}
+
+func init() {
+ state.Register((*SimpleExtendedAttributes)(nil))
+}
diff --git a/pkg/sentry/vfs/mount_namespace_refs.go b/pkg/sentry/vfs/mount_namespace_refs.go
new file mode 100644
index 000000000..a44e73aed
--- /dev/null
+++ b/pkg/sentry/vfs/mount_namespace_refs.go
@@ -0,0 +1,140 @@
+package vfs
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/refsvfs2"
+)
+
+// enableLogging indicates whether reference-related events should be logged (with
+// stack traces). This is false by default and should only be set to true for
+// debugging purposes, as it can generate an extremely large amount of output
+// and drastically degrade performance.
+const MountNamespaceenableLogging = false
+
+// obj is used to customize logging. Note that we use a pointer to T so that
+// we do not copy the entire object when passed as a format parameter.
+var MountNamespaceobj *MountNamespace
+
+// Refs implements refs.RefCounter. It keeps a reference count using atomic
+// operations and calls the destructor when the count reaches zero.
+//
+// NOTE: Do not introduce additional fields to the Refs struct. It is used by
+// many filesystem objects, and we want to keep it as small as possible (i.e.,
+// the same size as using an int64 directly) to avoid taking up extra cache
+// space. In general, this template should not be extended at the cost of
+// performance. If it does not offer enough flexibility for a particular object
+// (example: b/187877947), we should implement the RefCounter/CheckedObject
+// interfaces manually.
+//
+// +stateify savable
+type MountNamespaceRefs struct {
+ // refCount is composed of two fields:
+ //
+ // [32-bit speculative references]:[32-bit real references]
+ //
+ // Speculative references are used for TryIncRef, to avoid a CompareAndSwap
+ // loop. See IncRef, DecRef and TryIncRef for details of how these fields are
+ // used.
+ refCount int64
+}
+
+// InitRefs initializes r with one reference and, if enabled, activates leak
+// checking.
+func (r *MountNamespaceRefs) InitRefs() {
+ atomic.StoreInt64(&r.refCount, 1)
+ refsvfs2.Register(r)
+}
+
+// RefType implements refsvfs2.CheckedObject.RefType.
+func (r *MountNamespaceRefs) RefType() string {
+ return fmt.Sprintf("%T", MountNamespaceobj)[1:]
+}
+
+// LeakMessage implements refsvfs2.CheckedObject.LeakMessage.
+func (r *MountNamespaceRefs) LeakMessage() string {
+ return fmt.Sprintf("[%s %p] reference count of %d instead of 0", r.RefType(), r, r.ReadRefs())
+}
+
+// LogRefs implements refsvfs2.CheckedObject.LogRefs.
+func (r *MountNamespaceRefs) LogRefs() bool {
+ return MountNamespaceenableLogging
+}
+
+// ReadRefs returns the current number of references. The returned count is
+// inherently racy and is unsafe to use without external synchronization.
+func (r *MountNamespaceRefs) ReadRefs() int64 {
+ return atomic.LoadInt64(&r.refCount)
+}
+
+// IncRef implements refs.RefCounter.IncRef.
+//
+//go:nosplit
+func (r *MountNamespaceRefs) IncRef() {
+ v := atomic.AddInt64(&r.refCount, 1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogIncRef(r, v)
+ }
+ if v <= 1 {
+ panic(fmt.Sprintf("Incrementing non-positive count %p on %s", r, r.RefType()))
+ }
+}
+
+// TryIncRef implements refs.RefCounter.TryIncRef.
+//
+// To do this safely without a loop, a speculative reference is first acquired
+// on the object. This allows multiple concurrent TryIncRef calls to distinguish
+// other TryIncRef calls from genuine references held.
+//
+//go:nosplit
+func (r *MountNamespaceRefs) TryIncRef() bool {
+ const speculativeRef = 1 << 32
+ if v := atomic.AddInt64(&r.refCount, speculativeRef); int32(v) == 0 {
+
+ atomic.AddInt64(&r.refCount, -speculativeRef)
+ return false
+ }
+
+ v := atomic.AddInt64(&r.refCount, -speculativeRef+1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogTryIncRef(r, v)
+ }
+ return true
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Note that speculative references are counted here. Since they were added
+// prior to real references reaching zero, they will successfully convert to
+// real references. In other words, we see speculative references only in the
+// following case:
+//
+// A: TryIncRef [speculative increase => sees non-negative references]
+// B: DecRef [real decrease]
+// A: TryIncRef [transform speculative to real]
+//
+//go:nosplit
+func (r *MountNamespaceRefs) DecRef(destroy func()) {
+ v := atomic.AddInt64(&r.refCount, -1)
+ if MountNamespaceenableLogging {
+ refsvfs2.LogDecRef(r, v)
+ }
+ switch {
+ case v < 0:
+ panic(fmt.Sprintf("Decrementing non-positive ref count %p, owned by %s", r, r.RefType()))
+
+ case v == 0:
+ refsvfs2.Unregister(r)
+
+ if destroy != nil {
+ destroy()
+ }
+ }
+}
+
+func (r *MountNamespaceRefs) afterLoad() {
+ if r.ReadRefs() > 0 {
+ refsvfs2.Register(r)
+ }
+}
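The generated comments above describe the refCount layout, with speculative references in the upper 32 bits and real references in the lower 32, and how TryIncRef uses a single speculative add instead of a CompareAndSwap loop. A small self-contained sketch of that packing, using the same 1<<32 constant (illustrative only, not part of the generated file):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        const speculativeRef = int64(1) << 32

        // One real reference, as after InitRefs.
        var refCount int64 = 1

        // TryIncRef-style speculative acquisition: the add lands in the upper
        // 32 bits, so int32(v) still reports the real count.
        v := atomic.AddInt64(&refCount, speculativeRef)
        fmt.Printf("real refs: %d (raw %#x)\n", int32(v), v) // real refs: 1

        // Convert the speculative reference into a real one, as TryIncRef does
        // on success.
        v = atomic.AddInt64(&refCount, -speculativeRef+1)
        fmt.Printf("real refs: %d (raw %#x)\n", int32(v), v) // real refs: 2

        // If the real count were already zero, int32(v) would be zero and the
        // speculative reference would simply be rolled back (the failure path).
        var dead int64
        if v := atomic.AddInt64(&dead, speculativeRef); int32(v) == 0 {
            atomic.AddInt64(&dead, -speculativeRef)
            fmt.Println("TryIncRef on a dead object fails; no real reference taken")
        }
    }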
diff --git a/pkg/sentry/vfs/mount_test.go b/pkg/sentry/vfs/mount_test.go
deleted file mode 100644
index cb882a983..000000000
--- a/pkg/sentry/vfs/mount_test.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package vfs
-
-import (
- "fmt"
- "runtime"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestMountTableLookupEmpty(t *testing.T) {
- var mt mountTable
- mt.Init()
-
- parent := &Mount{}
- point := &Dentry{}
- if m := mt.Lookup(parent, point); m != nil {
- t.Errorf("Empty mountTable lookup: got %p, wanted nil", m)
- }
-}
-
-func TestMountTableInsertLookup(t *testing.T) {
- var mt mountTable
- mt.Init()
-
- mount := &Mount{}
- mount.setKey(VirtualDentry{&Mount{}, &Dentry{}})
- mt.Insert(mount)
-
- if m := mt.Lookup(mount.parent(), mount.point()); m != mount {
- t.Errorf("mountTable positive lookup: got %p, wanted %p", m, mount)
- }
-
- otherParent := &Mount{}
- if m := mt.Lookup(otherParent, mount.point()); m != nil {
- t.Errorf("mountTable lookup with wrong mount parent: got %p, wanted nil", m)
- }
- otherPoint := &Dentry{}
- if m := mt.Lookup(mount.parent(), otherPoint); m != nil {
- t.Errorf("mountTable lookup with wrong mount point: got %p, wanted nil", m)
- }
-}
-
-// TODO(gvisor.dev/issue/1035): concurrent lookup/insertion/removal.
-
-// Must be powers of 2: the benchmarks index keys with i&(numMounts-1); see the sketch after this file's diff.
-var benchNumMounts = []int{1 << 2, 1 << 5, 1 << 8}
-
-// For all of the following:
-//
-// - BenchmarkMountTableFoo tests usage pattern "Foo" for mountTable.
-//
-// - BenchmarkMountMapFoo tests usage pattern "Foo" for a
-// sync.RWMutex-protected map. (Mutator benchmarks do not use a RWMutex, since
-// mountTable also requires external synchronization between mutators.)
-//
-// - BenchmarkMountSyncMapFoo tests usage pattern "Foo" for a sync.Map.
-//
-// ParallelLookup is by far the most common and performance-sensitive operation
-// for this application. NegativeLookup is also important, but less so (only
-// relevant with multiple mount namespaces and significant differences in
-// mounts between them). Insertion and removal are benchmarked for
-// completeness.
-const enableComparativeBenchmarks = false
-
-func newBenchMount() *Mount {
- mount := &Mount{}
- mount.loadKey(VirtualDentry{&Mount{}, &Dentry{}})
- return mount
-}
-
-func BenchmarkMountTableParallelLookup(b *testing.B) {
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var mt mountTable
- mt.Init()
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- mt.Insert(mount)
- keys = append(keys, mount.saveKey())
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- m := mt.Lookup(k.mount, k.dentry)
- if m == nil {
- b.Errorf("Lookup failed")
- return
- }
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountMapParallelLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var mu sync.RWMutex
- ms := make(map[VirtualDentry]*Mount)
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- key := mount.saveKey()
- ms[key] = mount
- keys = append(keys, key)
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- mu.RLock()
- m := ms[k]
- mu.RUnlock()
- if m == nil {
- b.Errorf("Lookup failed")
- return
- }
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountSyncMapParallelLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%dx%d", numG, numMounts)
- b.Run(desc, func(b *testing.B) {
- var ms sync.Map
- keys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- key := mount.getKey()
- ms.Store(key, mount)
- keys = append(keys, key)
- }
-
- var ready sync.WaitGroup
- begin := make(chan struct{})
- var end sync.WaitGroup
- for g := 0; g < numG; g++ {
- ready.Add(1)
- end.Add(1)
- go func() {
- defer end.Done()
- ready.Done()
- <-begin
- for i := 0; i < b.N; i++ {
- k := keys[i&(numMounts-1)]
- mi, ok := ms.Load(k)
- if !ok {
- b.Errorf("Lookup failed")
- return
- }
- m := mi.(*Mount)
- if parent := m.parent(); parent != k.mount {
- b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
- return
- }
- if point := m.point(); point != k.dentry {
- b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
- return
- }
- }
- }()
- }
-
- ready.Wait()
- b.ResetTimer()
- close(begin)
- end.Wait()
- })
- }
- }
-}
-
-func BenchmarkMountTableNegativeLookup(b *testing.B) {
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var mt mountTable
- mt.Init()
- for i := 0; i < numMounts; i++ {
- mt.Insert(newBenchMount())
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- m := mt.Lookup(k.mount, k.dentry)
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountMapNegativeLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var mu sync.RWMutex
- ms := make(map[VirtualDentry]*Mount)
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- ms[mount.getKey()] = mount
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- mu.RLock()
- m := ms[k]
- mu.RUnlock()
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountSyncMapNegativeLookup(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- for _, numMounts := range benchNumMounts {
- desc := fmt.Sprintf("%d", numMounts)
- b.Run(desc, func(b *testing.B) {
- var ms sync.Map
- for i := 0; i < numMounts; i++ {
- mount := newBenchMount()
- ms.Store(mount.saveKey(), mount)
- }
- negkeys := make([]VirtualDentry, 0, numMounts)
- for i := 0; i < numMounts; i++ {
- negkeys = append(negkeys, VirtualDentry{
- mount: &Mount{},
- dentry: &Dentry{},
- })
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- k := negkeys[i&(numMounts-1)]
- m, _ := ms.Load(k)
- if m != nil {
- b.Fatalf("Lookup got %p, wanted nil", m)
- }
- }
- })
- }
-}
-
-func BenchmarkMountTableInsert(b *testing.B) {
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- var mt mountTable
- mt.Init()
- b.ResetTimer()
- for i := range mounts {
- mt.Insert(mounts[i])
- }
-}
-
-func BenchmarkMountMapInsert(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- ms := make(map[VirtualDentry]*Mount)
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms[mount.saveKey()] = mount
- }
-}
-
-func BenchmarkMountSyncMapInsert(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- // Preallocate Mounts so that allocation time isn't included in the
- // benchmark.
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
-
- var ms sync.Map
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms.Store(mount.saveKey(), mount)
- }
-}
-
-func BenchmarkMountTableRemove(b *testing.B) {
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- var mt mountTable
- mt.Init()
- for i := range mounts {
- mt.Insert(mounts[i])
- }
-
- b.ResetTimer()
- for i := range mounts {
- mt.Remove(mounts[i])
- }
-}
-
-func BenchmarkMountMapRemove(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- ms := make(map[VirtualDentry]*Mount)
- for i := range mounts {
- mount := mounts[i]
- ms[mount.saveKey()] = mount
- }
-
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- delete(ms, mount.saveKey())
- }
-}
-
-func BenchmarkMountSyncMapRemove(b *testing.B) {
- if !enableComparativeBenchmarks {
- b.Skipf("comparative benchmarks are disabled")
- }
-
- mounts := make([]*Mount, 0, b.N)
- for i := 0; i < b.N; i++ {
- mounts = append(mounts, newBenchMount())
- }
- var ms sync.Map
- for i := range mounts {
- mount := mounts[i]
- ms.Store(mount.saveKey(), mount)
- }
-
- b.ResetTimer()
- for i := range mounts {
- mount := mounts[i]
- ms.Delete(mount.saveKey())
- }
-}
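The removed benchmarks above pick keys with keys[i&(numMounts-1)], which is why benchNumMounts must hold powers of two: the bitmask is only equivalent to i%numMounts when numMounts is a power of two. A quick standalone check of that equivalence (illustrative, not part of the removed test file):

    package main

    import "fmt"

    func main() {
        // The same sizes as benchNumMounts: 1<<2, 1<<5, 1<<8.
        for _, n := range []int{1 << 2, 1 << 5, 1 << 8} {
            for i := 0; i < 10000; i++ {
                if i&(n-1) != i%n {
                    panic("mask and modulo disagree") // never reached for powers of two
                }
            }
        }
        fmt.Println("i&(n-1) == i%n for every tested power-of-two n")
    }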
diff --git a/pkg/sentry/vfs/vfs_state_autogen.go b/pkg/sentry/vfs/vfs_state_autogen.go
new file mode 100644
index 000000000..364adcc0f
--- /dev/null
+++ b/pkg/sentry/vfs/vfs_state_autogen.go
@@ -0,0 +1,2052 @@
+// automatically generated by stateify.
+
+package vfs
+
+import (
+ "gvisor.dev/gvisor/pkg/state"
+)
+
+func (a *anonFilesystemType) StateTypeName() string {
+ return "pkg/sentry/vfs.anonFilesystemType"
+}
+
+func (a *anonFilesystemType) StateFields() []string {
+ return []string{}
+}
+
+func (a *anonFilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (a *anonFilesystemType) StateSave(stateSinkObject state.Sink) {
+ a.beforeSave()
+}
+
+func (a *anonFilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (a *anonFilesystemType) StateLoad(stateSourceObject state.Source) {
+}
+
+func (fs *anonFilesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.anonFilesystem"
+}
+
+func (fs *anonFilesystem) StateFields() []string {
+ return []string{
+ "vfsfs",
+ "devMinor",
+ }
+}
+
+func (fs *anonFilesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *anonFilesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.vfsfs)
+ stateSinkObject.Save(1, &fs.devMinor)
+}
+
+func (fs *anonFilesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *anonFilesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.vfsfs)
+ stateSourceObject.Load(1, &fs.devMinor)
+}
+
+func (d *anonDentry) StateTypeName() string {
+ return "pkg/sentry/vfs.anonDentry"
+}
+
+func (d *anonDentry) StateFields() []string {
+ return []string{
+ "vfsd",
+ "name",
+ }
+}
+
+func (d *anonDentry) beforeSave() {}
+
+// +checklocksignore
+func (d *anonDentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.vfsd)
+ stateSinkObject.Save(1, &d.name)
+}
+
+func (d *anonDentry) afterLoad() {}
+
+// +checklocksignore
+func (d *anonDentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.vfsd)
+ stateSourceObject.Load(1, &d.name)
+}
+
+func (d *Dentry) StateTypeName() string {
+ return "pkg/sentry/vfs.Dentry"
+}
+
+func (d *Dentry) StateFields() []string {
+ return []string{
+ "dead",
+ "mounts",
+ "impl",
+ }
+}
+
+func (d *Dentry) beforeSave() {}
+
+// +checklocksignore
+func (d *Dentry) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.dead)
+ stateSinkObject.Save(1, &d.mounts)
+ stateSinkObject.Save(2, &d.impl)
+}
+
+func (d *Dentry) afterLoad() {}
+
+// +checklocksignore
+func (d *Dentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.dead)
+ stateSourceObject.Load(1, &d.mounts)
+ stateSourceObject.Load(2, &d.impl)
+}
+
+func (kind *DeviceKind) StateTypeName() string {
+ return "pkg/sentry/vfs.DeviceKind"
+}
+
+func (kind *DeviceKind) StateFields() []string {
+ return nil
+}
+
+func (d *devTuple) StateTypeName() string {
+ return "pkg/sentry/vfs.devTuple"
+}
+
+func (d *devTuple) StateFields() []string {
+ return []string{
+ "kind",
+ "major",
+ "minor",
+ }
+}
+
+func (d *devTuple) beforeSave() {}
+
+// +checklocksignore
+func (d *devTuple) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.kind)
+ stateSinkObject.Save(1, &d.major)
+ stateSinkObject.Save(2, &d.minor)
+}
+
+func (d *devTuple) afterLoad() {}
+
+// +checklocksignore
+func (d *devTuple) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.kind)
+ stateSourceObject.Load(1, &d.major)
+ stateSourceObject.Load(2, &d.minor)
+}
+
+func (r *registeredDevice) StateTypeName() string {
+ return "pkg/sentry/vfs.registeredDevice"
+}
+
+func (r *registeredDevice) StateFields() []string {
+ return []string{
+ "dev",
+ "opts",
+ }
+}
+
+func (r *registeredDevice) beforeSave() {}
+
+// +checklocksignore
+func (r *registeredDevice) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.dev)
+ stateSinkObject.Save(1, &r.opts)
+}
+
+func (r *registeredDevice) afterLoad() {}
+
+// +checklocksignore
+func (r *registeredDevice) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.dev)
+ stateSourceObject.Load(1, &r.opts)
+}
+
+func (r *RegisterDeviceOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RegisterDeviceOptions"
+}
+
+func (r *RegisterDeviceOptions) StateFields() []string {
+ return []string{
+ "GroupName",
+ }
+}
+
+func (r *RegisterDeviceOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RegisterDeviceOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.GroupName)
+}
+
+func (r *RegisterDeviceOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RegisterDeviceOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.GroupName)
+}
+
+func (ep *EpollInstance) StateTypeName() string {
+ return "pkg/sentry/vfs.EpollInstance"
+}
+
+func (ep *EpollInstance) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "q",
+ "interest",
+ "ready",
+ }
+}
+
+func (ep *EpollInstance) beforeSave() {}
+
+// +checklocksignore
+func (ep *EpollInstance) StateSave(stateSinkObject state.Sink) {
+ ep.beforeSave()
+ stateSinkObject.Save(0, &ep.vfsfd)
+ stateSinkObject.Save(1, &ep.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &ep.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &ep.NoLockFD)
+ stateSinkObject.Save(4, &ep.q)
+ stateSinkObject.Save(5, &ep.interest)
+ stateSinkObject.Save(6, &ep.ready)
+}
+
+func (ep *EpollInstance) afterLoad() {}
+
+// +checklocksignore
+func (ep *EpollInstance) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &ep.vfsfd)
+ stateSourceObject.Load(1, &ep.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &ep.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &ep.NoLockFD)
+ stateSourceObject.Load(4, &ep.q)
+ stateSourceObject.Load(5, &ep.interest)
+ stateSourceObject.Load(6, &ep.ready)
+}
+
+func (e *epollInterestKey) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestKey"
+}
+
+func (e *epollInterestKey) StateFields() []string {
+ return []string{
+ "file",
+ "num",
+ }
+}
+
+func (e *epollInterestKey) beforeSave() {}
+
+// +checklocksignore
+func (e *epollInterestKey) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.file)
+ stateSinkObject.Save(1, &e.num)
+}
+
+func (e *epollInterestKey) afterLoad() {}
+
+// +checklocksignore
+func (e *epollInterestKey) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.file)
+ stateSourceObject.Load(1, &e.num)
+}
+
+func (epi *epollInterest) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterest"
+}
+
+func (epi *epollInterest) StateFields() []string {
+ return []string{
+ "epoll",
+ "key",
+ "waiter",
+ "mask",
+ "ready",
+ "epollInterestEntry",
+ "userData",
+ }
+}
+
+func (epi *epollInterest) beforeSave() {}
+
+// +checklocksignore
+func (epi *epollInterest) StateSave(stateSinkObject state.Sink) {
+ epi.beforeSave()
+ stateSinkObject.Save(0, &epi.epoll)
+ stateSinkObject.Save(1, &epi.key)
+ stateSinkObject.Save(2, &epi.waiter)
+ stateSinkObject.Save(3, &epi.mask)
+ stateSinkObject.Save(4, &epi.ready)
+ stateSinkObject.Save(5, &epi.epollInterestEntry)
+ stateSinkObject.Save(6, &epi.userData)
+}
+
+// +checklocksignore
+func (epi *epollInterest) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.LoadWait(0, &epi.epoll)
+ stateSourceObject.Load(1, &epi.key)
+ stateSourceObject.Load(2, &epi.waiter)
+ stateSourceObject.Load(3, &epi.mask)
+ stateSourceObject.Load(4, &epi.ready)
+ stateSourceObject.Load(5, &epi.epollInterestEntry)
+ stateSourceObject.Load(6, &epi.userData)
+ stateSourceObject.AfterLoad(epi.afterLoad)
+}
+
+func (l *epollInterestList) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestList"
+}
+
+func (l *epollInterestList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *epollInterestList) beforeSave() {}
+
+// +checklocksignore
+func (l *epollInterestList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *epollInterestList) afterLoad() {}
+
+// +checklocksignore
+func (l *epollInterestList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *epollInterestEntry) StateTypeName() string {
+ return "pkg/sentry/vfs.epollInterestEntry"
+}
+
+func (e *epollInterestEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *epollInterestEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *epollInterestEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *epollInterestEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *epollInterestEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (l *eventList) StateTypeName() string {
+ return "pkg/sentry/vfs.eventList"
+}
+
+func (l *eventList) StateFields() []string {
+ return []string{
+ "head",
+ "tail",
+ }
+}
+
+func (l *eventList) beforeSave() {}
+
+// +checklocksignore
+func (l *eventList) StateSave(stateSinkObject state.Sink) {
+ l.beforeSave()
+ stateSinkObject.Save(0, &l.head)
+ stateSinkObject.Save(1, &l.tail)
+}
+
+func (l *eventList) afterLoad() {}
+
+// +checklocksignore
+func (l *eventList) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &l.head)
+ stateSourceObject.Load(1, &l.tail)
+}
+
+func (e *eventEntry) StateTypeName() string {
+ return "pkg/sentry/vfs.eventEntry"
+}
+
+func (e *eventEntry) StateFields() []string {
+ return []string{
+ "next",
+ "prev",
+ }
+}
+
+func (e *eventEntry) beforeSave() {}
+
+// +checklocksignore
+func (e *eventEntry) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.next)
+ stateSinkObject.Save(1, &e.prev)
+}
+
+func (e *eventEntry) afterLoad() {}
+
+// +checklocksignore
+func (e *eventEntry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.next)
+ stateSourceObject.Load(1, &e.prev)
+}
+
+func (fd *FileDescription) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescription"
+}
+
+func (fd *FileDescription) StateFields() []string {
+ return []string{
+ "FileDescriptionRefs",
+ "statusFlags",
+ "asyncHandler",
+ "epolls",
+ "vd",
+ "opts",
+ "readable",
+ "writable",
+ "usedLockBSD",
+ "impl",
+ }
+}
+
+// +checklocksignore
+func (fd *FileDescription) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.FileDescriptionRefs)
+ stateSinkObject.Save(1, &fd.statusFlags)
+ stateSinkObject.Save(2, &fd.asyncHandler)
+ stateSinkObject.Save(3, &fd.epolls)
+ stateSinkObject.Save(4, &fd.vd)
+ stateSinkObject.Save(5, &fd.opts)
+ stateSinkObject.Save(6, &fd.readable)
+ stateSinkObject.Save(7, &fd.writable)
+ stateSinkObject.Save(8, &fd.usedLockBSD)
+ stateSinkObject.Save(9, &fd.impl)
+}
+
+// +checklocksignore
+func (fd *FileDescription) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.FileDescriptionRefs)
+ stateSourceObject.Load(1, &fd.statusFlags)
+ stateSourceObject.Load(2, &fd.asyncHandler)
+ stateSourceObject.Load(3, &fd.epolls)
+ stateSourceObject.Load(4, &fd.vd)
+ stateSourceObject.Load(5, &fd.opts)
+ stateSourceObject.Load(6, &fd.readable)
+ stateSourceObject.Load(7, &fd.writable)
+ stateSourceObject.Load(8, &fd.usedLockBSD)
+ stateSourceObject.Load(9, &fd.impl)
+ stateSourceObject.AfterLoad(fd.afterLoad)
+}
+
+func (f *FileDescriptionOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionOptions"
+}
+
+func (f *FileDescriptionOptions) StateFields() []string {
+ return []string{
+ "AllowDirectIO",
+ "DenyPRead",
+ "DenyPWrite",
+ "UseDentryMetadata",
+ }
+}
+
+func (f *FileDescriptionOptions) beforeSave() {}
+
+// +checklocksignore
+func (f *FileDescriptionOptions) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+ stateSinkObject.Save(0, &f.AllowDirectIO)
+ stateSinkObject.Save(1, &f.DenyPRead)
+ stateSinkObject.Save(2, &f.DenyPWrite)
+ stateSinkObject.Save(3, &f.UseDentryMetadata)
+}
+
+func (f *FileDescriptionOptions) afterLoad() {}
+
+// +checklocksignore
+func (f *FileDescriptionOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &f.AllowDirectIO)
+ stateSourceObject.Load(1, &f.DenyPRead)
+ stateSourceObject.Load(2, &f.DenyPWrite)
+ stateSourceObject.Load(3, &f.UseDentryMetadata)
+}
+
+func (d *Dirent) StateTypeName() string {
+ return "pkg/sentry/vfs.Dirent"
+}
+
+func (d *Dirent) StateFields() []string {
+ return []string{
+ "Name",
+ "Type",
+ "Ino",
+ "NextOff",
+ }
+}
+
+func (d *Dirent) beforeSave() {}
+
+// +checklocksignore
+func (d *Dirent) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+ stateSinkObject.Save(0, &d.Name)
+ stateSinkObject.Save(1, &d.Type)
+ stateSinkObject.Save(2, &d.Ino)
+ stateSinkObject.Save(3, &d.NextOff)
+}
+
+func (d *Dirent) afterLoad() {}
+
+// +checklocksignore
+func (d *Dirent) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &d.Name)
+ stateSourceObject.Load(1, &d.Type)
+ stateSourceObject.Load(2, &d.Ino)
+ stateSourceObject.Load(3, &d.NextOff)
+}
+
+func (f *FileDescriptionDefaultImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionDefaultImpl"
+}
+
+func (f *FileDescriptionDefaultImpl) StateFields() []string {
+ return []string{}
+}
+
+func (f *FileDescriptionDefaultImpl) beforeSave() {}
+
+// +checklocksignore
+func (f *FileDescriptionDefaultImpl) StateSave(stateSinkObject state.Sink) {
+ f.beforeSave()
+}
+
+func (f *FileDescriptionDefaultImpl) afterLoad() {}
+
+// +checklocksignore
+func (f *FileDescriptionDefaultImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DirectoryFileDescriptionDefaultImpl"
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) StateFields() []string {
+ return []string{}
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) beforeSave() {}
+
+// +checklocksignore
+func (d *DirectoryFileDescriptionDefaultImpl) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *DirectoryFileDescriptionDefaultImpl) afterLoad() {}
+
+// +checklocksignore
+func (d *DirectoryFileDescriptionDefaultImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (d *DentryMetadataFileDescriptionImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DentryMetadataFileDescriptionImpl"
+}
+
+func (d *DentryMetadataFileDescriptionImpl) StateFields() []string {
+ return []string{}
+}
+
+func (d *DentryMetadataFileDescriptionImpl) beforeSave() {}
+
+// +checklocksignore
+func (d *DentryMetadataFileDescriptionImpl) StateSave(stateSinkObject state.Sink) {
+ d.beforeSave()
+}
+
+func (d *DentryMetadataFileDescriptionImpl) afterLoad() {}
+
+// +checklocksignore
+func (d *DentryMetadataFileDescriptionImpl) StateLoad(stateSourceObject state.Source) {
+}
+
+func (s *StaticData) StateTypeName() string {
+ return "pkg/sentry/vfs.StaticData"
+}
+
+func (s *StaticData) StateFields() []string {
+ return []string{
+ "Data",
+ }
+}
+
+func (s *StaticData) beforeSave() {}
+
+// +checklocksignore
+func (s *StaticData) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Data)
+}
+
+func (s *StaticData) afterLoad() {}
+
+// +checklocksignore
+func (s *StaticData) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Data)
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) StateTypeName() string {
+ return "pkg/sentry/vfs.DynamicBytesFileDescriptionImpl"
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) StateFields() []string {
+ return []string{
+ "data",
+ "buf",
+ "off",
+ "lastRead",
+ }
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) beforeSave() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFileDescriptionImpl) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ var bufValue []byte = fd.saveBuf()
+ stateSinkObject.SaveValue(1, bufValue)
+ stateSinkObject.Save(0, &fd.data)
+ stateSinkObject.Save(2, &fd.off)
+ stateSinkObject.Save(3, &fd.lastRead)
+}
+
+func (fd *DynamicBytesFileDescriptionImpl) afterLoad() {}
+
+// +checklocksignore
+func (fd *DynamicBytesFileDescriptionImpl) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.data)
+ stateSourceObject.Load(2, &fd.off)
+ stateSourceObject.Load(3, &fd.lastRead)
+ stateSourceObject.LoadValue(1, new([]byte), func(y interface{}) { fd.loadBuf(y.([]byte)) })
+}
+
+func (fd *LockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.LockFD"
+}
+
+func (fd *LockFD) StateFields() []string {
+ return []string{
+ "locks",
+ }
+}
+
+func (fd *LockFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *LockFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.locks)
+}
+
+func (fd *LockFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *LockFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.locks)
+}
+
+func (n *NoLockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.NoLockFD"
+}
+
+func (n *NoLockFD) StateFields() []string {
+ return []string{}
+}
+
+func (n *NoLockFD) beforeSave() {}
+
+// +checklocksignore
+func (n *NoLockFD) StateSave(stateSinkObject state.Sink) {
+ n.beforeSave()
+}
+
+func (n *NoLockFD) afterLoad() {}
+
+// +checklocksignore
+func (n *NoLockFD) StateLoad(stateSourceObject state.Source) {
+}
+
+func (b *BadLockFD) StateTypeName() string {
+ return "pkg/sentry/vfs.BadLockFD"
+}
+
+func (b *BadLockFD) StateFields() []string {
+ return []string{}
+}
+
+func (b *BadLockFD) beforeSave() {}
+
+// +checklocksignore
+func (b *BadLockFD) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+}
+
+func (b *BadLockFD) afterLoad() {}
+
+// +checklocksignore
+func (b *BadLockFD) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *FileDescriptionRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.FileDescriptionRefs"
+}
+
+func (r *FileDescriptionRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FileDescriptionRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FileDescriptionRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FileDescriptionRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (fs *Filesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.Filesystem"
+}
+
+func (fs *Filesystem) StateFields() []string {
+ return []string{
+ "FilesystemRefs",
+ "vfs",
+ "fsType",
+ "impl",
+ }
+}
+
+func (fs *Filesystem) beforeSave() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateSave(stateSinkObject state.Sink) {
+ fs.beforeSave()
+ stateSinkObject.Save(0, &fs.FilesystemRefs)
+ stateSinkObject.Save(1, &fs.vfs)
+ stateSinkObject.Save(2, &fs.fsType)
+ stateSinkObject.Save(3, &fs.impl)
+}
+
+func (fs *Filesystem) afterLoad() {}
+
+// +checklocksignore
+func (fs *Filesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fs.FilesystemRefs)
+ stateSourceObject.Load(1, &fs.vfs)
+ stateSourceObject.Load(2, &fs.fsType)
+ stateSourceObject.Load(3, &fs.impl)
+}
+
+func (p *PrependPathAtVFSRootError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathAtVFSRootError"
+}
+
+func (p *PrependPathAtVFSRootError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathAtVFSRootError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathAtVFSRootError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathAtVFSRootError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathAtVFSRootError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (p *PrependPathAtNonMountRootError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathAtNonMountRootError"
+}
+
+func (p *PrependPathAtNonMountRootError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathAtNonMountRootError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathAtNonMountRootError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathAtNonMountRootError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathAtNonMountRootError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (p *PrependPathSyntheticError) StateTypeName() string {
+ return "pkg/sentry/vfs.PrependPathSyntheticError"
+}
+
+func (p *PrependPathSyntheticError) StateFields() []string {
+ return []string{}
+}
+
+func (p *PrependPathSyntheticError) beforeSave() {}
+
+// +checklocksignore
+func (p *PrependPathSyntheticError) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+}
+
+func (p *PrependPathSyntheticError) afterLoad() {}
+
+// +checklocksignore
+func (p *PrependPathSyntheticError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *FilesystemRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.FilesystemRefs"
+}
+
+func (r *FilesystemRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *FilesystemRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *FilesystemRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *FilesystemRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (r *registeredFilesystemType) StateTypeName() string {
+ return "pkg/sentry/vfs.registeredFilesystemType"
+}
+
+func (r *registeredFilesystemType) StateFields() []string {
+ return []string{
+ "fsType",
+ "opts",
+ }
+}
+
+func (r *registeredFilesystemType) beforeSave() {}
+
+// +checklocksignore
+func (r *registeredFilesystemType) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.fsType)
+ stateSinkObject.Save(1, &r.opts)
+}
+
+func (r *registeredFilesystemType) afterLoad() {}
+
+// +checklocksignore
+func (r *registeredFilesystemType) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.fsType)
+ stateSourceObject.Load(1, &r.opts)
+}
+
+func (r *RegisterFilesystemTypeOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RegisterFilesystemTypeOptions"
+}
+
+func (r *RegisterFilesystemTypeOptions) StateFields() []string {
+ return []string{
+ "AllowUserMount",
+ "AllowUserList",
+ "RequiresDevice",
+ }
+}
+
+func (r *RegisterFilesystemTypeOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RegisterFilesystemTypeOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.AllowUserMount)
+ stateSinkObject.Save(1, &r.AllowUserList)
+ stateSinkObject.Save(2, &r.RequiresDevice)
+}
+
+func (r *RegisterFilesystemTypeOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RegisterFilesystemTypeOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.AllowUserMount)
+ stateSourceObject.Load(1, &r.AllowUserList)
+ stateSourceObject.Load(2, &r.RequiresDevice)
+}
+
+func (e *EventType) StateTypeName() string {
+ return "pkg/sentry/vfs.EventType"
+}
+
+func (e *EventType) StateFields() []string {
+ return nil
+}
+
+func (i *Inotify) StateTypeName() string {
+ return "pkg/sentry/vfs.Inotify"
+}
+
+func (i *Inotify) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "DentryMetadataFileDescriptionImpl",
+ "NoLockFD",
+ "id",
+ "queue",
+ "events",
+ "scratch",
+ "nextWatchMinusOne",
+ "watches",
+ }
+}
+
+func (i *Inotify) beforeSave() {}
+
+// +checklocksignore
+func (i *Inotify) StateSave(stateSinkObject state.Sink) {
+ i.beforeSave()
+ stateSinkObject.Save(0, &i.vfsfd)
+ stateSinkObject.Save(1, &i.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &i.DentryMetadataFileDescriptionImpl)
+ stateSinkObject.Save(3, &i.NoLockFD)
+ stateSinkObject.Save(4, &i.id)
+ stateSinkObject.Save(5, &i.queue)
+ stateSinkObject.Save(6, &i.events)
+ stateSinkObject.Save(7, &i.scratch)
+ stateSinkObject.Save(8, &i.nextWatchMinusOne)
+ stateSinkObject.Save(9, &i.watches)
+}
+
+func (i *Inotify) afterLoad() {}
+
+// +checklocksignore
+func (i *Inotify) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &i.vfsfd)
+ stateSourceObject.Load(1, &i.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &i.DentryMetadataFileDescriptionImpl)
+ stateSourceObject.Load(3, &i.NoLockFD)
+ stateSourceObject.Load(4, &i.id)
+ stateSourceObject.Load(5, &i.queue)
+ stateSourceObject.Load(6, &i.events)
+ stateSourceObject.Load(7, &i.scratch)
+ stateSourceObject.Load(8, &i.nextWatchMinusOne)
+ stateSourceObject.Load(9, &i.watches)
+}
+
+func (w *Watches) StateTypeName() string {
+ return "pkg/sentry/vfs.Watches"
+}
+
+func (w *Watches) StateFields() []string {
+ return []string{
+ "ws",
+ }
+}
+
+func (w *Watches) beforeSave() {}
+
+// +checklocksignore
+func (w *Watches) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.ws)
+}
+
+func (w *Watches) afterLoad() {}
+
+// +checklocksignore
+func (w *Watches) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.ws)
+}
+
+func (w *Watch) StateTypeName() string {
+ return "pkg/sentry/vfs.Watch"
+}
+
+func (w *Watch) StateFields() []string {
+ return []string{
+ "owner",
+ "wd",
+ "target",
+ "mask",
+ "expired",
+ }
+}
+
+func (w *Watch) beforeSave() {}
+
+// +checklocksignore
+func (w *Watch) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.owner)
+ stateSinkObject.Save(1, &w.wd)
+ stateSinkObject.Save(2, &w.target)
+ stateSinkObject.Save(3, &w.mask)
+ stateSinkObject.Save(4, &w.expired)
+}
+
+func (w *Watch) afterLoad() {}
+
+// +checklocksignore
+func (w *Watch) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.owner)
+ stateSourceObject.Load(1, &w.wd)
+ stateSourceObject.Load(2, &w.target)
+ stateSourceObject.Load(3, &w.mask)
+ stateSourceObject.Load(4, &w.expired)
+}
+
+func (e *Event) StateTypeName() string {
+ return "pkg/sentry/vfs.Event"
+}
+
+func (e *Event) StateFields() []string {
+ return []string{
+ "eventEntry",
+ "wd",
+ "mask",
+ "cookie",
+ "len",
+ "name",
+ }
+}
+
+func (e *Event) beforeSave() {}
+
+// +checklocksignore
+func (e *Event) StateSave(stateSinkObject state.Sink) {
+ e.beforeSave()
+ stateSinkObject.Save(0, &e.eventEntry)
+ stateSinkObject.Save(1, &e.wd)
+ stateSinkObject.Save(2, &e.mask)
+ stateSinkObject.Save(3, &e.cookie)
+ stateSinkObject.Save(4, &e.len)
+ stateSinkObject.Save(5, &e.name)
+}
+
+func (e *Event) afterLoad() {}
+
+// +checklocksignore
+func (e *Event) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &e.eventEntry)
+ stateSourceObject.Load(1, &e.wd)
+ stateSourceObject.Load(2, &e.mask)
+ stateSourceObject.Load(3, &e.cookie)
+ stateSourceObject.Load(4, &e.len)
+ stateSourceObject.Load(5, &e.name)
+}
+
+func (fl *FileLocks) StateTypeName() string {
+ return "pkg/sentry/vfs.FileLocks"
+}
+
+func (fl *FileLocks) StateFields() []string {
+ return []string{
+ "bsd",
+ "posix",
+ }
+}
+
+func (fl *FileLocks) beforeSave() {}
+
+// +checklocksignore
+func (fl *FileLocks) StateSave(stateSinkObject state.Sink) {
+ fl.beforeSave()
+ stateSinkObject.Save(0, &fl.bsd)
+ stateSinkObject.Save(1, &fl.posix)
+}
+
+func (fl *FileLocks) afterLoad() {}
+
+// +checklocksignore
+func (fl *FileLocks) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fl.bsd)
+ stateSourceObject.Load(1, &fl.posix)
+}
+
+func (mnt *Mount) StateTypeName() string {
+ return "pkg/sentry/vfs.Mount"
+}
+
+func (mnt *Mount) StateFields() []string {
+ return []string{
+ "vfs",
+ "fs",
+ "root",
+ "ID",
+ "Flags",
+ "key",
+ "ns",
+ "refs",
+ "children",
+ "umounted",
+ "writers",
+ }
+}
+
+func (mnt *Mount) beforeSave() {}
+
+// +checklocksignore
+func (mnt *Mount) StateSave(stateSinkObject state.Sink) {
+ mnt.beforeSave()
+ var keyValue VirtualDentry = mnt.saveKey()
+ stateSinkObject.SaveValue(5, keyValue)
+ stateSinkObject.Save(0, &mnt.vfs)
+ stateSinkObject.Save(1, &mnt.fs)
+ stateSinkObject.Save(2, &mnt.root)
+ stateSinkObject.Save(3, &mnt.ID)
+ stateSinkObject.Save(4, &mnt.Flags)
+ stateSinkObject.Save(6, &mnt.ns)
+ stateSinkObject.Save(7, &mnt.refs)
+ stateSinkObject.Save(8, &mnt.children)
+ stateSinkObject.Save(9, &mnt.umounted)
+ stateSinkObject.Save(10, &mnt.writers)
+}
+
+// +checklocksignore
+func (mnt *Mount) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mnt.vfs)
+ stateSourceObject.Load(1, &mnt.fs)
+ stateSourceObject.Load(2, &mnt.root)
+ stateSourceObject.Load(3, &mnt.ID)
+ stateSourceObject.Load(4, &mnt.Flags)
+ stateSourceObject.Load(6, &mnt.ns)
+ stateSourceObject.Load(7, &mnt.refs)
+ stateSourceObject.Load(8, &mnt.children)
+ stateSourceObject.Load(9, &mnt.umounted)
+ stateSourceObject.Load(10, &mnt.writers)
+ stateSourceObject.LoadValue(5, new(VirtualDentry), func(y interface{}) { mnt.loadKey(y.(VirtualDentry)) })
+ stateSourceObject.AfterLoad(mnt.afterLoad)
+}
+
+func (mntns *MountNamespace) StateTypeName() string {
+ return "pkg/sentry/vfs.MountNamespace"
+}
+
+func (mntns *MountNamespace) StateFields() []string {
+ return []string{
+ "MountNamespaceRefs",
+ "Owner",
+ "root",
+ "mountpoints",
+ }
+}
+
+func (mntns *MountNamespace) beforeSave() {}
+
+// +checklocksignore
+func (mntns *MountNamespace) StateSave(stateSinkObject state.Sink) {
+ mntns.beforeSave()
+ stateSinkObject.Save(0, &mntns.MountNamespaceRefs)
+ stateSinkObject.Save(1, &mntns.Owner)
+ stateSinkObject.Save(2, &mntns.root)
+ stateSinkObject.Save(3, &mntns.mountpoints)
+}
+
+func (mntns *MountNamespace) afterLoad() {}
+
+// +checklocksignore
+func (mntns *MountNamespace) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &mntns.MountNamespaceRefs)
+ stateSourceObject.Load(1, &mntns.Owner)
+ stateSourceObject.Load(2, &mntns.root)
+ stateSourceObject.Load(3, &mntns.mountpoints)
+}
+
+func (u *umountRecursiveOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.umountRecursiveOptions"
+}
+
+func (u *umountRecursiveOptions) StateFields() []string {
+ return []string{
+ "eager",
+ "disconnectHierarchy",
+ }
+}
+
+func (u *umountRecursiveOptions) beforeSave() {}
+
+// +checklocksignore
+func (u *umountRecursiveOptions) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.eager)
+ stateSinkObject.Save(1, &u.disconnectHierarchy)
+}
+
+func (u *umountRecursiveOptions) afterLoad() {}
+
+// +checklocksignore
+func (u *umountRecursiveOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.eager)
+ stateSourceObject.Load(1, &u.disconnectHierarchy)
+}
+
+func (r *MountNamespaceRefs) StateTypeName() string {
+ return "pkg/sentry/vfs.MountNamespaceRefs"
+}
+
+func (r *MountNamespaceRefs) StateFields() []string {
+ return []string{
+ "refCount",
+ }
+}
+
+func (r *MountNamespaceRefs) beforeSave() {}
+
+// +checklocksignore
+func (r *MountNamespaceRefs) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.refCount)
+}
+
+// +checklocksignore
+func (r *MountNamespaceRefs) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.refCount)
+ stateSourceObject.AfterLoad(r.afterLoad)
+}
+
+func (fd *opathFD) StateTypeName() string {
+ return "pkg/sentry/vfs.opathFD"
+}
+
+func (fd *opathFD) StateFields() []string {
+ return []string{
+ "vfsfd",
+ "FileDescriptionDefaultImpl",
+ "BadLockFD",
+ }
+}
+
+func (fd *opathFD) beforeSave() {}
+
+// +checklocksignore
+func (fd *opathFD) StateSave(stateSinkObject state.Sink) {
+ fd.beforeSave()
+ stateSinkObject.Save(0, &fd.vfsfd)
+ stateSinkObject.Save(1, &fd.FileDescriptionDefaultImpl)
+ stateSinkObject.Save(2, &fd.BadLockFD)
+}
+
+func (fd *opathFD) afterLoad() {}
+
+// +checklocksignore
+func (fd *opathFD) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &fd.vfsfd)
+ stateSourceObject.Load(1, &fd.FileDescriptionDefaultImpl)
+ stateSourceObject.Load(2, &fd.BadLockFD)
+}
+
+func (g *GetDentryOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.GetDentryOptions"
+}
+
+func (g *GetDentryOptions) StateFields() []string {
+ return []string{
+ "CheckSearchable",
+ }
+}
+
+func (g *GetDentryOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GetDentryOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.CheckSearchable)
+}
+
+func (g *GetDentryOptions) afterLoad() {}
+
+// +checklocksignore
+func (g *GetDentryOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.CheckSearchable)
+}
+
+func (m *MkdirOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MkdirOptions"
+}
+
+func (m *MkdirOptions) StateFields() []string {
+ return []string{
+ "Mode",
+ "ForSyntheticMountpoint",
+ }
+}
+
+func (m *MkdirOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MkdirOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Mode)
+ stateSinkObject.Save(1, &m.ForSyntheticMountpoint)
+}
+
+func (m *MkdirOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MkdirOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Mode)
+ stateSourceObject.Load(1, &m.ForSyntheticMountpoint)
+}
+
+func (m *MknodOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MknodOptions"
+}
+
+func (m *MknodOptions) StateFields() []string {
+ return []string{
+ "Mode",
+ "DevMajor",
+ "DevMinor",
+ "Endpoint",
+ }
+}
+
+func (m *MknodOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MknodOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Mode)
+ stateSinkObject.Save(1, &m.DevMajor)
+ stateSinkObject.Save(2, &m.DevMinor)
+ stateSinkObject.Save(3, &m.Endpoint)
+}
+
+func (m *MknodOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MknodOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Mode)
+ stateSourceObject.Load(1, &m.DevMajor)
+ stateSourceObject.Load(2, &m.DevMinor)
+ stateSourceObject.Load(3, &m.Endpoint)
+}
+
+func (m *MountFlags) StateTypeName() string {
+ return "pkg/sentry/vfs.MountFlags"
+}
+
+func (m *MountFlags) StateFields() []string {
+ return []string{
+ "NoExec",
+ "NoATime",
+ "NoDev",
+ "NoSUID",
+ }
+}
+
+func (m *MountFlags) beforeSave() {}
+
+// +checklocksignore
+func (m *MountFlags) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.NoExec)
+ stateSinkObject.Save(1, &m.NoATime)
+ stateSinkObject.Save(2, &m.NoDev)
+ stateSinkObject.Save(3, &m.NoSUID)
+}
+
+func (m *MountFlags) afterLoad() {}
+
+// +checklocksignore
+func (m *MountFlags) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.NoExec)
+ stateSourceObject.Load(1, &m.NoATime)
+ stateSourceObject.Load(2, &m.NoDev)
+ stateSourceObject.Load(3, &m.NoSUID)
+}
+
+func (m *MountOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.MountOptions"
+}
+
+func (m *MountOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "ReadOnly",
+ "GetFilesystemOptions",
+ "InternalMount",
+ }
+}
+
+func (m *MountOptions) beforeSave() {}
+
+// +checklocksignore
+func (m *MountOptions) StateSave(stateSinkObject state.Sink) {
+ m.beforeSave()
+ stateSinkObject.Save(0, &m.Flags)
+ stateSinkObject.Save(1, &m.ReadOnly)
+ stateSinkObject.Save(2, &m.GetFilesystemOptions)
+ stateSinkObject.Save(3, &m.InternalMount)
+}
+
+func (m *MountOptions) afterLoad() {}
+
+// +checklocksignore
+func (m *MountOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &m.Flags)
+ stateSourceObject.Load(1, &m.ReadOnly)
+ stateSourceObject.Load(2, &m.GetFilesystemOptions)
+ stateSourceObject.Load(3, &m.InternalMount)
+}
+
+func (o *OpenOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.OpenOptions"
+}
+
+func (o *OpenOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "Mode",
+ "FileExec",
+ }
+}
+
+func (o *OpenOptions) beforeSave() {}
+
+// +checklocksignore
+func (o *OpenOptions) StateSave(stateSinkObject state.Sink) {
+ o.beforeSave()
+ stateSinkObject.Save(0, &o.Flags)
+ stateSinkObject.Save(1, &o.Mode)
+ stateSinkObject.Save(2, &o.FileExec)
+}
+
+func (o *OpenOptions) afterLoad() {}
+
+// +checklocksignore
+func (o *OpenOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &o.Flags)
+ stateSourceObject.Load(1, &o.Mode)
+ stateSourceObject.Load(2, &o.FileExec)
+}
+
+func (r *ReadOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.ReadOptions"
+}
+
+func (r *ReadOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (r *ReadOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *ReadOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Flags)
+}
+
+func (r *ReadOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *ReadOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Flags)
+}
+
+func (r *RenameOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.RenameOptions"
+}
+
+func (r *RenameOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ "MustBeDir",
+ }
+}
+
+func (r *RenameOptions) beforeSave() {}
+
+// +checklocksignore
+func (r *RenameOptions) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+ stateSinkObject.Save(0, &r.Flags)
+ stateSinkObject.Save(1, &r.MustBeDir)
+}
+
+func (r *RenameOptions) afterLoad() {}
+
+// +checklocksignore
+func (r *RenameOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &r.Flags)
+ stateSourceObject.Load(1, &r.MustBeDir)
+}
+
+func (s *SetStatOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.SetStatOptions"
+}
+
+func (s *SetStatOptions) StateFields() []string {
+ return []string{
+ "Stat",
+ "NeedWritePerm",
+ }
+}
+
+func (s *SetStatOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *SetStatOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Stat)
+ stateSinkObject.Save(1, &s.NeedWritePerm)
+}
+
+func (s *SetStatOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *SetStatOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Stat)
+ stateSourceObject.Load(1, &s.NeedWritePerm)
+}
+
+func (b *BoundEndpointOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.BoundEndpointOptions"
+}
+
+func (b *BoundEndpointOptions) StateFields() []string {
+ return []string{
+ "Addr",
+ }
+}
+
+func (b *BoundEndpointOptions) beforeSave() {}
+
+// +checklocksignore
+func (b *BoundEndpointOptions) StateSave(stateSinkObject state.Sink) {
+ b.beforeSave()
+ stateSinkObject.Save(0, &b.Addr)
+}
+
+func (b *BoundEndpointOptions) afterLoad() {}
+
+// +checklocksignore
+func (b *BoundEndpointOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &b.Addr)
+}
+
+func (g *GetXattrOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.GetXattrOptions"
+}
+
+func (g *GetXattrOptions) StateFields() []string {
+ return []string{
+ "Name",
+ "Size",
+ }
+}
+
+func (g *GetXattrOptions) beforeSave() {}
+
+// +checklocksignore
+func (g *GetXattrOptions) StateSave(stateSinkObject state.Sink) {
+ g.beforeSave()
+ stateSinkObject.Save(0, &g.Name)
+ stateSinkObject.Save(1, &g.Size)
+}
+
+func (g *GetXattrOptions) afterLoad() {}
+
+// +checklocksignore
+func (g *GetXattrOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &g.Name)
+ stateSourceObject.Load(1, &g.Size)
+}
+
+func (s *SetXattrOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.SetXattrOptions"
+}
+
+func (s *SetXattrOptions) StateFields() []string {
+ return []string{
+ "Name",
+ "Value",
+ "Flags",
+ }
+}
+
+func (s *SetXattrOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *SetXattrOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Name)
+ stateSinkObject.Save(1, &s.Value)
+ stateSinkObject.Save(2, &s.Flags)
+}
+
+func (s *SetXattrOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *SetXattrOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Name)
+ stateSourceObject.Load(1, &s.Value)
+ stateSourceObject.Load(2, &s.Flags)
+}
+
+func (s *StatOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.StatOptions"
+}
+
+func (s *StatOptions) StateFields() []string {
+ return []string{
+ "Mask",
+ "Sync",
+ }
+}
+
+func (s *StatOptions) beforeSave() {}
+
+// +checklocksignore
+func (s *StatOptions) StateSave(stateSinkObject state.Sink) {
+ s.beforeSave()
+ stateSinkObject.Save(0, &s.Mask)
+ stateSinkObject.Save(1, &s.Sync)
+}
+
+func (s *StatOptions) afterLoad() {}
+
+// +checklocksignore
+func (s *StatOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &s.Mask)
+ stateSourceObject.Load(1, &s.Sync)
+}
+
+func (u *UmountOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.UmountOptions"
+}
+
+func (u *UmountOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (u *UmountOptions) beforeSave() {}
+
+// +checklocksignore
+func (u *UmountOptions) StateSave(stateSinkObject state.Sink) {
+ u.beforeSave()
+ stateSinkObject.Save(0, &u.Flags)
+}
+
+func (u *UmountOptions) afterLoad() {}
+
+// +checklocksignore
+func (u *UmountOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &u.Flags)
+}
+
+func (w *WriteOptions) StateTypeName() string {
+ return "pkg/sentry/vfs.WriteOptions"
+}
+
+func (w *WriteOptions) StateFields() []string {
+ return []string{
+ "Flags",
+ }
+}
+
+func (w *WriteOptions) beforeSave() {}
+
+// +checklocksignore
+func (w *WriteOptions) StateSave(stateSinkObject state.Sink) {
+ w.beforeSave()
+ stateSinkObject.Save(0, &w.Flags)
+}
+
+func (w *WriteOptions) afterLoad() {}
+
+// +checklocksignore
+func (w *WriteOptions) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &w.Flags)
+}
+
+func (a *AccessTypes) StateTypeName() string {
+ return "pkg/sentry/vfs.AccessTypes"
+}
+
+func (a *AccessTypes) StateFields() []string {
+ return nil
+}
+
+func (rp *ResolvingPath) StateTypeName() string {
+ return "pkg/sentry/vfs.ResolvingPath"
+}
+
+func (rp *ResolvingPath) StateFields() []string {
+ return []string{
+ "vfs",
+ "root",
+ "mount",
+ "start",
+ "pit",
+ "flags",
+ "mustBeDir",
+ "symlinks",
+ "curPart",
+ "creds",
+ "nextMount",
+ "nextStart",
+ "absSymlinkTarget",
+ "parts",
+ }
+}
+
+func (rp *ResolvingPath) beforeSave() {}
+
+// +checklocksignore
+func (rp *ResolvingPath) StateSave(stateSinkObject state.Sink) {
+ rp.beforeSave()
+ stateSinkObject.Save(0, &rp.vfs)
+ stateSinkObject.Save(1, &rp.root)
+ stateSinkObject.Save(2, &rp.mount)
+ stateSinkObject.Save(3, &rp.start)
+ stateSinkObject.Save(4, &rp.pit)
+ stateSinkObject.Save(5, &rp.flags)
+ stateSinkObject.Save(6, &rp.mustBeDir)
+ stateSinkObject.Save(7, &rp.symlinks)
+ stateSinkObject.Save(8, &rp.curPart)
+ stateSinkObject.Save(9, &rp.creds)
+ stateSinkObject.Save(10, &rp.nextMount)
+ stateSinkObject.Save(11, &rp.nextStart)
+ stateSinkObject.Save(12, &rp.absSymlinkTarget)
+ stateSinkObject.Save(13, &rp.parts)
+}
+
+func (rp *ResolvingPath) afterLoad() {}
+
+// +checklocksignore
+func (rp *ResolvingPath) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &rp.vfs)
+ stateSourceObject.Load(1, &rp.root)
+ stateSourceObject.Load(2, &rp.mount)
+ stateSourceObject.Load(3, &rp.start)
+ stateSourceObject.Load(4, &rp.pit)
+ stateSourceObject.Load(5, &rp.flags)
+ stateSourceObject.Load(6, &rp.mustBeDir)
+ stateSourceObject.Load(7, &rp.symlinks)
+ stateSourceObject.Load(8, &rp.curPart)
+ stateSourceObject.Load(9, &rp.creds)
+ stateSourceObject.Load(10, &rp.nextMount)
+ stateSourceObject.Load(11, &rp.nextStart)
+ stateSourceObject.Load(12, &rp.absSymlinkTarget)
+ stateSourceObject.Load(13, &rp.parts)
+}
+
+func (r *resolveMountRootOrJumpError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveMountRootOrJumpError"
+}
+
+func (r *resolveMountRootOrJumpError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveMountRootOrJumpError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveMountRootOrJumpError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveMountRootOrJumpError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveMountRootOrJumpError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *resolveMountPointError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveMountPointError"
+}
+
+func (r *resolveMountPointError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveMountPointError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveMountPointError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveMountPointError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveMountPointError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (r *resolveAbsSymlinkError) StateTypeName() string {
+ return "pkg/sentry/vfs.resolveAbsSymlinkError"
+}
+
+func (r *resolveAbsSymlinkError) StateFields() []string {
+ return []string{}
+}
+
+func (r *resolveAbsSymlinkError) beforeSave() {}
+
+// +checklocksignore
+func (r *resolveAbsSymlinkError) StateSave(stateSinkObject state.Sink) {
+ r.beforeSave()
+}
+
+func (r *resolveAbsSymlinkError) afterLoad() {}
+
+// +checklocksignore
+func (r *resolveAbsSymlinkError) StateLoad(stateSourceObject state.Source) {
+}
+
+func (vfs *VirtualFilesystem) StateTypeName() string {
+ return "pkg/sentry/vfs.VirtualFilesystem"
+}
+
+func (vfs *VirtualFilesystem) StateFields() []string {
+ return []string{
+ "mounts",
+ "mountpoints",
+ "lastMountID",
+ "anonMount",
+ "devices",
+ "anonBlockDevMinorNext",
+ "anonBlockDevMinor",
+ "fsTypes",
+ "filesystems",
+ }
+}
+
+func (vfs *VirtualFilesystem) beforeSave() {}
+
+// +checklocksignore
+func (vfs *VirtualFilesystem) StateSave(stateSinkObject state.Sink) {
+ vfs.beforeSave()
+ var mountsValue []*Mount = vfs.saveMounts()
+ stateSinkObject.SaveValue(0, mountsValue)
+ stateSinkObject.Save(1, &vfs.mountpoints)
+ stateSinkObject.Save(2, &vfs.lastMountID)
+ stateSinkObject.Save(3, &vfs.anonMount)
+ stateSinkObject.Save(4, &vfs.devices)
+ stateSinkObject.Save(5, &vfs.anonBlockDevMinorNext)
+ stateSinkObject.Save(6, &vfs.anonBlockDevMinor)
+ stateSinkObject.Save(7, &vfs.fsTypes)
+ stateSinkObject.Save(8, &vfs.filesystems)
+}
+
+func (vfs *VirtualFilesystem) afterLoad() {}
+
+// +checklocksignore
+func (vfs *VirtualFilesystem) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(1, &vfs.mountpoints)
+ stateSourceObject.Load(2, &vfs.lastMountID)
+ stateSourceObject.Load(3, &vfs.anonMount)
+ stateSourceObject.Load(4, &vfs.devices)
+ stateSourceObject.Load(5, &vfs.anonBlockDevMinorNext)
+ stateSourceObject.Load(6, &vfs.anonBlockDevMinor)
+ stateSourceObject.Load(7, &vfs.fsTypes)
+ stateSourceObject.Load(8, &vfs.filesystems)
+ stateSourceObject.LoadValue(0, new([]*Mount), func(y interface{}) { vfs.loadMounts(y.([]*Mount)) })
+}
+
+func (p *PathOperation) StateTypeName() string {
+ return "pkg/sentry/vfs.PathOperation"
+}
+
+func (p *PathOperation) StateFields() []string {
+ return []string{
+ "Root",
+ "Start",
+ "Path",
+ "FollowFinalSymlink",
+ }
+}
+
+func (p *PathOperation) beforeSave() {}
+
+// +checklocksignore
+func (p *PathOperation) StateSave(stateSinkObject state.Sink) {
+ p.beforeSave()
+ stateSinkObject.Save(0, &p.Root)
+ stateSinkObject.Save(1, &p.Start)
+ stateSinkObject.Save(2, &p.Path)
+ stateSinkObject.Save(3, &p.FollowFinalSymlink)
+}
+
+func (p *PathOperation) afterLoad() {}
+
+// +checklocksignore
+func (p *PathOperation) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &p.Root)
+ stateSourceObject.Load(1, &p.Start)
+ stateSourceObject.Load(2, &p.Path)
+ stateSourceObject.Load(3, &p.FollowFinalSymlink)
+}
+
+func (vd *VirtualDentry) StateTypeName() string {
+ return "pkg/sentry/vfs.VirtualDentry"
+}
+
+func (vd *VirtualDentry) StateFields() []string {
+ return []string{
+ "mount",
+ "dentry",
+ }
+}
+
+func (vd *VirtualDentry) beforeSave() {}
+
+// +checklocksignore
+func (vd *VirtualDentry) StateSave(stateSinkObject state.Sink) {
+ vd.beforeSave()
+ stateSinkObject.Save(0, &vd.mount)
+ stateSinkObject.Save(1, &vd.dentry)
+}
+
+func (vd *VirtualDentry) afterLoad() {}
+
+// +checklocksignore
+func (vd *VirtualDentry) StateLoad(stateSourceObject state.Source) {
+ stateSourceObject.Load(0, &vd.mount)
+ stateSourceObject.Load(1, &vd.dentry)
+}
+
+func init() {
+ state.Register((*anonFilesystemType)(nil))
+ state.Register((*anonFilesystem)(nil))
+ state.Register((*anonDentry)(nil))
+ state.Register((*Dentry)(nil))
+ state.Register((*DeviceKind)(nil))
+ state.Register((*devTuple)(nil))
+ state.Register((*registeredDevice)(nil))
+ state.Register((*RegisterDeviceOptions)(nil))
+ state.Register((*EpollInstance)(nil))
+ state.Register((*epollInterestKey)(nil))
+ state.Register((*epollInterest)(nil))
+ state.Register((*epollInterestList)(nil))
+ state.Register((*epollInterestEntry)(nil))
+ state.Register((*eventList)(nil))
+ state.Register((*eventEntry)(nil))
+ state.Register((*FileDescription)(nil))
+ state.Register((*FileDescriptionOptions)(nil))
+ state.Register((*Dirent)(nil))
+ state.Register((*FileDescriptionDefaultImpl)(nil))
+ state.Register((*DirectoryFileDescriptionDefaultImpl)(nil))
+ state.Register((*DentryMetadataFileDescriptionImpl)(nil))
+ state.Register((*StaticData)(nil))
+ state.Register((*DynamicBytesFileDescriptionImpl)(nil))
+ state.Register((*LockFD)(nil))
+ state.Register((*NoLockFD)(nil))
+ state.Register((*BadLockFD)(nil))
+ state.Register((*FileDescriptionRefs)(nil))
+ state.Register((*Filesystem)(nil))
+ state.Register((*PrependPathAtVFSRootError)(nil))
+ state.Register((*PrependPathAtNonMountRootError)(nil))
+ state.Register((*PrependPathSyntheticError)(nil))
+ state.Register((*FilesystemRefs)(nil))
+ state.Register((*registeredFilesystemType)(nil))
+ state.Register((*RegisterFilesystemTypeOptions)(nil))
+ state.Register((*EventType)(nil))
+ state.Register((*Inotify)(nil))
+ state.Register((*Watches)(nil))
+ state.Register((*Watch)(nil))
+ state.Register((*Event)(nil))
+ state.Register((*FileLocks)(nil))
+ state.Register((*Mount)(nil))
+ state.Register((*MountNamespace)(nil))
+ state.Register((*umountRecursiveOptions)(nil))
+ state.Register((*MountNamespaceRefs)(nil))
+ state.Register((*opathFD)(nil))
+ state.Register((*GetDentryOptions)(nil))
+ state.Register((*MkdirOptions)(nil))
+ state.Register((*MknodOptions)(nil))
+ state.Register((*MountFlags)(nil))
+ state.Register((*MountOptions)(nil))
+ state.Register((*OpenOptions)(nil))
+ state.Register((*ReadOptions)(nil))
+ state.Register((*RenameOptions)(nil))
+ state.Register((*SetStatOptions)(nil))
+ state.Register((*BoundEndpointOptions)(nil))
+ state.Register((*GetXattrOptions)(nil))
+ state.Register((*SetXattrOptions)(nil))
+ state.Register((*StatOptions)(nil))
+ state.Register((*UmountOptions)(nil))
+ state.Register((*WriteOptions)(nil))
+ state.Register((*AccessTypes)(nil))
+ state.Register((*ResolvingPath)(nil))
+ state.Register((*resolveMountRootOrJumpError)(nil))
+ state.Register((*resolveMountPointError)(nil))
+ state.Register((*resolveAbsSymlinkError)(nil))
+ state.Register((*VirtualFilesystem)(nil))
+ state.Register((*PathOperation)(nil))
+ state.Register((*VirtualDentry)(nil))
+}
diff --git a/pkg/sentry/vfs/vfs_unsafe_state_autogen.go b/pkg/sentry/vfs/vfs_unsafe_state_autogen.go
new file mode 100644
index 000000000..20f06c953
--- /dev/null
+++ b/pkg/sentry/vfs/vfs_unsafe_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package vfs
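
The generated methods in the diff above all follow the same stateify save/restore protocol: a registered type reports its type name and field names, serializes each field by index into a state.Sink, and restores them by index from a state.Source. The following is an illustrative, hand-written sketch of that pattern for a hypothetical two-field options struct; it is not part of this commit, and the state import path is assumed to match the one used by the generated files.

package vfs

import (
	"gvisor.dev/gvisor/pkg/state"
)

// exampleOptions is a hypothetical type used only to illustrate the
// save/restore methods that stateify generates for the types above.
type exampleOptions struct {
	Flags uint32
	Name  string
}

// StateTypeName returns the fully qualified name used for registration.
func (e *exampleOptions) StateTypeName() string {
	return "pkg/sentry/vfs.exampleOptions"
}

// StateFields lists the saved fields in index order.
func (e *exampleOptions) StateFields() []string {
	return []string{
		"Flags",
		"Name",
	}
}

func (e *exampleOptions) beforeSave() {}

// StateSave writes each field to the sink at its index.
// +checklocksignore
func (e *exampleOptions) StateSave(stateSinkObject state.Sink) {
	e.beforeSave()
	stateSinkObject.Save(0, &e.Flags)
	stateSinkObject.Save(1, &e.Name)
}

func (e *exampleOptions) afterLoad() {}

// StateLoad reads each field back from the source at the same index.
// +checklocksignore
func (e *exampleOptions) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &e.Flags)
	stateSourceObject.Load(1, &e.Name)
}

func init() {
	// Registration makes the type discoverable during checkpoint/restore,
	// mirroring the init() block at the end of vfs_state_autogen.go.
	state.Register((*exampleOptions)(nil))
}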