diff options
Diffstat (limited to 'pkg/sentry/fs')
190 files changed, 38167 insertions, 0 deletions
diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD new file mode 100644 index 000000000..ea85ab33c --- /dev/null +++ b/pkg/sentry/fs/BUILD @@ -0,0 +1,135 @@ +load("//tools:defs.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +package(licenses = ["notice"]) + +go_library( + name = "fs", + srcs = [ + "attr.go", + "context.go", + "copy_up.go", + "dentry.go", + "dirent.go", + "dirent_cache.go", + "dirent_cache_limiter.go", + "dirent_list.go", + "dirent_state.go", + "event_list.go", + "file.go", + "file_operations.go", + "file_overlay.go", + "file_state.go", + "filesystems.go", + "flags.go", + "fs.go", + "inode.go", + "inode_inotify.go", + "inode_operations.go", + "inode_overlay.go", + "inotify.go", + "inotify_event.go", + "inotify_watch.go", + "mock.go", + "mount.go", + "mount_overlay.go", + "mounts.go", + "offset.go", + "overlay.go", + "path.go", + "restore.go", + "save.go", + "seek.go", + "splice.go", + "sync.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/context", + "//pkg/log", + "//pkg/metric", + "//pkg/p9", + "//pkg/refs", + "//pkg/secio", + "//pkg/sentry/arch", + "//pkg/sentry/device", + "//pkg/sentry/fs/lock", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/uniqueid", + "//pkg/sentry/usage", + "//pkg/state", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_template_instance( + name = "dirent_list", + out = "dirent_list.go", + package = "fs", + prefix = "dirent", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Dirent", + "Element": "*Dirent", + }, +) + +go_template_instance( + name = "event_list", + out = "event_list.go", + package = "fs", + prefix = "event", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Event", + "Element": 
"*Event", + }, +) + +go_test( + name = "fs_x_test", + size = "small", + srcs = [ + "copy_up_test.go", + "file_overlay_test.go", + "inode_overlay_test.go", + "mounts_test.go", + ], + deps = [ + ":fs", + "//pkg/context", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/kernel/contexttest", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + ], +) + +go_test( + name = "fs_test", + size = "small", + srcs = [ + "dirent_cache_test.go", + "dirent_refs_test.go", + "mount_test.go", + "path_test.go", + ], + library = ":fs", + deps = [ + "//pkg/context", + "//pkg/sentry/contexttest", + ], +) diff --git a/pkg/sentry/fs/README.md b/pkg/sentry/fs/README.md new file mode 100644 index 000000000..db4a1b730 --- /dev/null +++ b/pkg/sentry/fs/README.md @@ -0,0 +1,229 @@ +This package provides an implementation of the Linux virtual filesystem. + +[TOC] + +## Overview + +- An `fs.Dirent` caches an `fs.Inode` in memory at a path in the VFS, giving + the `fs.Inode` a relative position with respect to other `fs.Inode`s. + +- If an `fs.Dirent` is referenced by two file descriptors, then those file + descriptors are coherent with each other: they depend on the same + `fs.Inode`. + +- A mount point is an `fs.Dirent` for which `fs.Dirent.mounted` is true. It + exposes the root of a mounted filesystem. + +- The `fs.Inode` produced by a registered filesystem on mount(2) owns an + `fs.MountedFilesystem` from which other `fs.Inode`s will be looked up. For a + remote filesystem, the `fs.MountedFilesystem` owns the connection to that + remote filesystem. + +- In general: + +``` +fs.Inode <------------------------------ +| | +| | +produced by | +exactly one | +| responsible for the +| virtual identity of +v | +fs.MountedFilesystem ------------------- +``` + +Glossary: + +- VFS: virtual filesystem. + +- inode: a virtual file object holding a cached view of a file on a backing + filesystem (includes metadata and page caches). 
+ +- superblock: the virtual state of a mounted filesystem (e.g. the virtual + inode number set). + +- mount namespace: a view of the mounts under a root (during path traversal, + the VFS makes visible/follows the mount point that is in the current task's + mount namespace). + +## Save and restore + +An application's hard dependencies on filesystem state can be broken down into +two categories: + +- The state necessary to execute a traversal on or view the *virtual* + filesystem hierarchy, regardless of what files an application has open. + +- The state necessary to represent open files. + +The first is always necessary to save and restore. An application may never have +any open file descriptors, but across save and restore it should see a coherent +view of any mount namespace. NOTE(b/63601033): Currently only one "initial" +mount namespace is supported. + +The second is so that system calls across save and restore are coherent with +each other (e.g. so that unintended re-reads or overwrites do not occur). + +Specifically this state is: + +- An `fs.MountManager` containing mount points. + +- A `kernel.FDTable` containing pointers to open files. + +Anything else managed by the VFS that can be easily loaded into memory from a +filesystem is synced back to those filesystems and is not saved. Examples are +pages in page caches used for optimizations (i.e. readahead and writeback), and +directory entries used to accelerate path lookups. + +### Mount points + +Saving and restoring a mount point means saving and restoring: + +- The root of the mounted filesystem. + +- Mount flags, which control how the VFS interacts with the mounted + filesystem. + +- Any relevant metadata about the mounted filesystem. + +- All `fs.Inode`s referenced by the application that reside under the mount + point. + +`fs.MountedFilesystem` is metadata about a filesystem that is mounted. 
It is +referenced by every `fs.Inode` loaded into memory under the mount point +including the `fs.Inode` of the mount point itself. The `fs.MountedFilesystem` +maps file objects on the filesystem to a virtualized `fs.Inode` number and vice +versa. + +To restore all `fs.Inode`s under a given mount point, each `fs.Inode` leverages +its dependency on an `fs.MountedFilesystem`. Since the `fs.MountedFilesystem` +knows how an `fs.Inode` maps to a file object on a backing filesystem, this +mapping can be trivially consulted by each `fs.Inode` when the `fs.Inode` is +restored. + +In detail, a mount point is saved in two steps: + +- First, after the kernel is paused but before state.Save, we walk all mount + namespaces and install a mapping from `fs.Inode` numbers to file paths + relative to the root of the mounted filesystem in each + `fs.MountedFilesystem`. This is subsequently called the set of `fs.Inode` + mappings. + +- Second, during state.Save, each `fs.MountedFilesystem` decides whether to + save the set of `fs.Inode` mappings. In-memory filesystems, like tmpfs, have + no need to save a set of `fs.Inode` mappings, since the `fs.Inode`s can be + entirely encoded in state file. Each `fs.MountedFilesystem` also optionally + saves the device name from when the filesystem was originally mounted. Each + `fs.Inode` saves its virtual identifier and a reference to a + `fs.MountedFilesystem`. + +A mount point is restored in two steps: + +- First, before state.Load, all mount configurations are stored in a global + `fs.RestoreEnvironment`. This tells us what mount points the user wants to + restore and how to re-establish pointers to backing filesystems. + +- Second, during state.Load, each `fs.MountedFilesystem` optionally searches + for a mount in the `fs.RestoreEnvironment` that matches its saved device + name. The `fs.MountedFilesystem` then reestablishes a pointer to the root of + the mounted filesystem. 
For example, the mount specification provides the + network connection for a mounted remote filesystem client to communicate + with its remote file server. The `fs.MountedFilesystem` also trivially loads + its set of `fs.Inode` mappings. When an `fs.Inode` is encountered, the + `fs.Inode` loads its virtual identifier and its reference a + `fs.MountedFilesystem`. It uses the `fs.MountedFilesystem` to obtain the + root of the mounted filesystem and the `fs.Inode` mappings to obtain the + relative file path to its data. With these, the `fs.Inode` re-establishes a + pointer to its file object. + +A mount point can trivially restore its `fs.Inode`s in parallel since +`fs.Inode`s have a restore dependency on their `fs.MountedFilesystem` and not on +each other. + +### Open files + +An `fs.File` references the following filesystem objects: + +```go +fs.File -> fs.Dirent -> fs.Inode -> fs.MountedFilesystem +``` + +The `fs.Inode` is restored using its `fs.MountedFilesystem`. The +[Mount points](#mount-points) section above describes how this happens in +detail. The `fs.Dirent` restores its pointer to an `fs.Inode`, pointers to +parent and children `fs.Dirents`, and the basename of the file. + +Otherwise an `fs.File` restores flags, an offset, and a unique identifier (only +used internally). + +It may use the `fs.Inode`, which it indirectly holds a reference on through the +`fs.Dirent`, to reestablish an open file handle on the backing filesystem (e.g. +to continue reading and writing). + +## Overlay + +The overlay implementation in the fs package takes Linux overlayfs as a frame of +reference but corrects for several POSIX consistency errors. + +In Linux overlayfs, the `struct inode` used for reading and writing to the same +file may be different. This is because the `struct inode` is dissociated with +the process of copying up the file from the upper to the lower directory. 
Since +flock(2) and fcntl(2) locks, inotify(7) watches, page caches, and a file's +identity are all stored directly or indirectly off the `struct inode`, these +properties of the `struct inode` may be stale after the first modification. This +can lead to file locking bugs, missed inotify events, and inconsistent data in +shared memory mappings of files, to name a few problems. + +The fs package maintains a single `fs.Inode` to represent a directory entry in +an overlay and defines operations on this `fs.Inode` which synchronize with the +copy up process. This achieves several things: + ++ File locks, inotify watches, and the identity of the file need not be copied + at all. + ++ Memory mappings of files coordinate with the copy up process so that if a + file in the lower directory is memory mapped, all references to it are + invalidated, forcing the application to re-fault on memory mappings of the + file under the upper directory. + +The `fs.Inode` holds metadata about files in the upper and/or lower directories +via an `fs.overlayEntry`. The `fs.overlayEntry` implements the `fs.Mappable` +interface. It multiplexes between upper and lower directory memory mappings and +stores a copy of memory references so they can be transferred to the upper +directory `fs.Mappable` when the file is copied up. + +The lower filesystem in an overlay may contain another (nested) overlay, but the +upper filesystem may not contain another overlay. In other words, nested +overlays form a tree structure that only allows branching in the lower +filesystem. + +Caching decisions in the overlay are delegated to the upper filesystem, meaning +that the Keep and Revalidate methods on the overlay return the same values as +the upper filesystem. A small wrinkle is that the lower filesystem is not +allowed to return `true` from Revalidate, as the overlay can not reload inodes +from the lower filesystem. A lower filesystem that does return `true` from +Revalidate will trigger a panic. 
+ +The `fs.Inode` also holds a reference to a `fs.MountedFilesystem` that +normalizes across the mounted filesystem state of the upper and lower +directories. + +When a file is copied from the lower to the upper directory, attempts to +interact with the file block until the copy completes. All copying synchronizes +with rename(2). + +## Future Work + +### Overlay + +When a file is copied from a lower directory to an upper directory, several +locks are taken: the global renamuMu and the copyMu of the `fs.Inode` being +copied. This blocks operations on the file, including fault handling of memory +mappings. Performance could be improved by copying files into a temporary +directory that resides on the same filesystem as the upper directory and doing +an atomic rename, holding locks only during the rename operation. + +Additionally files are copied up synchronously. For large files, this causes a +noticeable latency. Performance could be improved by pipelining copies at +non-overlapping file offsets. diff --git a/pkg/sentry/fs/anon/BUILD b/pkg/sentry/fs/anon/BUILD new file mode 100644 index 000000000..aedcecfa1 --- /dev/null +++ b/pkg/sentry/fs/anon/BUILD @@ -0,0 +1,20 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "anon", + srcs = [ + "anon.go", + "device.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/anon/anon.go b/pkg/sentry/fs/anon/anon.go new file mode 100644 index 000000000..5c421f5fb --- /dev/null +++ b/pkg/sentry/fs/anon/anon.go @@ -0,0 +1,42 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package anon implements an anonymous inode, useful for implementing +// inodes for pseudo filesystems. +package anon + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/usermem" +) + +// NewInode constructs an anonymous Inode that is not associated +// with any real filesystem. Some types depend on completely pseudo +// "anon" inodes (eventfds, epollfds, etc). +func NewInode(ctx context.Context) *fs.Inode { + iops := &fsutil.SimpleFileInode{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, linux.ANON_INODE_FS_MAGIC), + } + return fs.NewInode(ctx, iops, fs.NewPseudoMountSource(ctx), fs.StableAttr{ + Type: fs.Anonymous, + DeviceID: PseudoDevice.DeviceID(), + InodeID: PseudoDevice.NextIno(), + BlockSize: usermem.PageSize, + }) +} diff --git a/pkg/sentry/fs/anon/device.go b/pkg/sentry/fs/anon/device.go new file mode 100644 index 000000000..d9ac14956 --- /dev/null +++ b/pkg/sentry/fs/anon/device.go @@ -0,0 +1,22 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package anon + +import ( + "gvisor.dev/gvisor/pkg/sentry/device" +) + +// PseudoDevice is the device on which all anonymous inodes reside. +var PseudoDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/attr.go b/pkg/sentry/fs/attr.go new file mode 100644 index 000000000..f60bd423d --- /dev/null +++ b/pkg/sentry/fs/attr.go @@ -0,0 +1,493 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "syscall" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" +) + +// InodeType enumerates types of Inodes. +type InodeType int + +const ( + // RegularFile is a regular file. + RegularFile InodeType = iota + + // SpecialFile is a file that doesn't support SeekEnd. It is used for + // things like proc files. + SpecialFile + + // Directory is a directory. 
+ Directory + + // SpecialDirectory is a directory that *does* support SeekEnd. It's + // the opposite of the SpecialFile scenario above. It similarly + // supports proc files. + SpecialDirectory + + // Symlink is a symbolic link. + Symlink + + // Pipe is a pipe (named or regular). + Pipe + + // Socket is a socket. + Socket + + // CharacterDevice is a character device. + CharacterDevice + + // BlockDevice is a block device. + BlockDevice + + // Anonymous is an anonymous type when none of the above apply. + // Epoll fds and event-driven fds fit this category. + Anonymous +) + +// String returns a human-readable representation of the InodeType. +func (n InodeType) String() string { + switch n { + case RegularFile, SpecialFile: + return "file" + case Directory, SpecialDirectory: + return "directory" + case Symlink: + return "symlink" + case Pipe: + return "pipe" + case Socket: + return "socket" + case CharacterDevice: + return "character-device" + case BlockDevice: + return "block-device" + case Anonymous: + return "anonymous" + default: + return "unknown" + } +} + +// LinuxType returns the linux file type for this inode type. +func (n InodeType) LinuxType() uint32 { + switch n { + case RegularFile, SpecialFile: + return linux.ModeRegular + case Directory, SpecialDirectory: + return linux.ModeDirectory + case Symlink: + return linux.ModeSymlink + case Pipe: + return linux.ModeNamedPipe + case CharacterDevice: + return linux.ModeCharacterDevice + case BlockDevice: + return linux.ModeBlockDevice + case Socket: + return linux.ModeSocket + default: + return 0 + } +} + +// ToDirentType converts an InodeType to a linux dirent type field. 
+func ToDirentType(nodeType InodeType) uint8 { + switch nodeType { + case RegularFile, SpecialFile: + return linux.DT_REG + case Symlink: + return linux.DT_LNK + case Directory, SpecialDirectory: + return linux.DT_DIR + case Pipe: + return linux.DT_FIFO + case CharacterDevice: + return linux.DT_CHR + case BlockDevice: + return linux.DT_BLK + case Socket: + return linux.DT_SOCK + default: + return linux.DT_UNKNOWN + } +} + +// ToInodeType coverts a linux file type to InodeType. +func ToInodeType(linuxFileType linux.FileMode) InodeType { + switch linuxFileType { + case linux.ModeRegular: + return RegularFile + case linux.ModeDirectory: + return Directory + case linux.ModeSymlink: + return Symlink + case linux.ModeNamedPipe: + return Pipe + case linux.ModeCharacterDevice: + return CharacterDevice + case linux.ModeBlockDevice: + return BlockDevice + case linux.ModeSocket: + return Socket + default: + panic(fmt.Sprintf("unknown file mode: %d", linuxFileType)) + } +} + +// StableAttr contains Inode attributes that will be stable throughout the +// lifetime of the Inode. +// +// +stateify savable +type StableAttr struct { + // Type is the InodeType of a InodeOperations. + Type InodeType + + // DeviceID is the device on which a InodeOperations resides. + DeviceID uint64 + + // InodeID uniquely identifies InodeOperations on its device. + InodeID uint64 + + // BlockSize is the block size of data backing this InodeOperations. + BlockSize int64 + + // DeviceFileMajor is the major device number of this Node, if it is a + // device file. + DeviceFileMajor uint16 + + // DeviceFileMinor is the minor device number of this Node, if it is a + // device file. + DeviceFileMinor uint32 +} + +// IsRegular returns true if StableAttr.Type matches a regular file. +func IsRegular(s StableAttr) bool { + return s.Type == RegularFile +} + +// IsFile returns true if StableAttr.Type matches any type of file. 
+func IsFile(s StableAttr) bool { + return s.Type == RegularFile || s.Type == SpecialFile +} + +// IsDir returns true if StableAttr.Type matches any type of directory. +func IsDir(s StableAttr) bool { + return s.Type == Directory || s.Type == SpecialDirectory +} + +// IsSymlink returns true if StableAttr.Type matches a symlink. +func IsSymlink(s StableAttr) bool { + return s.Type == Symlink +} + +// IsPipe returns true if StableAttr.Type matches any type of pipe. +func IsPipe(s StableAttr) bool { + return s.Type == Pipe +} + +// IsAnonymous returns true if StableAttr.Type matches any type of anonymous. +func IsAnonymous(s StableAttr) bool { + return s.Type == Anonymous +} + +// IsSocket returns true if StableAttr.Type matches any type of socket. +func IsSocket(s StableAttr) bool { + return s.Type == Socket +} + +// IsCharDevice returns true if StableAttr.Type matches a character device. +func IsCharDevice(s StableAttr) bool { + return s.Type == CharacterDevice +} + +// UnstableAttr contains Inode attributes that may change over the lifetime +// of the Inode. +// +// +stateify savable +type UnstableAttr struct { + // Size is the file size in bytes. + Size int64 + + // Usage is the actual data usage in bytes. + Usage int64 + + // Perms is the protection (read/write/execute for user/group/other). + Perms FilePermissions + + // Owner describes the ownership of this file. + Owner FileOwner + + // AccessTime is the time of last access + AccessTime ktime.Time + + // ModificationTime is the time of last modification. + ModificationTime ktime.Time + + // StatusChangeTime is the time of last attribute modification. + StatusChangeTime ktime.Time + + // Links is the number of hard links. + Links uint64 +} + +// SetOwner sets the owner and group if they are valid. +// +// This method is NOT thread-safe. Callers must prevent concurrent calls. 
+func (ua *UnstableAttr) SetOwner(ctx context.Context, owner FileOwner) { + if owner.UID.Ok() { + ua.Owner.UID = owner.UID + } + if owner.GID.Ok() { + ua.Owner.GID = owner.GID + } + ua.StatusChangeTime = ktime.NowFromContext(ctx) +} + +// SetPermissions sets the permissions. +// +// This method is NOT thread-safe. Callers must prevent concurrent calls. +func (ua *UnstableAttr) SetPermissions(ctx context.Context, p FilePermissions) { + ua.Perms = p + ua.StatusChangeTime = ktime.NowFromContext(ctx) +} + +// SetTimestamps sets the timestamps according to the TimeSpec. +// +// This method is NOT thread-safe. Callers must prevent concurrent calls. +func (ua *UnstableAttr) SetTimestamps(ctx context.Context, ts TimeSpec) { + if ts.ATimeOmit && ts.MTimeOmit { + return + } + + now := ktime.NowFromContext(ctx) + if !ts.ATimeOmit { + if ts.ATimeSetSystemTime { + ua.AccessTime = now + } else { + ua.AccessTime = ts.ATime + } + } + if !ts.MTimeOmit { + if ts.MTimeSetSystemTime { + ua.ModificationTime = now + } else { + ua.ModificationTime = ts.MTime + } + } + ua.StatusChangeTime = now +} + +// WithCurrentTime returns u with AccessTime == ModificationTime == current time. +func WithCurrentTime(ctx context.Context, u UnstableAttr) UnstableAttr { + t := ktime.NowFromContext(ctx) + u.AccessTime = t + u.ModificationTime = t + u.StatusChangeTime = t + return u +} + +// AttrMask contains fields to mask StableAttr and UnstableAttr. +// +// +stateify savable +type AttrMask struct { + Type bool + DeviceID bool + InodeID bool + BlockSize bool + Size bool + Usage bool + Perms bool + UID bool + GID bool + AccessTime bool + ModificationTime bool + StatusChangeTime bool + Links bool +} + +// Empty returns true if all fields in AttrMask are false. +func (a AttrMask) Empty() bool { + return a == AttrMask{} +} + +// PermMask are file access permissions. +// +// +stateify savable +type PermMask struct { + // Read indicates reading is permitted. 
+ Read bool + + // Write indicates writing is permitted. + Write bool + + // Execute indicates execution is permitted. + Execute bool +} + +// OnlyRead returns true when only the read bit is set. +func (p PermMask) OnlyRead() bool { + return p.Read && !p.Write && !p.Execute +} + +// String implements the fmt.Stringer interface for PermMask. +func (p PermMask) String() string { + return fmt.Sprintf("PermMask{Read: %v, Write: %v, Execute: %v}", p.Read, p.Write, p.Execute) +} + +// Mode returns the system mode (syscall.S_IXOTH, etc.) for these permissions +// in the "other" bits. +func (p PermMask) Mode() (mode os.FileMode) { + if p.Read { + mode |= syscall.S_IROTH + } + if p.Write { + mode |= syscall.S_IWOTH + } + if p.Execute { + mode |= syscall.S_IXOTH + } + return +} + +// SupersetOf returns true iff the permissions in p are a superset of the +// permissions in other. +func (p PermMask) SupersetOf(other PermMask) bool { + if !p.Read && other.Read { + return false + } + if !p.Write && other.Write { + return false + } + if !p.Execute && other.Execute { + return false + } + return true +} + +// FilePermissions represents the permissions of a file, with +// Read/Write/Execute bits for user, group, and other. +// +// +stateify savable +type FilePermissions struct { + User PermMask + Group PermMask + Other PermMask + + // Sticky, if set on directories, restricts renaming and deletion of + // files in those directories to the directory owner, file owner, or + // CAP_FOWNER. The sticky bit is ignored when set on other files. + Sticky bool + + // SetUID executables can call UID-setting syscalls without CAP_SETUID. + SetUID bool + + // SetGID executables can call GID-setting syscalls without CAP_SETGID. + SetGID bool +} + +// PermsFromMode takes the Other permissions (last 3 bits) of a FileMode and +// returns a set of PermMask. 
+func PermsFromMode(mode linux.FileMode) (perms PermMask) { + perms.Read = mode&linux.ModeOtherRead != 0 + perms.Write = mode&linux.ModeOtherWrite != 0 + perms.Execute = mode&linux.ModeOtherExec != 0 + return +} + +// FilePermsFromP9 converts a p9.FileMode to a FilePermissions struct. +func FilePermsFromP9(mode p9.FileMode) FilePermissions { + return FilePermsFromMode(linux.FileMode(mode)) +} + +// FilePermsFromMode converts a system file mode to a FilePermissions struct. +func FilePermsFromMode(mode linux.FileMode) (fp FilePermissions) { + perm := mode.Permissions() + fp.Other = PermsFromMode(perm) + fp.Group = PermsFromMode(perm >> 3) + fp.User = PermsFromMode(perm >> 6) + fp.Sticky = mode&linux.ModeSticky == linux.ModeSticky + fp.SetUID = mode&linux.ModeSetUID == linux.ModeSetUID + fp.SetGID = mode&linux.ModeSetGID == linux.ModeSetGID + return +} + +// LinuxMode returns the linux mode_t representation of these permissions. +func (f FilePermissions) LinuxMode() linux.FileMode { + m := linux.FileMode(f.User.Mode()<<6 | f.Group.Mode()<<3 | f.Other.Mode()) + if f.SetUID { + m |= linux.ModeSetUID + } + if f.SetGID { + m |= linux.ModeSetGID + } + if f.Sticky { + m |= linux.ModeSticky + } + return m +} + +// OSMode returns the Go runtime's OS independent os.FileMode representation of +// these permissions. +func (f FilePermissions) OSMode() os.FileMode { + m := os.FileMode(f.User.Mode()<<6 | f.Group.Mode()<<3 | f.Other.Mode()) + if f.SetUID { + m |= os.ModeSetuid + } + if f.SetGID { + m |= os.ModeSetgid + } + if f.Sticky { + m |= os.ModeSticky + } + return m +} + +// AnyExecute returns true if any of U/G/O have the execute bit set. +func (f FilePermissions) AnyExecute() bool { + return f.User.Execute || f.Group.Execute || f.Other.Execute +} + +// AnyWrite returns true if any of U/G/O have the write bit set. 
+func (f FilePermissions) AnyWrite() bool { + return f.User.Write || f.Group.Write || f.Other.Write +} + +// AnyRead returns true if any of U/G/O have the read bit set. +func (f FilePermissions) AnyRead() bool { + return f.User.Read || f.Group.Read || f.Other.Read +} + +// FileOwner represents ownership of a file. +// +// +stateify savable +type FileOwner struct { + UID auth.KUID + GID auth.KGID +} + +// RootOwner corresponds to KUID/KGID 0/0. +var RootOwner = FileOwner{ + UID: auth.RootKUID, + GID: auth.RootKGID, +} diff --git a/pkg/sentry/fs/context.go b/pkg/sentry/fs/context.go new file mode 100644 index 000000000..0fbd60056 --- /dev/null +++ b/pkg/sentry/fs/context.go @@ -0,0 +1,138 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" +) + +// contextID is the fs package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxRoot is a Context.Value key for a Dirent. + CtxRoot contextID = iota + + // CtxDirentCacheLimiter is a Context.Value key for DirentCacheLimiter. + CtxDirentCacheLimiter +) + +// ContextCanAccessFile determines whether `file` can be accessed in the requested way +// (for reading, writing, or execution) using the caller's credentials and user +// namespace, as does Linux's fs/namei.c:generic_permission. 
+func ContextCanAccessFile(ctx context.Context, inode *Inode, reqPerms PermMask) bool {
+	creds := auth.CredentialsFromContext(ctx)
+	uattr, err := inode.UnstableAttr(ctx)
+	if err != nil {
+		// Attributes are unavailable; deny rather than guess.
+		return false
+	}
+
+	// Select the permission triplet that applies to the caller:
+	// owner > group > other, checked in that order.
+	p := uattr.Perms.Other
+	// Are we owner or in group?
+	if uattr.Owner.UID == creds.EffectiveKUID {
+		p = uattr.Perms.User
+	} else if creds.InGroup(uattr.Owner.GID) {
+		p = uattr.Perms.Group
+	}
+
+	// Do not allow programs to be executed if MS_NOEXEC is set.
+	if IsFile(inode.StableAttr) && reqPerms.Execute && inode.MountSource.Flags.NoExec {
+		return false
+	}
+
+	// Are permissions satisfied without capability checks?
+	if p.SupersetOf(reqPerms) {
+		return true
+	}
+
+	if IsDir(inode.StableAttr) {
+		// CAP_DAC_OVERRIDE can override any perms on directories.
+		if inode.CheckCapability(ctx, linux.CAP_DAC_OVERRIDE) {
+			return true
+		}
+
+		// CAP_DAC_READ_SEARCH can normally only override Read perms,
+		// but for directories it can also override execution.
+		if !reqPerms.Write && inode.CheckCapability(ctx, linux.CAP_DAC_READ_SEARCH) {
+			return true
+		}
+	}
+
+	// CAP_DAC_OVERRIDE can always override Read/Write.
+	// Can override executable only when at least one execute bit is set.
+	if !reqPerms.Execute || uattr.Perms.AnyExecute() {
+		if inode.CheckCapability(ctx, linux.CAP_DAC_OVERRIDE) {
+			return true
+		}
+	}
+
+	// Read perms can be overridden by CAP_DAC_READ_SEARCH.
+	if reqPerms.OnlyRead() && inode.CheckCapability(ctx, linux.CAP_DAC_READ_SEARCH) {
+		return true
+	}
+	return false
+}
+
+// FileOwnerFromContext returns a FileOwner using the effective user and group
+// IDs used by ctx.
+func FileOwnerFromContext(ctx context.Context) FileOwner {
+	creds := auth.CredentialsFromContext(ctx)
+	return FileOwner{creds.EffectiveKUID, creds.EffectiveKGID}
+}
+
+// RootFromContext returns the root of the virtual filesystem observed by ctx,
+// or nil if ctx is not associated with a virtual filesystem. If
+// RootFromContext returns a non-nil fs.Dirent, a reference is taken on it.
+func RootFromContext(ctx context.Context) *Dirent {
+	// The IncRef is performed by rootContext.Value (or whichever context
+	// implementation serves CtxRoot); callers must DecRef the result.
+	if v := ctx.Value(CtxRoot); v != nil {
+		return v.(*Dirent)
+	}
+	return nil
+}
+
+// DirentCacheLimiterFromContext returns the DirentCacheLimiter used by ctx, or
+// nil if ctx does not have a dirent cache limiter.
+func DirentCacheLimiterFromContext(ctx context.Context) *DirentCacheLimiter {
+	if v := ctx.Value(CtxDirentCacheLimiter); v != nil {
+		return v.(*DirentCacheLimiter)
+	}
+	return nil
+}
+
+// rootContext wraps a context.Context and additionally serves a root Dirent
+// under the CtxRoot key.
+type rootContext struct {
+	context.Context
+	root *Dirent
+}
+
+// WithRoot returns a copy of ctx with the given root.
+func WithRoot(ctx context.Context, root *Dirent) context.Context {
+	return &rootContext{
+		Context: ctx,
+		root:    root,
+	}
+}
+
+// Value implements Context.Value.
+//
+// For CtxRoot it takes a reference on the root Dirent before returning it,
+// matching the contract documented on RootFromContext.
+func (rc rootContext) Value(key interface{}) interface{} {
+	switch key {
+	case CtxRoot:
+		rc.root.IncRef()
+		return rc.root
+	default:
+		return rc.Context.Value(key)
+	}
+}
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
new file mode 100644
index 000000000..ab1424c95
--- /dev/null
+++ b/pkg/sentry/fs/copy_up.go
@@ -0,0 +1,436 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package fs + +import ( + "fmt" + "io" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +// copyUp copies a file in an overlay from a lower filesystem to an +// upper filesytem so that the file can be modified in the upper +// filesystem. Copying a file involves several steps: +// +// - All parent directories of the file are created in the upper +// filesystem if they don't exist there. For instance: +// +// upper /dir0 +// lower /dir0/dir1/file +// +// copyUp of /dir0/dir1/file creates /dir0/dir1 in order to create +// /dir0/dir1/file. +// +// - The file content is copied from the lower file to the upper +// file. For symlinks this is the symlink target. For directories, +// upper directory entries are merged with lower directory entries +// so there is no need to copy any entries. +// +// - A subset of file attributes of the lower file are set on the +// upper file. These are the file owner, the file timestamps, +// and all non-overlay extended attributes. copyUp will fail if +// the upper filesystem does not support the setting of these +// attributes. +// +// The file's permissions are set when the file is created and its +// size will be brought up to date when its contents are copied. +// Notably no attempt is made to bring link count up to date because +// hard links are currently not preserved across overlay filesystems. +// +// - Memory mappings of the lower file are invalidated and memory +// references are transferred to the upper file. From this point on, +// memory mappings of the file will be backed by content in the upper +// filesystem. +// +// Synchronization: +// +// copyUp synchronizes with rename(2) using renameMu to ensure that +// parentage does not change while a file is being copied. 
In the context +// of rename(2), copyUpLockedForRename should be used to avoid deadlock on +// renameMu. +// +// The following operations synchronize with copyUp using copyMu: +// +// - InodeOperations, i.e. to ensure that looking up a directory takes +// into account new upper filesystem directories created by copy up, +// which subsequently can be modified. +// +// - FileOperations, i.e. to ensure that reading from a file does not +// continue using a stale, lower filesystem handle when the file is +// written to. +// +// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu. +// +// Caveats: +// +// If any step in copying up a file fails, copyUp cleans the upper +// filesystem of any partially up-to-date file. If this cleanup fails, +// the overlay may be in an unacceptable, inconsistent state, so copyUp +// panics. If copyUp fails because any step (above) fails, a generic +// error is returned. +// +// copyUp currently makes no attempt to optimize copying up file content. +// For large files, this means that copyUp blocks until the entire file +// is copied synchronously. +func copyUp(ctx context.Context, d *Dirent) error { + renameMu.RLock() + defer renameMu.RUnlock() + return copyUpLockedForRename(ctx, d) +} + +// copyUpLockedForRename is the same as copyUp except that it does not lock +// renameMu. +// +// It copies each component of d that does not yet exist in the upper +// filesystem. If d already exists in the upper filesystem, it is a no-op. +// +// Any error returned indicates a failure to copy all of d. This may +// leave the upper filesystem filled with any number of parent directories +// but the upper filesystem will never be in an inconsistent state. +// +// Preconditions: +// - d.Inode.overlay is non-nil. +func copyUpLockedForRename(ctx context.Context, d *Dirent) error { + for { + // Did we race with another copy up or does there + // already exist something in the upper filesystem + // for d? 
+ d.Inode.overlay.copyMu.RLock() + if d.Inode.overlay.upper != nil { + d.Inode.overlay.copyMu.RUnlock() + // Done, d is in the upper filesystem. + return nil + } + d.Inode.overlay.copyMu.RUnlock() + + // Find the next component to copy up. We will work our way + // down to the last component of d and finally copy it. + next := findNextCopyUp(ctx, d) + + // Attempt to copy. + if err := doCopyUp(ctx, next); err != nil { + return err + } + } +} + +// findNextCopyUp finds the next component of d from root that does not +// yet exist in the upper filesystem. The parent of this component is +// also returned, which is the root of the overlay in the worst case. +func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent { + next := d + for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ { + // Does this parent have a non-nil upper Inode? + parent.Inode.overlay.copyMu.RLock() + if parent.Inode.overlay.upper != nil { + parent.Inode.overlay.copyMu.RUnlock() + // Note that since we found an upper, it is stable. + return next + } + parent.Inode.overlay.copyMu.RUnlock() + + // Continue searching for a parent with a non-nil + // upper Inode. + next = parent + parent = next.parent + } +} + +func doCopyUp(ctx context.Context, d *Dirent) error { + // Fail fast on Inode types we won't be able to copy up anyways. These + // Inodes may block in GetFile while holding copyMu for reading. If we + // then try to take copyMu for writing here, we'd deadlock. + t := d.Inode.overlay.lower.StableAttr.Type + if t != RegularFile && t != Directory && t != Symlink { + return syserror.EINVAL + } + + // Wait to get exclusive access to the upper Inode. + d.Inode.overlay.copyMu.Lock() + defer d.Inode.overlay.copyMu.Unlock() + if d.Inode.overlay.upper != nil { + // We raced with another doCopyUp, no problem. + return nil + } + + // Perform the copy. + return copyUpLocked(ctx, d.parent, d) +} + +// copyUpLocked creates a copy of next in the upper filesystem of parent. 
+// +// copyUpLocked must be called with d.Inode.overlay.copyMu locked. +// +// Returns a generic error on failure. +// +// Preconditions: +// - parent.Inode.overlay.upper must be non-nil. +// - next.Inode.overlay.copyMu must be locked writable. +// - next.Inode.overlay.lower must be non-nil. +// - next.Inode.overlay.lower.StableAttr.Type must be RegularFile, Directory, +// or Symlink. +// - upper filesystem must support setting file ownership and timestamps. +func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error { + // Extract the attributes of the file we wish to copy. + attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx) + if err != nil { + log.Warningf("copy up failed to get lower attributes: %v", err) + return syserror.EIO + } + + var childUpperInode *Inode + parentUpper := parent.Inode.overlay.upper + root := RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + + // Create the file in the upper filesystem and get an Inode for it. + switch next.Inode.StableAttr.Type { + case RegularFile: + childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms) + if err != nil { + log.Warningf("copy up failed to create file: %v", err) + return syserror.EIO + } + defer childFile.DecRef() + childUpperInode = childFile.Dirent.Inode + + case Directory: + if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil { + log.Warningf("copy up failed to create directory: %v", err) + return syserror.EIO + } + childUpper, err := parentUpper.Lookup(ctx, next.name) + if err != nil { + werr := fmt.Errorf("copy up failed to lookup directory: %v", err) + cleanupUpper(ctx, parentUpper, next.name, werr) + return syserror.EIO + } + defer childUpper.DecRef() + childUpperInode = childUpper.Inode + + case Symlink: + childLower := next.Inode.overlay.lower + link, err := childLower.Readlink(ctx) + if err != nil { + log.Warningf("copy up failed to read symlink value: %v", err) + return 
syserror.EIO + } + if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil { + log.Warningf("copy up failed to create symlink: %v", err) + return syserror.EIO + } + childUpper, err := parentUpper.Lookup(ctx, next.name) + if err != nil { + werr := fmt.Errorf("copy up failed to lookup symlink: %v", err) + cleanupUpper(ctx, parentUpper, next.name, werr) + return syserror.EIO + } + defer childUpper.DecRef() + childUpperInode = childUpper.Inode + + default: + panic(fmt.Sprintf("copy up of invalid type %v on %+v", next.Inode.StableAttr.Type, next)) + } + + // Bring file attributes up to date. This does not include size, which will be + // brought up to date with copyContentsLocked. + if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil { + werr := fmt.Errorf("copy up failed to copy up attributes: %v", err) + cleanupUpper(ctx, parentUpper, next.name, werr) + return syserror.EIO + } + + // Copy the entire file. + if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil { + werr := fmt.Errorf("copy up failed to copy up contents: %v", err) + cleanupUpper(ctx, parentUpper, next.name, werr) + return syserror.EIO + } + + lowerMappable := next.Inode.overlay.lower.Mappable() + upperMappable := childUpperInode.Mappable() + if lowerMappable != nil && upperMappable == nil { + werr := fmt.Errorf("copy up failed: cannot ensure memory mapping coherence") + cleanupUpper(ctx, parentUpper, next.name, werr) + return syserror.EIO + } + + // Propagate memory mappings to the upper Inode. + next.Inode.overlay.mapsMu.Lock() + defer next.Inode.overlay.mapsMu.Unlock() + if upperMappable != nil { + // Remember which mappings we added so we can remove them on failure. 
+ allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange) + for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + added := make(memmap.MappingsOfRange) + for m := range seg.Value() { + if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable); err != nil { + for m := range added { + upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable) + } + for mr, mappings := range allAdded { + for m := range mappings { + upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start, m.Writable) + } + } + return err + } + added[m] = struct{}{} + } + allAdded[seg.Range()] = added + } + } + + // Take a reference on the upper Inode (transferred to + // next.Inode.overlay.upper) and make new translations use it. + next.Inode.overlay.dataMu.Lock() + childUpperInode.IncRef() + next.Inode.overlay.upper = childUpperInode + next.Inode.overlay.dataMu.Unlock() + + // Invalidate existing translations through the lower Inode. + next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{}) + + // Remove existing memory mappings from the lower Inode. + if lowerMappable != nil { + for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + for m := range seg.Value() { + lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable) + } + } + } + + return nil +} + +// cleanupUpper is called when copy-up fails. It logs the copy-up error and +// attempts to remove name from parent. If that fails, then it panics. +func cleanupUpper(ctx context.Context, parent *Inode, name string, copyUpErr error) { + log.Warningf(copyUpErr.Error()) + if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil { + // Unfortunately we don't have much choice. We shouldn't + // willingly give the caller access to a nonsense filesystem. 
+ panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: copyUp got error: %v; then cleanup failed to remove %q from upper filesystem: %v.", copyUpErr, name, err)) + } +} + +// copyUpBuffers is a buffer pool for copying file content. The buffer +// size is the same used by io.Copy. +var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }} + +// copyContentsLocked copies the contents of lower to upper. It panics if +// less than size bytes can be copied. +func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size int64) error { + // We don't support copying up for anything other than regular files. + if lower.StableAttr.Type != RegularFile { + return nil + } + + // Get a handle to the upper filesystem, which we will write to. + upperFile, err := overlayFile(ctx, upper, FileFlags{Write: true}) + if err != nil { + return err + } + defer upperFile.DecRef() + + // Get a handle to the lower filesystem, which we will read from. + lowerFile, err := overlayFile(ctx, lower, FileFlags{Read: true}) + if err != nil { + return err + } + defer lowerFile.DecRef() + + // Use a buffer pool to minimize allocations. + buf := copyUpBuffers.Get().([]byte) + defer copyUpBuffers.Put(buf) + + // Transfer the contents. + // + // One might be able to optimize this by doing parallel reads, parallel writes and reads, larger + // buffers, etc. But we really don't know anything about the underlying implementation, so these + // optimizations could be self-defeating. So we leave this as simple as possible. + var offset int64 + for { + nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset) + if err != nil && err != io.EOF { + return err + } + if nr == 0 { + if offset != size { + // Same as in cleanupUpper, we cannot live + // with ourselves if we do anything less. 
+ panic(fmt.Sprintf("filesystem is in an inconsistent state: wrote only %d bytes of %d sized file", offset, size)) + } + return nil + } + nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset) + if err != nil { + return err + } + offset += nw + } +} + +// copyAttributesLocked copies a subset of lower's attributes to upper, +// specifically owner, timestamps (except of status change time), and +// extended attributes. Notably no attempt is made to copy link count. +// Size and permissions are set on upper when the file content is copied +// and when the file is created respectively. +func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error { + // Extract attributes from the lower filesystem. + lowerAttr, err := lower.UnstableAttr(ctx) + if err != nil { + return err + } + lowerXattr, err := lower.ListXattr(ctx, linux.XATTR_SIZE_MAX) + if err != nil && err != syserror.EOPNOTSUPP { + return err + } + + // Set the attributes on the upper filesystem. + if err := upper.InodeOperations.SetOwner(ctx, upper, lowerAttr.Owner); err != nil { + return err + } + if err := upper.InodeOperations.SetTimestamps(ctx, upper, TimeSpec{ + ATime: lowerAttr.AccessTime, + MTime: lowerAttr.ModificationTime, + }); err != nil { + return err + } + for name := range lowerXattr { + // Don't copy-up attributes that configure an overlay in the + // lower. + if isXattrOverlay(name) { + continue + } + value, err := lower.GetXattr(ctx, name, linux.XATTR_SIZE_MAX) + if err != nil { + return err + } + if err := upper.InodeOperations.SetXattr(ctx, upper, name, value, 0 /* flags */); err != nil { + return err + } + } + return nil +} diff --git a/pkg/sentry/fs/copy_up_test.go b/pkg/sentry/fs/copy_up_test.go new file mode 100644 index 000000000..91792d9fe --- /dev/null +++ b/pkg/sentry/fs/copy_up_test.go @@ -0,0 +1,183 @@ +// Copyright 2018 The gVisor Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs_test
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"testing"
+
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	_ "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs"
+	"gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/usermem"
+)
+
+const (
+	// origFileSize is the original file size. This many bytes should be
+	// copied up before the test file is modified.
+	origFileSize = 4096
+
+	// truncateFileSize is the size to truncate all test files.
+	truncateFileSize = 10
+)
+
+// TestConcurrentCopyUp is a copy up stress test for an overlay.
+//
+// It creates a 64-level deep directory tree in the lower filesystem and
+// populates the last subdirectory with 64 files containing random content:
+//
+//	/lower
+//		/subdir0/.../subdir63/
+//			/file0
+//			...
+//			/file63
+//
+// The files are truncated concurrently by 4 goroutines per file.
+// These goroutines contend with copying up all parent 64 subdirectories
+// as well as the final file content.
+//
+// At the end of the test, we assert that the files respect the new truncated
+// size and contain the content we expect.
+func TestConcurrentCopyUp(t *testing.T) { + ctx := contexttest.Context(t) + files := makeOverlayTestFiles(t) + + var wg sync.WaitGroup + for _, file := range files { + for i := 0; i < 4; i++ { + wg.Add(1) + go func(o *overlayTestFile) { + if err := o.File.Dirent.Inode.Truncate(ctx, o.File.Dirent, truncateFileSize); err != nil { + t.Fatalf("failed to copy up: %v", err) + } + wg.Done() + }(file) + } + } + wg.Wait() + + for _, file := range files { + got := make([]byte, origFileSize) + n, err := file.File.Readv(ctx, usermem.BytesIOSequence(got)) + if int(n) != truncateFileSize { + t.Fatalf("read %d bytes from file, want %d", n, truncateFileSize) + } + if err != nil && err != io.EOF { + t.Fatalf("read got error %v, want nil", err) + } + if !bytes.Equal(got[:n], file.content[:truncateFileSize]) { + t.Fatalf("file content is %v, want %v", got[:n], file.content[:truncateFileSize]) + } + } +} + +type overlayTestFile struct { + File *fs.File + name string + content []byte +} + +func makeOverlayTestFiles(t *testing.T) []*overlayTestFile { + ctx := contexttest.Context(t) + + // Create a lower tmpfs mount. + fsys, _ := fs.FindFilesystem("tmpfs") + lower, err := fsys.Mount(contexttest.Context(t), "", fs.MountSourceFlags{}, "", nil) + if err != nil { + t.Fatalf("failed to mount tmpfs: %v", err) + } + lowerRoot := fs.NewDirent(ctx, lower, "") + + // Make a deep set of subdirectories that everyone shares. + next := lowerRoot + for i := 0; i < 64; i++ { + name := fmt.Sprintf("subdir%d", i) + err := next.CreateDirectory(ctx, lowerRoot, name, fs.FilePermsFromMode(0777)) + if err != nil { + t.Fatalf("failed to create dir %q: %v", name, err) + } + next, err = next.Walk(ctx, lowerRoot, name) + if err != nil { + t.Fatalf("failed to walk to %q: %v", name, err) + } + } + + // Make a bunch of files in the last directory. 
+ var files []*overlayTestFile + for i := 0; i < 64; i++ { + name := fmt.Sprintf("file%d", i) + f, err := next.Create(ctx, next, name, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + t.Fatalf("failed to create file %q: %v", name, err) + } + defer f.DecRef() + + relname, _ := f.Dirent.FullName(lowerRoot) + + o := &overlayTestFile{ + name: relname, + content: make([]byte, origFileSize), + } + + if _, err := rand.Read(o.content); err != nil { + t.Fatalf("failed to read from /dev/urandom: %v", err) + } + + if _, err := f.Writev(ctx, usermem.BytesIOSequence(o.content)); err != nil { + t.Fatalf("failed to write content to file %q: %v", name, err) + } + + files = append(files, o) + } + + // Create an empty upper tmpfs mount which we will copy up into. + upper, err := fsys.Mount(ctx, "", fs.MountSourceFlags{}, "", nil) + if err != nil { + t.Fatalf("failed to mount tmpfs: %v", err) + } + + // Construct an overlay root. + overlay, err := fs.NewOverlayRoot(ctx, upper, lower, fs.MountSourceFlags{}) + if err != nil { + t.Fatalf("failed to construct overlay root: %v", err) + } + + // Create a MountNamespace to traverse the file system. + mns, err := fs.NewMountNamespace(ctx, overlay) + if err != nil { + t.Fatalf("failed to construct mount manager: %v", err) + } + + // Walk to all of the files in the overlay, open them readable. + for _, f := range files { + maxTraversals := uint(0) + d, err := mns.FindInode(ctx, mns.Root(), mns.Root(), f.name, &maxTraversals) + if err != nil { + t.Fatalf("failed to find %q: %v", f.name, err) + } + defer d.DecRef() + + f.File, err = d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("failed to open file %q readable: %v", f.name, err) + } + } + + return files +} diff --git a/pkg/sentry/fs/dentry.go b/pkg/sentry/fs/dentry.go new file mode 100644 index 000000000..6b2699f15 --- /dev/null +++ b/pkg/sentry/fs/dentry.go @@ -0,0 +1,234 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "sort" + + "gvisor.dev/gvisor/pkg/sentry/device" +) + +// DentAttr is the metadata of a directory entry. It is a subset of StableAttr. +// +// +stateify savable +type DentAttr struct { + // Type is the InodeType of an Inode. + Type InodeType + + // InodeID uniquely identifies an Inode on a device. + InodeID uint64 +} + +// GenericDentAttr returns a generic DentAttr where: +// +// Type == nt +// InodeID == the inode id of a new inode on device. +func GenericDentAttr(nt InodeType, device *device.Device) DentAttr { + return DentAttr{ + Type: nt, + InodeID: device.NextIno(), + } +} + +// DentrySerializer serializes a directory entry. +type DentrySerializer interface { + // CopyOut serializes a directory entry based on its name and attributes. + CopyOut(name string, attributes DentAttr) error + + // Written returns the number of bytes written. + Written() int +} + +// CollectEntriesSerializer copies DentAttrs to Entries. The order in +// which entries are encountered is preserved in Order. +type CollectEntriesSerializer struct { + Entries map[string]DentAttr + Order []string +} + +// CopyOut implements DentrySerializer.CopyOut. 
+func (c *CollectEntriesSerializer) CopyOut(name string, attr DentAttr) error { + if c.Entries == nil { + c.Entries = make(map[string]DentAttr) + } + c.Entries[name] = attr + c.Order = append(c.Order, name) + return nil +} + +// Written implements DentrySerializer.Written. +func (c *CollectEntriesSerializer) Written() int { + return len(c.Entries) +} + +// DirCtx is used in FileOperations.IterateDir to emit directory entries. It is +// not thread-safe. +type DirCtx struct { + // Serializer is used to serialize the node attributes. + Serializer DentrySerializer + + // attrs are DentAttrs + attrs map[string]DentAttr + + // DirCursor is the directory cursor. + DirCursor *string +} + +// DirEmit is called for each directory entry. +func (c *DirCtx) DirEmit(name string, attr DentAttr) error { + if c.Serializer != nil { + if err := c.Serializer.CopyOut(name, attr); err != nil { + return err + } + } + if c.attrs == nil { + c.attrs = make(map[string]DentAttr) + } + c.attrs[name] = attr + return nil +} + +// DentAttrs returns a map of DentAttrs corresponding to the emitted directory +// entries. +func (c *DirCtx) DentAttrs() map[string]DentAttr { + if c.attrs == nil { + c.attrs = make(map[string]DentAttr) + } + return c.attrs +} + +// GenericReaddir serializes DentAttrs based on a SortedDentryMap that must +// contain _all_ up-to-date DentAttrs under a directory. If ctx.DirCursor is +// not nil, it is updated to the name of the last DentAttr that was +// successfully serialized. +// +// Returns the number of entries serialized. +func GenericReaddir(ctx *DirCtx, s *SortedDentryMap) (int, error) { + // Retrieve the next directory entries. + var names []string + var entries map[string]DentAttr + if ctx.DirCursor != nil { + names, entries = s.GetNext(*ctx.DirCursor) + } else { + names, entries = s.GetAll() + } + + // Try to serialize each entry. + var serialized int + for _, name := range names { + // Skip "" per POSIX. Skip "." and ".." which will be added by Dirent.Readdir. 
+ if name == "" || name == "." || name == ".." { + continue + } + + // Emit the directory entry. + if err := ctx.DirEmit(name, entries[name]); err != nil { + // Return potentially a partial serialized count. + return serialized, err + } + + // We successfully serialized this entry. + serialized++ + + // Update the cursor with the name of the entry last serialized. + if ctx.DirCursor != nil { + *ctx.DirCursor = name + } + } + + // Everything was serialized. + return serialized, nil +} + +// SortedDentryMap is a sorted map of names and fs.DentAttr entries. +// +// +stateify savable +type SortedDentryMap struct { + // names is always kept in sorted-order. + names []string + + // entries maps names to fs.DentAttrs. + entries map[string]DentAttr +} + +// NewSortedDentryMap maintains entries in name sorted order. +func NewSortedDentryMap(entries map[string]DentAttr) *SortedDentryMap { + s := &SortedDentryMap{ + names: make([]string, 0, len(entries)), + entries: entries, + } + // Don't allow s.entries to be nil, because nil maps arn't Saveable. + if s.entries == nil { + s.entries = make(map[string]DentAttr) + } + + // Collect names from entries and sort them. + for name := range s.entries { + s.names = append(s.names, name) + } + sort.Strings(s.names) + return s +} + +// GetAll returns all names and entries in s. Callers should not modify the +// returned values. +func (s *SortedDentryMap) GetAll() ([]string, map[string]DentAttr) { + return s.names, s.entries +} + +// GetNext returns names after cursor in s and all entries. +func (s *SortedDentryMap) GetNext(cursor string) ([]string, map[string]DentAttr) { + i := sort.SearchStrings(s.names, cursor) + if i == len(s.names) { + return nil, s.entries + } + + // Return everything strictly after the cursor. + if s.names[i] == cursor { + i++ + } + return s.names[i:], s.entries +} + +// Add adds an entry with the given name to the map, preserving sort order. If +// name already exists in the map, its entry will be overwritten. 
+func (s *SortedDentryMap) Add(name string, entry DentAttr) { + if _, ok := s.entries[name]; !ok { + // Map does not yet contain an entry with this name. We must + // insert it in s.names at the appropriate spot. + i := sort.SearchStrings(s.names, name) + s.names = append(s.names, "") + copy(s.names[i+1:], s.names[i:]) + s.names[i] = name + } + s.entries[name] = entry +} + +// Remove removes an entry with the given name from the map, preserving sort order. +func (s *SortedDentryMap) Remove(name string) { + if _, ok := s.entries[name]; !ok { + return + } + i := sort.SearchStrings(s.names, name) + copy(s.names[i:], s.names[i+1:]) + s.names = s.names[:len(s.names)-1] + delete(s.entries, name) +} + +// Contains reports whether the map contains an entry with the given name. +func (s *SortedDentryMap) Contains(name string) bool { + _, ok := s.entries[name] + return ok +} diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD new file mode 100644 index 000000000..9379a4d7b --- /dev/null +++ b/pkg/sentry/fs/dev/BUILD @@ -0,0 +1,40 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "dev", + srcs = [ + "dev.go", + "device.go", + "fs.go", + "full.go", + "net_tun.go", + "null.go", + "random.go", + "tty.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/rand", + "//pkg/safemem", + "//pkg/sentry/arch", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/memmap", + "//pkg/sentry/mm", + "//pkg/sentry/pgalloc", + "//pkg/sentry/socket/netstack", + "//pkg/syserror", + "//pkg/tcpip/link/tun", + "//pkg/usermem", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/fs/dev/dev.go b/pkg/sentry/fs/dev/dev.go new file mode 100644 index 000000000..acbd401a0 --- /dev/null +++ b/pkg/sentry/fs/dev/dev.go @@ -0,0 +1,151 @@ +// Copyright 2018 
The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dev provides a filesystem with simple devices. +package dev + +import ( + "math" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs" + "gvisor.dev/gvisor/pkg/sentry/inet" + "gvisor.dev/gvisor/pkg/usermem" +) + +// Memory device numbers are from Linux's drivers/char/mem.c +const ( + // Mem device major. + memDevMajor uint16 = 1 + + // Mem device minors. + nullDevMinor uint32 = 3 + zeroDevMinor uint32 = 5 + fullDevMinor uint32 = 7 + randomDevMinor uint32 = 8 + urandomDevMinor uint32 = 9 +) + +// TTY major device number comes from include/uapi/linux/major.h. 
+const ( + ttyDevMinor = 0 + ttyDevMajor = 5 +) + +func newCharacterDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSource, major uint16, minor uint32) *fs.Inode { + return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.CharacterDevice, + DeviceFileMajor: major, + DeviceFileMinor: minor, + }) +} + +func newMemDevice(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSource, minor uint32) *fs.Inode { + return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.CharacterDevice, + DeviceFileMajor: memDevMajor, + DeviceFileMinor: minor, + }) +} + +func newDirectory(ctx context.Context, contents map[string]*fs.Inode, msrc *fs.MountSource) *fs.Inode { + iops := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +func newSymlink(ctx context.Context, target string, msrc *fs.MountSource) *fs.Inode { + iops := ramfs.NewSymlink(ctx, fs.RootOwner, target) + return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Symlink, + }) +} + +// New returns the root node of a device filesystem. 
+func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + contents := map[string]*fs.Inode{ + "fd": newSymlink(ctx, "/proc/self/fd", msrc), + "stdin": newSymlink(ctx, "/proc/self/fd/0", msrc), + "stdout": newSymlink(ctx, "/proc/self/fd/1", msrc), + "stderr": newSymlink(ctx, "/proc/self/fd/2", msrc), + + "null": newMemDevice(ctx, newNullDevice(ctx, fs.RootOwner, 0666), msrc, nullDevMinor), + "zero": newMemDevice(ctx, newZeroDevice(ctx, fs.RootOwner, 0666), msrc, zeroDevMinor), + "full": newMemDevice(ctx, newFullDevice(ctx, fs.RootOwner, 0666), msrc, fullDevMinor), + + // This is not as good as /dev/random in linux because go + // runtime uses sys_random and /dev/urandom internally. + // According to 'man 4 random', this will be sufficient unless + // application uses this to generate long-lived GPG/SSL/SSH + // keys. + "random": newMemDevice(ctx, newRandomDevice(ctx, fs.RootOwner, 0444), msrc, randomDevMinor), + "urandom": newMemDevice(ctx, newRandomDevice(ctx, fs.RootOwner, 0444), msrc, urandomDevMinor), + + "shm": tmpfs.NewDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0777), msrc), + + // A devpts is typically mounted at /dev/pts to provide + // pseudoterminal support. Place an empty directory there for + // the devpts to be mounted over. + "pts": newDirectory(ctx, nil, msrc), + // Similarly, applications expect a ptmx device at /dev/ptmx + // connected to the terminals provided by /dev/pts/. Rather + // than creating a device directly (which requires a hairy + // lookup on open to determine if a devpts exists), just create + // a symlink to the ptmx provided by devpts. (The Linux devpts + // documentation recommends this). + // + // If no devpts is mounted, this will simply be a dangling + // symlink, which is fine. 
+ "ptmx": newSymlink(ctx, "pts/ptmx", msrc), + + "tty": newCharacterDevice(ctx, newTTYDevice(ctx, fs.RootOwner, 0666), msrc, ttyDevMajor, ttyDevMinor), + } + + if isNetTunSupported(inet.StackFromContext(ctx)) { + contents["net"] = newDirectory(ctx, map[string]*fs.Inode{ + "tun": newCharacterDevice(ctx, newNetTunDevice(ctx, fs.RootOwner, 0666), msrc, netTunDevMajor, netTunDevMinor), + }, msrc) + } + + iops := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(ctx, iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +// readZeros implements fs.FileOperations.Read with infinite null bytes. +type readZeros struct{} + +// Read implements fs.FileOperations.Read. +func (*readZeros) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + return dst.ZeroOut(ctx, math.MaxInt64) +} diff --git a/pkg/sentry/fs/dev/device.go b/pkg/sentry/fs/dev/device.go new file mode 100644 index 000000000..a0493474e --- /dev/null +++ b/pkg/sentry/fs/dev/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dev + +import "gvisor.dev/gvisor/pkg/sentry/device" + +// devDevice is the pseudo-filesystem device. 
+var devDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/dev/fs.go b/pkg/sentry/fs/dev/fs.go new file mode 100644 index 000000000..5e518fb63 --- /dev/null +++ b/pkg/sentry/fs/dev/fs.go @@ -0,0 +1,64 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dev + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// filesystem is a devtmpfs. +// +// +stateify savable +type filesystem struct{} + +var _ fs.Filesystem = (*filesystem)(nil) + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches drivers/base/devtmpfs.c:dev_fs_type.name. +const FilesystemName = "devtmpfs" + +// Name is the name of the file system. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. +func (*filesystem) AllowUserMount() bool { + return true +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. +func (*filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, devtmpfs does the same thing. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns a devtmpfs root that can be positioned in the vfs. 
+func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, _ interface{}) (*fs.Inode, error) { + // devtmpfs backed by ramfs ignores bad options. See fs/ramfs/inode.c:ramfs_parse_options. + // -> we should consider parsing the mode and backing devtmpfs by this. + return New(ctx, fs.NewNonCachingMountSource(ctx, f, flags)), nil +} diff --git a/pkg/sentry/fs/dev/full.go b/pkg/sentry/fs/dev/full.go new file mode 100644 index 000000000..deb9c6ad8 --- /dev/null +++ b/pkg/sentry/fs/dev/full.go @@ -0,0 +1,81 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dev + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// fullDevice is used to implement /dev/full. 
+// +// +stateify savable +type fullDevice struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopAllocate `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes +} + +var _ fs.InodeOperations = (*fullDevice)(nil) + +func newFullDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *fullDevice { + f := &fullDevice{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + } + return f +} + +// GetFile implements fs.InodeOperations.GetFile. +func (f *fullDevice) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + return fs.NewFile(ctx, dirent, flags, &fullFileOperations{}), nil +} + +// +stateify savable +type fullFileOperations struct { + waiter.AlwaysReady `state:"nosave"` + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + readZeros `state:"nosave"` +} + +var _ fs.FileOperations = (*fullFileOperations)(nil) + +// Write implements FileOperations.Write. 
// Writes to /dev/full always fail with ENOSPC ("no space left on device"),
// regardless of the data or offset; the arguments are therefore unnamed and
// unused.
func (*fullFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
	return 0, syserror.ENOSPC
}
diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go
new file mode 100644
index 000000000..dc7ad075a
--- /dev/null
+++ b/pkg/sentry/fs/dev/net_tun.go
@@ -0,0 +1,177 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dev
+
+import (
+	"gvisor.dev/gvisor/pkg/abi/linux"
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/sentry/arch"
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
+	"gvisor.dev/gvisor/pkg/sentry/inet"
+	"gvisor.dev/gvisor/pkg/sentry/kernel"
+	"gvisor.dev/gvisor/pkg/sentry/socket/netstack"
+	"gvisor.dev/gvisor/pkg/syserror"
+	"gvisor.dev/gvisor/pkg/tcpip/link/tun"
+	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/waiter"
+)
+
+// Device numbers for /dev/net/tun — presumably the misc-device numbers used
+// by Linux's drivers/net/tun.c (major 10, minor 200); TODO confirm against
+// Documentation/admin-guide/devices.txt.
+const (
+	netTunDevMajor = 10
+	netTunDevMinor = 200
+)
+
+// netTunInodeOperations implements fs.InodeOperations for /dev/net/tun.
+//
+// +stateify savable
+type netTunInodeOperations struct {
+	fsutil.InodeGenericChecker       `state:"nosave"`
+	fsutil.InodeNoExtendedAttributes `state:"nosave"`
+	fsutil.InodeNoopAllocate         `state:"nosave"`
+	fsutil.InodeNoopRelease          `state:"nosave"`
+	fsutil.InodeNoopTruncate         `state:"nosave"`
+	fsutil.InodeNoopWriteOut         `state:"nosave"`
+	fsutil.InodeNotDirectory         `state:"nosave"`
+	fsutil.InodeNotMappable          `state:"nosave"`
+	fsutil.InodeNotSocket            `state:"nosave"`
+	fsutil.InodeNotSymlink           `state:"nosave"`
fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes +} + +var _ fs.InodeOperations = (*netTunInodeOperations)(nil) + +func newNetTunDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *netTunInodeOperations { + return &netTunInodeOperations{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + } +} + +// GetFile implements fs.InodeOperations.GetFile. +func (iops *netTunInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, d, flags, &netTunFileOperations{}), nil +} + +// +stateify savable +type netTunFileOperations struct { + fsutil.FileNoSeek `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + device tun.Device +} + +var _ fs.FileOperations = (*netTunFileOperations)(nil) + +// Release implements fs.FileOperations.Release. +func (fops *netTunFileOperations) Release() { + fops.device.Release() +} + +// Ioctl implements fs.FileOperations.Ioctl. 
// Only TUNSETIFF (attach to a device) and TUNGETIFF (query interface flags)
// are handled here; all other requests fail with ENOTTY.
func (fops *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
	// Per the ioctl calling convention, args[1] holds the request number and
	// args[2] the user pointer to the linux.IFReq argument.
	request := args[1].Uint()
	data := args[2].Pointer()

	switch request {
	case linux.TUNSETIFF:
		t := kernel.TaskFromContext(ctx)
		if t == nil {
			panic("Ioctl should be called from a task context")
		}
		// Attaching requires CAP_NET_ADMIN.
		if !t.HasCapability(linux.CAP_NET_ADMIN) {
			return 0, syserror.EPERM
		}
		// Only a netstack network context can back a tun device here.
		stack, ok := t.NetworkContext().(*netstack.Stack)
		if !ok {
			return 0, syserror.EINVAL
		}

		var req linux.IFReq
		if _, err := usermem.CopyObjectIn(ctx, io, data, &req, usermem.IOOpts{
			AddressSpaceActive: true,
		}); err != nil {
			return 0, err
		}
		// The IFF_* flags are stored in the first two bytes of the request's
		// union data.
		flags := usermem.ByteOrder.Uint16(req.Data[:])
		return 0, fops.device.SetIff(stack.Stack, req.Name(), flags)

	case linux.TUNGETIFF:
		var req linux.IFReq

		copy(req.IFName[:], fops.device.Name())

		// Linux adds IFF_NOFILTER (the same value as IFF_NO_PI unfortunately) when
		// there is no sk_filter. See __tun_chr_ioctl() in drivers/net/tun.c.
		flags := fops.device.Flags() | linux.IFF_NOFILTER
		usermem.ByteOrder.PutUint16(req.Data[:], flags)

		_, err := usermem.CopyObjectOut(ctx, io, data, &req, usermem.IOOpts{
			AddressSpaceActive: true,
		})
		return 0, err

	default:
		return 0, syserror.ENOTTY
	}
}

// Write implements fs.FileOperations.Write.
//
// The entire write buffer is handed to the device as one unit.
func (fops *netTunFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
	data := make([]byte, src.NumBytes())
	if _, err := src.CopyIn(ctx, data); err != nil {
		return 0, err
	}
	return fops.device.Write(data)
}

// Read implements fs.FileOperations.Read.
func (fops *netTunFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	data, err := fops.device.Read()
	if err != nil {
		return 0, err
	}
	n, err := dst.CopyOut(ctx, data)
	if n > 0 && n < len(data) {
		// Not an error for partial copying. Packet truncated.
		err = nil
	}
	return int64(n), err
}

// Readiness implements waiter.Waitable.Readiness.
func (fops *netTunFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
	return fops.device.Readiness(mask)
}

// EventRegister implements waiter.Waitable.EventRegister.
func (fops *netTunFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) {
	fops.device.EventRegister(e, mask)
}

// EventUnregister implements waiter.Waitable.EventUnregister.
func (fops *netTunFileOperations) EventUnregister(e *waiter.Entry) {
	fops.device.EventUnregister(e)
}

// isNetTunSupported returns whether /dev/net/tun device is supported for s.
func isNetTunSupported(s inet.Stack) bool {
	_, ok := s.(*netstack.Stack)
	return ok
}
diff --git a/pkg/sentry/fs/dev/null.go b/pkg/sentry/fs/dev/null.go
new file mode 100644
index 000000000..aec33d0d9
--- /dev/null
+++ b/pkg/sentry/fs/dev/null.go
@@ -0,0 +1,131 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package dev + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/mm" + "gvisor.dev/gvisor/pkg/sentry/pgalloc" + "gvisor.dev/gvisor/pkg/waiter" +) + +// +stateify savable +type nullDevice struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopAllocate `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes +} + +var _ fs.InodeOperations = (*nullDevice)(nil) + +func newNullDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *nullDevice { + n := &nullDevice{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + } + return n +} + +// GetFile implements fs.FileOperations.GetFile. 
+func (n *nullDevice) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + flags.Pwrite = true + + return fs.NewFile(ctx, dirent, flags, &nullFileOperations{}), nil +} + +// +stateify savable +type nullFileOperations struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRead `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopWrite `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +var _ fs.FileOperations = (*nullFileOperations)(nil) + +// +stateify savable +type zeroDevice struct { + nullDevice +} + +var _ fs.InodeOperations = (*zeroDevice)(nil) + +func newZeroDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *zeroDevice { + zd := &zeroDevice{ + nullDevice: nullDevice{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + }, + } + return zd +} + +// GetFile implements fs.FileOperations.GetFile. 
+func (zd *zeroDevice) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + flags.Pwrite = true + flags.NonSeekable = true + + return fs.NewFile(ctx, dirent, flags, &zeroFileOperations{}), nil +} + +// +stateify savable +type zeroFileOperations struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopWrite `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + readZeros `state:"nosave"` +} + +var _ fs.FileOperations = (*zeroFileOperations)(nil) + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (*zeroFileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + m, err := mm.NewSharedAnonMappable(opts.Length, pgalloc.MemoryFileProviderFromContext(ctx)) + if err != nil { + return err + } + opts.MappingIdentity = m + opts.Mappable = m + return nil +} diff --git a/pkg/sentry/fs/dev/random.go b/pkg/sentry/fs/dev/random.go new file mode 100644 index 000000000..2a9bbeb18 --- /dev/null +++ b/pkg/sentry/fs/dev/random.go @@ -0,0 +1,79 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dev + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/rand" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// +stateify savable +type randomDevice struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopAllocate `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes +} + +var _ fs.InodeOperations = (*randomDevice)(nil) + +func newRandomDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *randomDevice { + r := &randomDevice{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + } + return r +} + +// GetFile implements fs.InodeOperations.GetFile. 
+func (*randomDevice) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &randomFileOperations{}), nil +} + +// +stateify savable +type randomFileOperations struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopWrite `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +var _ fs.FileOperations = (*randomFileOperations)(nil) + +// Read implements fs.FileOperations.Read. +func (*randomFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + return dst.CopyOutFrom(ctx, safemem.FromIOReader{rand.Reader}) +} diff --git a/pkg/sentry/fs/dev/tty.go b/pkg/sentry/fs/dev/tty.go new file mode 100644 index 000000000..760ca563d --- /dev/null +++ b/pkg/sentry/fs/dev/tty.go @@ -0,0 +1,67 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dev + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/waiter" +) + +// +stateify savable +type ttyInodeOperations struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopAllocate `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotOpenable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes +} + +var _ fs.InodeOperations = (*ttyInodeOperations)(nil) + +func newTTYDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *ttyInodeOperations { + return &ttyInodeOperations{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(mode), linux.TMPFS_MAGIC), + } +} + +// +stateify savable +type ttyFileOperations struct { + fsutil.FileNoSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopWrite `state:"nosave"` + fsutil.FileNoopRead `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +var _ fs.FileOperations = (*ttyFileOperations)(nil) diff --git a/pkg/sentry/fs/dirent.go b/pkg/sentry/fs/dirent.go new file mode 100644 index 000000000..65be12175 --- /dev/null +++ b/pkg/sentry/fs/dirent.go @@ -0,0 +1,1558 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "path" + "sync/atomic" + "syscall" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sentry/uniqueid" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" +) + +type globalDirentMap struct { + mu sync.Mutex + dirents map[*Dirent]struct{} +} + +func (g *globalDirentMap) add(d *Dirent) { + g.mu.Lock() + g.dirents[d] = struct{}{} + g.mu.Unlock() +} + +func (g *globalDirentMap) remove(d *Dirent) { + g.mu.Lock() + delete(g.dirents, d) + g.mu.Unlock() +} + +// allDirents keeps track of all Dirents that need to be considered in +// Save/Restore for inode mappings. +// +// Because inodes do not hold paths, but inodes for external file systems map +// to an external path, every user-visible Dirent is stored in this map and +// iterated through upon save to keep inode ID -> restore path mappings. +var allDirents = globalDirentMap{ + dirents: map[*Dirent]struct{}{}, +} + +// renameMu protects the parent of *all* Dirents. (See explanation in +// lockForRename.) +// +// See fs.go for lock ordering. +var renameMu sync.RWMutex + +// Dirent holds an Inode in memory. +// +// A Dirent may be negative or positive: +// +// A negative Dirent contains a nil Inode and indicates that a path does not exist. 
This +// is a convention taken from the Linux dcache, see fs/dcache.c. A negative Dirent remains +// cached until a create operation replaces it with a positive Dirent. A negative Dirent +// always has one reference owned by its parent and takes _no_ reference on its parent. This +// ensures that its parent can be unhashed regardless of negative children. +// +// A positive Dirent contains a non-nil Inode. It remains cached for as long as there remain +// references to it. A positive Dirent always takes a reference on its parent. +// +// A Dirent may be a root Dirent (parent is nil) or be parented (non-nil parent). +// +// Dirents currently do not attempt to free entries that lack application references under +// memory pressure. +// +// +stateify savable +type Dirent struct { + // AtomicRefCount is our reference count. + refs.AtomicRefCount + + // userVisible indicates whether the Dirent is visible to the user or + // not. Only user-visible Dirents should save inode mappings in + // save/restore, as only they hold the real path to the underlying + // inode. + // + // See newDirent and Dirent.afterLoad. + userVisible bool + + // Inode is the underlying file object. + // + // Inode is exported currently to assist in implementing overlay Inodes (where a + // Inode.InodeOperations.Lookup may need to merge the Inode contained in a positive Dirent with + // another Inode). This is normally done before the Dirent is parented (there are + // no external references to it). + // + // Other objects in the VFS may take a reference to this Inode but only while holding + // a reference to this Dirent. + Inode *Inode + + // name is the name (i.e. basename) of this entry. + // + // N.B. name is protected by parent.mu, not this node's mu! + name string + + // parent is the parent directory. + // + // We hold a hard reference to the parent. + // + // parent is protected by renameMu. + parent *Dirent + + // deleted may be set atomically when removed. 
+ deleted int32 + + // mounted is true if Dirent is a mount point, similar to include/linux/dcache.h:DCACHE_MOUNTED. + mounted bool + + // direntEntry identifies this Dirent as an element in a DirentCache. DirentCaches + // and their contents are not saved. + direntEntry `state:"nosave"` + + // dirMu is a read-write mutex that protects caching decisions made by directory operations. + // Lock ordering: dirMu must be taken before mu (see below). Details: + // + // dirMu does not participate in Rename; instead mu and renameMu are used, see lockForRename. + // + // Creation and Removal operations must be synchronized with Walk to prevent stale negative + // caching. Note that this requirement is not specific to a _Dirent_ doing negative caching. + // The following race exists at any level of the VFS: + // + // For an object D that represents a directory, containing a cache of non-existent paths, + // protected by D.cacheMu: + // + // T1: T2: + // D.lookup(name) + // --> ENOENT + // D.create(name) + // --> success + // D.cacheMu.Lock + // delete(D.cache, name) + // D.cacheMu.Unlock + // D.cacheMu.Lock + // D.cache[name] = true + // D.cacheMu.Unlock + // + // D.lookup(name) + // D.cacheMu.Lock + // if D.cache[name] { + // --> ENOENT (wrong) + // } + // D.cacheMu.Lock + // + // Correct: + // + // T1: T2: + // D.cacheMu.Lock + // D.lookup(name) + // --> ENOENT + // D.cache[name] = true + // D.cacheMu.Unlock + // D.cacheMu.Lock + // D.create(name) + // --> success + // delete(D.cache, name) + // D.cacheMu.Unlock + // + // D.cacheMu.Lock + // D.lookup(name) + // --> EXISTS (right) + // D.cacheMu.Unlock + // + // Note that the above "correct" solution causes too much lock contention: all lookups are + // synchronized with each other. This is a problem because lookups are involved in any VFS + // path operation. 
+ // + // A Dirent diverges from the single D.cacheMu and instead uses two locks: dirMu to protect + // concurrent creation/removal/lookup caching, and mu to protect the Dirent's children map + // in general. + // + // This allows for concurrent Walks to be executed in order to pipeline lookups. For instance + // for a hot directory /a/b, threads T1, T2, T3 will only block on each other update the + // children map of /a/b when their individual lookups complete. + // + // T1: T2: T3: + // stat(/a/b/c) stat(/a/b/d) stat(/a/b/e) + dirMu sync.RWMutex `state:"nosave"` + + // mu protects the below fields. Lock ordering: mu must be taken after dirMu. + mu sync.Mutex `state:"nosave"` + + // children are cached via weak references. + children map[string]*refs.WeakRef `state:".(map[string]*Dirent)"` +} + +// NewDirent returns a new root Dirent, taking the caller's reference on inode. The caller +// holds the only reference to the Dirent. Parents may call hashChild to parent this Dirent. +func NewDirent(ctx context.Context, inode *Inode, name string) *Dirent { + d := newDirent(inode, name) + allDirents.add(d) + d.userVisible = true + return d +} + +// NewTransientDirent creates a transient Dirent that shouldn't actually be +// visible to users. +// +// An Inode is required. +func NewTransientDirent(inode *Inode) *Dirent { + if inode == nil { + panic("an inode is required") + } + return newDirent(inode, "transient") +} + +func newDirent(inode *Inode, name string) *Dirent { + // The Dirent needs to maintain one reference to MountSource. + if inode != nil { + inode.MountSource.IncDirentRefs() + } + d := Dirent{ + Inode: inode, + name: name, + children: make(map[string]*refs.WeakRef), + } + d.EnableLeakCheck("fs.Dirent") + return &d +} + +// NewNegativeDirent returns a new root negative Dirent. Otherwise same as NewDirent. +func NewNegativeDirent(name string) *Dirent { + return newDirent(nil, name) +} + +// IsRoot returns true if d is a root Dirent. 
+func (d *Dirent) IsRoot() bool {
+	return d.parent == nil
+}
+
+// IsNegative returns true if d represents a path that does not exist.
+func (d *Dirent) IsNegative() bool {
+	return d.Inode == nil
+}
+
+// hashChild will hash child into the children list of its new parent d.
+//
+// Returns (*WeakRef, true) if hashing child caused a Dirent to be unhashed. The caller must
+// validate the returned unhashed weak reference. Common cases:
+//
+// * Remove: hashing a negative Dirent unhashes a positive Dirent (unimplemented).
+// * Create: hashing a positive Dirent unhashes a negative Dirent.
+// * Lookup: hashing any Dirent should not unhash any other Dirent.
+//
+// Preconditions:
+// * d.mu must be held.
+// * child must be a root Dirent.
+func (d *Dirent) hashChild(child *Dirent) (*refs.WeakRef, bool) {
+	if !child.IsRoot() {
+		panic("hashChild must be a root Dirent")
+	}
+
+	// Assign parentage.
+	child.parent = d
+
+	// Avoid letting negative Dirents take a reference on their parent; these Dirents
+	// don't have a role outside of the Dirent cache and should not keep their parent
+	// indefinitely pinned.
+	if !child.IsNegative() {
+		// Positive dirents must take a reference on their parent.
+		d.IncRef()
+	}
+
+	return d.hashChildParentSet(child)
+}
+
+// hashChildParentSet will rehash child into the children list of its parent d.
+// Only a weak reference to child is stored in the map; strong references are
+// managed by callers.
+//
+// Assumes that child.parent = d already.
+func (d *Dirent) hashChildParentSet(child *Dirent) (*refs.WeakRef, bool) {
+	if child.parent != d {
+		panic("hashChildParentSet assumes the child already belongs to the parent")
+	}
+
+	// Save any replaced child so our caller can validate it.
+	old, ok := d.children[child.name]
+
+	// Hash the child.
+	d.children[child.name] = refs.NewWeakRef(child, nil)
+
+	// Return any replaced child.
+	return old, ok
+}
+
+// SyncAll iterates through mount points under d and writes back their buffered
+// modifications to filesystems.
+func (d *Dirent) SyncAll(ctx context.Context) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// For negative Dirents there is nothing to sync. By definition these are
+	// leaves (there is nothing left to traverse).
+	if d.IsNegative() {
+		return
+	}
+
+	// There is nothing to sync for a read-only filesystem.
+	if !d.Inode.MountSource.Flags.ReadOnly {
+		// NOTE(b/34856369): This should be a mount traversal, not a Dirent
+		// traversal, because some Inodes that need to be synced may no longer
+		// be reachable by name (after sys_unlink).
+		//
+		// Write out metadata, dirty page cached pages, and sync disk/remote
+		// caches.
+		d.Inode.WriteOut(ctx)
+	}
+
+	// Continue iterating through other mounted filesystems.
+	for _, w := range d.children {
+		if child := w.Get(); child != nil {
+			child.(*Dirent).SyncAll(ctx)
+			child.DecRef()
+		}
+	}
+}
+
+// BaseName returns the base name of the dirent.
+func (d *Dirent) BaseName() string {
+	p := d.parent
+	if p == nil {
+		return d.name
+	}
+	// Taking p.mu synchronizes with operations (e.g. rename, see lockForRename)
+	// that mutate d.name while holding the parent's lock.
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return d.name
+}
+
+// FullName returns the fully-qualified name and a boolean value representing
+// whether this Dirent was a descendant of root.
+// If the root argument is nil it is assumed to be the root of the Dirent tree.
+func (d *Dirent) FullName(root *Dirent) (string, bool) {
+	renameMu.RLock()
+	defer renameMu.RUnlock()
+	return d.fullName(root)
+}
+
+// fullName returns the fully-qualified name and a boolean value representing
+// if the root node was reachable from this Dirent.
+func (d *Dirent) fullName(root *Dirent) (string, bool) {
+	if d == root {
+		return "/", true
+	}
+
+	if d.IsRoot() {
+		if root != nil {
+			// We reached the top of the Dirent tree but did not encounter
+			// the given root. Return false for reachable so the caller
+			// can handle this situation accordingly.
+			return d.name, false
+		}
+		return d.name, true
+	}
+
+	// Traverse up to parent.
+	d.parent.mu.Lock()
+	name := d.name
+	d.parent.mu.Unlock()
+	parentName, reachable := d.parent.fullName(root)
+	s := path.Join(parentName, name)
+	if atomic.LoadInt32(&d.deleted) != 0 {
+		return s + " (deleted)", reachable
+	}
+	return s, reachable
+}
+
+// MountRoot finds and returns the mount-root for a given dirent.
+func (d *Dirent) MountRoot() *Dirent {
+	renameMu.RLock()
+	defer renameMu.RUnlock()
+
+	mountRoot := d
+	for !mountRoot.mounted && mountRoot.parent != nil {
+		mountRoot = mountRoot.parent
+	}
+	mountRoot.IncRef()
+	return mountRoot
+}
+
+// descendantOf returns true if the receiver dirent is equal to, or a
+// descendant of, the argument dirent.
+//
+// d.mu must be held.
+func (d *Dirent) descendantOf(p *Dirent) bool {
+	if d == p {
+		return true
+	}
+	if d.IsRoot() {
+		return false
+	}
+	return d.parent.descendantOf(p)
+}
+
+// walk walks to path name starting at the dirent, and will not traverse above
+// root Dirent.
+//
+// If walkMayUnlock is true then walk can unlock d.mu to execute a slow
+// Inode.Lookup, otherwise walk will keep d.mu locked.
+//
+// Preconditions:
+// - renameMu must be held for reading.
+// - d.mu must be held.
+// - name must not contain "/"s.
+func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnlock bool) (*Dirent, error) {
+	if !IsDir(d.Inode.StableAttr) {
+		return nil, syscall.ENOTDIR
+	}
+
+	if name == "" || name == "." {
+		d.IncRef()
+		return d, nil
+	} else if name == ".." {
+		// Respect the chroot. Note that in Linux there is no check to enforce
+		// that d is a descendant of root.
+		if d == root {
+			d.IncRef()
+			return d, nil
+		}
+		// Are we already at the root? Then ".." is ".".
+		if d.IsRoot() {
+			d.IncRef()
+			return d, nil
+		}
+		d.parent.IncRef()
+		return d.parent, nil
+	}
+
+	if w, ok := d.children[name]; ok {
+		// Try to resolve the weak reference to a hard reference.
+		if child := w.Get(); child != nil {
+			cd := child.(*Dirent)
+
+			// Is this a negative Dirent?
+			if cd.IsNegative() {
+				// Don't leak a reference; this doesn't matter as much for negative Dirents,
+				// which don't hold a hard reference on their parent (their parent holds a
+				// hard reference on them, and they contain virtually no state). But this is
+				// good house-keeping.
+				child.DecRef()
+				return nil, syscall.ENOENT
+			}
+
+			// Do we need to revalidate this child?
+			//
+			// We never allow the file system to revalidate mounts, that could cause them
+			// to unexpectedly drop out before umount.
+			if cd.mounted || !cd.Inode.MountSource.Revalidate(ctx, name, d.Inode, cd.Inode) {
+				// Good to go. This is the fast-path.
+				return cd, nil
+			}
+
+			// If we're revalidating a child, we must ensure all inotify watches release
+			// their pins on the child. Inotify doesn't properly support filesystems that
+			// revalidate dirents (since watches are lost on revalidation), but if we fail
+			// to unpin the watches child will never be GCed.
+			cd.Inode.Watches.Unpin(cd)
+
+			// This child needs to be revalidated, fallthrough to unhash it. Make sure
+			// to not leak a reference from Get().
+			//
+			// Note that previous lookups may still have a reference to this stale child;
+			// this can't be helped, but we can ensure that *new* lookups are up-to-date.
+			child.DecRef()
+		}
+
+		// Either our weak reference expired or we need to revalidate it. Unhash child first, we're
+		// about to replace it.
+		delete(d.children, name)
+		w.Drop()
+	}
+
+	// Slow path: load the InodeOperations into memory. Since this is a hot path and the lookup may be
+	// expensive, if possible release the lock and re-acquire it.
+	if walkMayUnlock {
+		d.mu.Unlock()
+	}
+	c, err := d.Inode.Lookup(ctx, name)
+	if walkMayUnlock {
+		d.mu.Lock()
+	}
+	// No dice.
+	if err != nil {
+		return nil, err
+	}
+
+	// Sanity check c, its name must be consistent.
+	if c.name != name {
+		panic(fmt.Sprintf("lookup from %q to %q returned unexpected name %q", d.name, name, c.name))
+	}
+
+	// Now that we have the lock again, check if we raced.
+	if w, ok := d.children[name]; ok {
+		// Someone else looked up or created a child at name before us.
+		if child := w.Get(); child != nil {
+			cd := child.(*Dirent)
+
+			// There are active references to the existing child, prefer it to the one we
+			// retrieved from Lookup. Likely the Lookup happened very close to the insertion
+			// of child, so considering one stale over the other is fairly arbitrary.
+			c.DecRef()
+
+			// The child that was installed could be negative.
+			if cd.IsNegative() {
+				// If so, don't leak a reference and short circuit.
+				child.DecRef()
+				return nil, syscall.ENOENT
+			}
+
+			// We make the judgement call that if c raced with cd they are close enough to have
+			// the same staleness, so we don't attempt to revalidate cd. In Linux revalidations
+			// can continue indefinitely (see fs/namei.c, retry_estale); we try to avoid this.
+			return cd, nil
+		}
+
+		// Weak reference expired. We went through a full cycle of create/destroy in the time
+		// we did the Inode.Lookup. Fully drop the weak reference and fallback to using the child
+		// we looked up.
+		delete(d.children, name)
+		w.Drop()
+	}
+
+	// Give the looked up child a parent. We cannot kick out entries, since we just checked above
+	// that there is nothing at name in d's children list.
+	if _, kicked := d.hashChild(c); kicked {
+		// Yell loudly.
+		panic(fmt.Sprintf("hashed child %q over existing child", c.name))
+	}
+
+	// Is this a negative Dirent?
+	if c.IsNegative() {
+		// Don't drop a reference on the negative Dirent, it was just installed and this is the
+		// only reference we'll ever get. d owns the reference.
+		return nil, syscall.ENOENT
+	}
+
+	// Return the positive Dirent.
+	return c, nil
+}
+
+// Walk walks to a new dirent, and will not walk higher than the given root
+// Dirent, which must not be nil.
+func (d *Dirent) Walk(ctx context.Context, root *Dirent, name string) (*Dirent, error) {
+	if root == nil {
+		panic("Dirent.Walk: root must not be nil")
+	}
+
+	// We could use lockDirectory here, but this is a hot path and we want
+	// to avoid defer.
+	renameMu.RLock()
+	d.dirMu.RLock()
+	d.mu.Lock()
+
+	child, err := d.walk(ctx, root, name, true /* may unlock */)
+
+	d.mu.Unlock()
+	d.dirMu.RUnlock()
+	renameMu.RUnlock()
+
+	return child, err
+}
+
+// exists returns true if name exists in relation to d.
+//
+// Preconditions:
+// - renameMu must be held for reading.
+// - d.mu must be held.
+// - name must not contain "/"s.
+func (d *Dirent) exists(ctx context.Context, root *Dirent, name string) bool {
+	child, err := d.walk(ctx, root, name, false /* may unlock */)
+	if err != nil {
+		// Child may not exist.
+		return false
+	}
+	// Child exists.
+	child.DecRef()
+	return true
+}
+
+// lockDirectory should be called for any operation that changes this `d`s
+// children (creating or removing them). It takes renameMu (read), dirMu
+// (write) and mu, and returns a closure that releases them in reverse order.
+func (d *Dirent) lockDirectory() func() {
+	renameMu.RLock()
+	d.dirMu.Lock()
+	d.mu.Lock()
+	return func() {
+		d.mu.Unlock()
+		d.dirMu.Unlock()
+		renameMu.RUnlock()
+	}
+}
+
+// Create creates a new regular file in this directory.
+func (d *Dirent) Create(ctx context.Context, root *Dirent, name string, flags FileFlags, perms FilePermissions) (*File, error) {
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	// Does something already exist?
+	if d.exists(ctx, root, name) {
+		return nil, syscall.EEXIST
+	}
+
+	// Try the create. We need to trust the file system to return EEXIST (or something
+	// that will translate to EEXIST) if name already exists.
+	file, err := d.Inode.Create(ctx, d, name, flags, perms)
+	if err != nil {
+		return nil, err
+	}
+	child := file.Dirent
+
+	d.finishCreate(child, name)
+
+	// Return the reference and the new file. When the last reference to
+	// the file is dropped, file.Dirent may no longer be cached.
+	return file, nil
+}
+
+// finishCreate validates the created file, adds it as a child of this dirent,
+// and notifies any watchers.
+func (d *Dirent) finishCreate(child *Dirent, name string) {
+	// Sanity check c, its name must be consistent.
+	if child.name != name {
+		panic(fmt.Sprintf("create from %q to %q returned unexpected name %q", d.name, name, child.name))
+	}
+
+	// File systems cannot return a negative Dirent on Create, that makes no sense.
+	if child.IsNegative() {
+		panic(fmt.Sprintf("create from %q to %q returned negative Dirent", d.name, name))
+	}
+
+	// Hash the child into its parent. We can only kick out a Dirent if it is negative
+	// (we are replacing something that does not exist with something that now does).
+	if w, kicked := d.hashChild(child); kicked {
+		if old := w.Get(); old != nil {
+			if !old.(*Dirent).IsNegative() {
+				panic(fmt.Sprintf("hashed child %q over a positive child", child.name))
+			}
+			// Don't leak a reference.
+			old.DecRef()
+
+			// Drop d's reference.
+			old.DecRef()
+		}
+
+		// Finally drop the useless weak reference on the floor.
+		w.Drop()
+	}
+
+	d.Inode.Watches.Notify(name, linux.IN_CREATE, 0)
+
+	// Allow the file system to take extra references on c.
+	child.maybeExtendReference()
+}
+
+// genericCreate executes create if name does not exist. Removes a negative Dirent at name if
+// create succeeds.
+func (d *Dirent) genericCreate(ctx context.Context, root *Dirent, name string, create func() error) error {
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	// Does something already exist?
+	if d.exists(ctx, root, name) {
+		return syscall.EEXIST
+	}
+
+	// Remove any negative Dirent. We've already asserted above with d.exists
+	// that the only thing remaining here can be a negative Dirent.
+	if w, ok := d.children[name]; ok {
+		// Same as Create.
+		if old := w.Get(); old != nil {
+			if !old.(*Dirent).IsNegative() {
+				panic(fmt.Sprintf("hashed over a positive child %q", old.(*Dirent).name))
+			}
+			// Don't leak a reference.
+			old.DecRef()
+
+			// Drop d's reference.
+			old.DecRef()
+		}
+
+		// Unhash the negative Dirent, name needs to exist now.
+		delete(d.children, name)
+
+		// Finally drop the useless weak reference on the floor.
+		w.Drop()
+	}
+
+	// Execute the create operation.
+	return create()
+}
+
+// CreateLink creates a new link in this directory.
+func (d *Dirent) CreateLink(ctx context.Context, root *Dirent, oldname, newname string) error {
+	return d.genericCreate(ctx, root, newname, func() error {
+		if err := d.Inode.CreateLink(ctx, d, oldname, newname); err != nil {
+			return err
+		}
+		d.Inode.Watches.Notify(newname, linux.IN_CREATE, 0)
+		return nil
+	})
+}
+
+// CreateHardLink creates a new hard link in this directory.
+func (d *Dirent) CreateHardLink(ctx context.Context, root *Dirent, target *Dirent, name string) error {
+	// Make sure that target does not span filesystems.
+	if d.Inode.MountSource != target.Inode.MountSource {
+		return syscall.EXDEV
+	}
+
+	// Directories are never linkable. See fs/namei.c:vfs_link.
+	if IsDir(target.Inode.StableAttr) {
+		return syscall.EPERM
+	}
+
+	return d.genericCreate(ctx, root, name, func() error {
+		if err := d.Inode.CreateHardLink(ctx, d, target, name); err != nil {
+			return err
+		}
+		target.Inode.Watches.Notify("", linux.IN_ATTRIB, 0) // Link count change.
+		d.Inode.Watches.Notify(name, linux.IN_CREATE, 0)
+		return nil
+	})
+}
+
+// CreateDirectory creates a new directory under this dirent.
+func (d *Dirent) CreateDirectory(ctx context.Context, root *Dirent, name string, perms FilePermissions) error {
+	return d.genericCreate(ctx, root, name, func() error {
+		if err := d.Inode.CreateDirectory(ctx, d, name, perms); err != nil {
+			return err
+		}
+		d.Inode.Watches.Notify(name, linux.IN_ISDIR|linux.IN_CREATE, 0)
+		return nil
+	})
+}
+
+// Bind satisfies the InodeOperations interface; otherwise same as GetFile.
+func (d *Dirent) Bind(ctx context.Context, root *Dirent, name string, data transport.BoundEndpoint, perms FilePermissions) (*Dirent, error) {
+	var childDir *Dirent
+	err := d.genericCreate(ctx, root, name, func() error {
+		var e error
+		childDir, e = d.Inode.Bind(ctx, d, name, data, perms)
+		if e != nil {
+			return e
+		}
+		d.finishCreate(childDir, name)
+		return nil
+	})
+	// Binding over an existing path maps to EADDRINUSE, matching bind(2)
+	// semantics for unix sockets rather than plain EEXIST.
+	if err == syscall.EEXIST {
+		return nil, syscall.EADDRINUSE
+	}
+	if err != nil {
+		return nil, err
+	}
+	return childDir, err
+}
+
+// CreateFifo creates a new named pipe under this dirent.
+func (d *Dirent) CreateFifo(ctx context.Context, root *Dirent, name string, perms FilePermissions) error {
+	return d.genericCreate(ctx, root, name, func() error {
+		if err := d.Inode.CreateFifo(ctx, d, name, perms); err != nil {
+			return err
+		}
+		d.Inode.Watches.Notify(name, linux.IN_CREATE, 0)
+		return nil
+	})
+}
+
+// GetDotAttrs returns the DentAttrs corresponding to "." and ".." directories.
+func (d *Dirent) GetDotAttrs(root *Dirent) (DentAttr, DentAttr) {
+	// Get '.'.
+	sattr := d.Inode.StableAttr
+	dot := DentAttr{
+		Type:    sattr.Type,
+		InodeID: sattr.InodeID,
+	}
+
+	// Hold d.mu while we call d.descendantOf.
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// Get '..'.
+	if !d.IsRoot() && d.descendantOf(root) {
+		// Dirent is a descendant of the root.  Get its parent's attrs.
+		psattr := d.parent.Inode.StableAttr
+		dotdot := DentAttr{
+			Type:    psattr.Type,
+			InodeID: psattr.InodeID,
+		}
+		return dot, dotdot
+	}
+	// Dirent is either root or not a descendant of the root.  ".." is the
+	// same as ".".
+	return dot, dot
+}
+
+// DirIterator is an open directory containing directory entries that can be read.
+type DirIterator interface {
+	// IterateDir emits directory entries by calling dirCtx.EmitDir, beginning
+	// with the entry at offset and returning the next directory offset.
+	//
+	// Entries for "." and ".." must *not* be included.
+	//
+	// If the offset returned is the same as the argument offset, then
+	// nothing has been serialized.  This is equivalent to reaching EOF.
+	// In this case serializer.Written() should return 0.
+	//
+	// The order of entries to emit must be consistent between Readdir
+	// calls, and must start with the given offset.
+	//
+	// The caller must ensure that this operation is permitted.
+	IterateDir(ctx context.Context, d *Dirent, dirCtx *DirCtx, offset int) (int, error)
+}
+
+// DirentReaddir serializes the directory entries of d including "." and "..".
+//
+// Arguments:
+//
+// * d:      the Dirent of the directory being read; required to provide "." and "..".
+// * it:     the directory iterator; which represents an open directory handle.
+// * root:   fs root; if d is equal to the root, then '..' will refer to d.
+// * ctx:    context provided to file systems in order to select and serialize entries.
+// * offset: the current directory offset.
+//
+// Returns the offset of the *next* element which was not serialized.
+func DirentReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent, dirCtx *DirCtx, offset int64) (int64, error) {
+	offset, err := direntReaddir(ctx, d, it, root, dirCtx, offset)
+	// Serializing any directory entries at all means success.
+	if dirCtx.Serializer.Written() > 0 {
+		return offset, nil
+	}
+	return offset, err
+}
+
+func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent, dirCtx *DirCtx, offset int64) (int64, error) {
+	if root == nil {
+		panic("Dirent.Readdir: root must not be nil")
+	}
+	if dirCtx.Serializer == nil {
+		panic("Dirent.Readdir: serializer must not be nil")
+	}
+
+	// Check that this is actually a directory before emitting anything.
+	// Once we have written entries for "." and "..", future errors from
+	// IterateDir will be hidden.
+	if !IsDir(d.Inode.StableAttr) {
+		return 0, syserror.ENOTDIR
+	}
+
+	// This is a special case for lseek(fd, 0, SEEK_END).
+	// See SeekWithDirCursor for more details.
+	if offset == FileMaxOffset {
+		return offset, nil
+	}
+
+	// Collect attrs for "." and "..".
+	dot, dotdot := d.GetDotAttrs(root)
+
+	// Emit "." and ".." if the offset is low enough.
+	if offset == 0 {
+		// Serialize ".".
+		if err := dirCtx.DirEmit(".", dot); err != nil {
+			return offset, err
+		}
+		offset++
+	}
+	if offset == 1 {
+		// Serialize "..".
+		if err := dirCtx.DirEmit("..", dotdot); err != nil {
+			return offset, err
+		}
+		offset++
+	}
+
+	// it.IterateDir should be passed an offset that does not include the
+	// initial dot elements.  We will add them back later.
+	offset -= 2
+	newOffset, err := it.IterateDir(ctx, d, dirCtx, int(offset))
+	if int64(newOffset) < offset {
+		panic(fmt.Sprintf("node.Readdir returned offset %v less than input offset %v", newOffset, offset))
+	}
+	// Add the initial nodes back to the offset count.
+	newOffset += 2
+	return int64(newOffset), err
+}
+
+// flush flushes all weak references recursively, and removes any cached
+// references to children.
+//
+// Preconditions: d.mu must be held.
+func (d *Dirent) flush() {
+	expired := make(map[string]*refs.WeakRef)
+	for n, w := range d.children {
+		// Call flush recursively on each child before removing our
+		// reference on it, and removing the cache's reference.
+		if child := w.Get(); child != nil {
+			cd := child.(*Dirent)
+
+			if !cd.IsNegative() {
+				// Flush the child.
+				cd.mu.Lock()
+				cd.flush()
+				cd.mu.Unlock()
+
+				// Allow the file system to drop extra references on child.
+				cd.dropExtendedReference()
+			}
+
+			// Don't leak a reference.
+			child.DecRef()
+		}
+		// Check if the child dirent is closed, and mark it as expired if it is.
+		// We must call w.Get() again here, since the child could have been closed
+		// by the calls to flush() and cache.Remove() in the above if-block.
+		if child := w.Get(); child != nil {
+			child.DecRef()
+		} else {
+			expired[n] = w
+		}
+	}
+
+	// Remove expired entries.
+	for n, w := range expired {
+		delete(d.children, n)
+		w.Drop()
+	}
+}
+
+// isMountPoint returns true if the dirent is a mount point or the root.
+func (d *Dirent) isMountPoint() bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.isMountPointLocked()
+}
+
+// isMountPointLocked is isMountPoint with d.mu already held.
+func (d *Dirent) isMountPointLocked() bool {
+	return d.mounted || d.parent == nil
+}
+
+// mount mounts a new dirent with the given inode over d.
+//
+// Precondition: must be called with mm.withMountLocked held on `d`.
+func (d *Dirent) mount(ctx context.Context, inode *Inode) (newChild *Dirent, err error) {
+	// Did we race with deletion?
+	if atomic.LoadInt32(&d.deleted) != 0 {
+		return nil, syserror.ENOENT
+	}
+
+	// Refuse to mount a symlink.
+	//
+	// See Linux equivalent in fs/namespace.c:do_add_mount.
+	if IsSymlink(inode.StableAttr) {
+		return nil, syserror.EINVAL
+	}
+
+	// Dirent that'll replace d.
+	//
+	// Note that NewDirent returns with one reference taken; the reference
+	// is donated to the caller as the mount reference.
+	replacement := NewDirent(ctx, inode, d.name)
+	replacement.mounted = true
+
+	weakRef, ok := d.parent.hashChild(replacement)
+	if !ok {
+		panic("mount must mount over an existing dirent")
+	}
+	weakRef.Drop()
+
+	// Note that even though `d` is now hidden, it still holds a reference
+	// to its parent.
+	return replacement, nil
+}
+
+// unmount unmounts `d` and replaces it with the last Dirent that was in its
+// place, supplied by the MountNamespace as `replacement`.
+//
+// Precondition: must be called with mm.withMountLocked held on `d`.
+func (d *Dirent) unmount(ctx context.Context, replacement *Dirent) error {
+	// Did we race with deletion?
+	if atomic.LoadInt32(&d.deleted) != 0 {
+		return syserror.ENOENT
+	}
+
+	// Remount our former child in its place.
+	//
+	// As replacement used to be our child, it must already have the right
+	// parent.
+	weakRef, ok := d.parent.hashChildParentSet(replacement)
+	if !ok {
+		panic("mount must mount over an existing dirent")
+	}
+	weakRef.Drop()
+
+	// d is not reachable anymore, and hence not mounted anymore.
+	d.mounted = false
+
+	// Drop mount reference.
+	d.DecRef()
+	return nil
+}
+
+// Remove removes the given file or symlink.  The root dirent is used to
+// resolve name, and must not be nil.
+func (d *Dirent) Remove(ctx context.Context, root *Dirent, name string, dirPath bool) error {
+	// Check the root.
+	if root == nil {
+		panic("Dirent.Remove: root must not be nil")
+	}
+
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	// Try to walk to the node.
+	child, err := d.walk(ctx, root, name, false /* may unlock */)
+	if err != nil {
+		// Child does not exist.
+		return err
+	}
+	defer child.DecRef()
+
+	// Remove cannot remove directories.
+	if IsDir(child.Inode.StableAttr) {
+		return syscall.EISDIR
+	} else if dirPath {
+		return syscall.ENOTDIR
+	}
+
+	// Remove cannot remove a mount point.
+	if child.isMountPoint() {
+		return syscall.EBUSY
+	}
+
+	// Try to remove name on the file system.
+	if err := d.Inode.Remove(ctx, d, child); err != nil {
+		return err
+	}
+
+	// Link count changed, this only applies to non-directory nodes.
+	child.Inode.Watches.Notify("", linux.IN_ATTRIB, 0)
+
+	// Mark name as deleted and remove from children.
+	atomic.StoreInt32(&child.deleted, 1)
+	if w, ok := d.children[name]; ok {
+		delete(d.children, name)
+		w.Drop()
+	}
+
+	// Allow the file system to drop extra references on child.
+	child.dropExtendedReference()
+
+	// Finally, let inotify know the child is being unlinked. Drop any extra
+	// refs from inotify to this child dirent. This doesn't necessarily mean the
+	// watches on the underlying inode will be destroyed, since the underlying
+	// inode may have other links. If this was the last link, the events for the
+	// watch removal will be queued by the inode destructor.
+	child.Inode.Watches.MarkUnlinked()
+	child.Inode.Watches.Unpin(child)
+	d.Inode.Watches.Notify(name, linux.IN_DELETE, 0)
+
+	return nil
+}
+
+// RemoveDirectory removes the given directory.  The root dirent is used to
+// resolve name, and must not be nil.
+func (d *Dirent) RemoveDirectory(ctx context.Context, root *Dirent, name string) error {
+	// Check the root.
+	if root == nil {
+		panic("Dirent.Remove: root must not be nil")
+	}
+
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	// Check for dots.
+	if name == "." {
+		// Rejected as the last component by rmdir(2).
+		return syscall.EINVAL
+	}
+	if name == ".." {
+		// If d was found, then its parent is not empty.
+		return syscall.ENOTEMPTY
+	}
+
+	// Try to walk to the node.
+	child, err := d.walk(ctx, root, name, false /* may unlock */)
+	if err != nil {
+		// Child does not exist.
+		return err
+	}
+	defer child.DecRef()
+
+	// RemoveDirectory can only remove directories.
+	if !IsDir(child.Inode.StableAttr) {
+		return syscall.ENOTDIR
+	}
+
+	// Remove cannot remove a mount point.
+	if child.isMountPoint() {
+		return syscall.EBUSY
+	}
+
+	// Try to remove name on the file system.
+	if err := d.Inode.Remove(ctx, d, child); err != nil {
+		return err
+	}
+
+	// Mark name as deleted and remove from children.
+	atomic.StoreInt32(&child.deleted, 1)
+	if w, ok := d.children[name]; ok {
+		delete(d.children, name)
+		w.Drop()
+	}
+
+	// Allow the file system to drop extra references on child.
+	child.dropExtendedReference()
+
+	// Finally, let inotify know the child is being unlinked. Drop any extra
+	// refs from inotify to this child dirent.
+	child.Inode.Watches.MarkUnlinked()
+	child.Inode.Watches.Unpin(child)
+	d.Inode.Watches.Notify(name, linux.IN_ISDIR|linux.IN_DELETE, 0)
+
+	return nil
+}
+
+// destroy closes this node and all children.
+func (d *Dirent) destroy() {
+	if d.IsNegative() {
+		// Nothing to tear-down and no parent references to drop, since a negative
+		// Dirent does not take a references on its parent, has no Inode and no children.
+		return
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	// Drop all weak references.
+	for _, w := range d.children {
+		if c := w.Get(); c != nil {
+			if c.(*Dirent).IsNegative() {
+				// The parent holds both weak and strong refs in the case of
+				// negative dirents.
+				c.DecRef()
+			}
+			// Drop the reference we just acquired in WeakRef.Get.
+			c.DecRef()
+		}
+		w.Drop()
+	}
+	d.children = nil
+
+	allDirents.remove(d)
+
+	// Drop our reference to the Inode.
+	d.Inode.DecRef()
+
+	// Allow the Dirent to be GC'ed after this point, since the Inode may still
+	// be referenced after the Dirent is destroyed (for instance by filesystem
+	// internal caches or hard links).
+	d.Inode = nil
+
+	// Drop the reference we have on our parent if we took one. renameMu doesn't need to be
+	// held because d can't be reparented without any references to it left.
+	if d.parent != nil {
+		d.parent.DecRef()
+	}
+}
+
+// IncRef increases the Dirent's refcount as well as its mount's refcount.
+//
+// IncRef implements RefCounter.IncRef.
+func (d *Dirent) IncRef() {
+	if d.Inode != nil {
+		d.Inode.MountSource.IncDirentRefs()
+	}
+	d.AtomicRefCount.IncRef()
+}
+
+// TryIncRef implements RefCounter.TryIncRef.
+func (d *Dirent) TryIncRef() bool {
+	ok := d.AtomicRefCount.TryIncRef()
+	if ok && d.Inode != nil {
+		d.Inode.MountSource.IncDirentRefs()
+	}
+	return ok
+}
+
+// DecRef decreases the Dirent's refcount and drops its reference on its mount.
+//
+// DecRef implements RefCounter.DecRef with destructor d.destroy.
+func (d *Dirent) DecRef() {
+	if d.Inode != nil {
+		// Keep mount around, since DecRef may destroy d.Inode.
+		msrc := d.Inode.MountSource
+		d.DecRefWithDestructor(d.destroy)
+		msrc.DecDirentRefs()
+	} else {
+		d.DecRefWithDestructor(d.destroy)
+	}
+}
+
+// InotifyEvent notifies all watches on the inode for this dirent and its parent
+// of potential events. The events may not actually propagate up to the user,
+// depending on the event masks. InotifyEvent automatically provides the name of
+// the current dirent as the subject of the event as required, and adds the
+// IN_ISDIR flag for dirents that refer to directories.
+func (d *Dirent) InotifyEvent(events, cookie uint32) {
+	// N.B. We don't defer the unlocks because InotifyEvent is in the hot
+	// path of all IO operations, and the defers cost too much for small IO
+	// operations.
+	renameMu.RLock()
+
+	if IsDir(d.Inode.StableAttr) {
+		events |= linux.IN_ISDIR
+	}
+
+	// The ordering below is important, Linux always notifies the parent first.
+	if d.parent != nil {
+		// name is immediately stale w.r.t. renames (renameMu doesn't
+		// protect against renames in the same directory). Holding
+		// d.parent.mu around Notify() wouldn't matter since Notify
+		// doesn't provide a synchronous mechanism for reading the name
+		// anyway.
+		d.parent.mu.Lock()
+		name := d.name
+		d.parent.mu.Unlock()
+		d.parent.Inode.Watches.Notify(name, events, cookie)
+	}
+	d.Inode.Watches.Notify("", events, cookie)
+
+	renameMu.RUnlock()
+}
+
+// maybeExtendReference caches a reference on this Dirent if
+// MountSourceOperations.Keep returns true.
+func (d *Dirent) maybeExtendReference() {
+	if msrc := d.Inode.MountSource; msrc.Keep(d) {
+		msrc.fscache.Add(d)
+	}
+}
+
+// dropExtendedReference drops any cached reference held by the
+// MountSource on the dirent.
+func (d *Dirent) dropExtendedReference() {
+	d.Inode.MountSource.fscache.Remove(d)
+}
+
+// lockForRename takes locks on oldParent and newParent as required by Rename
+// and returns a function that will unlock the locks taken. The returned
+// function must be called even if a non-nil error is returned.
+func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName string) (func(), error) {
+	renameMu.Lock()
+	if oldParent == newParent {
+		// Same-directory rename: a single mu suffices.
+		oldParent.mu.Lock()
+		return func() {
+			oldParent.mu.Unlock()
+			renameMu.Unlock()
+		}, nil
+	}
+
+	// Renaming between directories is a bit subtle:
+	//
+	// - A concurrent cross-directory Rename may try to lock in the opposite
+	// order; take renameMu to prevent this from happening.
+	//
+	// - If either directory is an ancestor of the other, then a concurrent
+	// Remove may lock the descendant (in DecRef -> closeAll) while holding a
+	// lock on the ancestor; to avoid this, ensure we take locks in the same
+	// ancestor-to-descendant order. (Holding renameMu prevents this
+	// relationship from changing.)
+
+	// First check if newParent is a descendant of oldParent.
+	child := newParent
+	for p := newParent.parent; p != nil; p = p.parent {
+		if p == oldParent {
+			oldParent.mu.Lock()
+			newParent.mu.Lock()
+			var err error
+			if child.name == oldName {
+				// newParent is not just a descendant of oldParent, but
+				// more specifically of oldParent/oldName. That is, we're
+				// trying to rename something into a subdirectory of
+				// itself.
+				err = syscall.EINVAL
+			}
+			return func() {
+				newParent.mu.Unlock()
+				oldParent.mu.Unlock()
+				renameMu.Unlock()
+			}, err
+		}
+		child = p
+	}
+
+	// Otherwise, either oldParent is a descendant of newParent or the two
+	// have no relationship; in either case we can do this:
+	newParent.mu.Lock()
+	oldParent.mu.Lock()
+	return func() {
+		oldParent.mu.Unlock()
+		newParent.mu.Unlock()
+		renameMu.Unlock()
+	}, nil
+}
+
+// checkSticky enforces sticky-bit (S_ISVTX) deletion rules on victim, a child
+// of d: if d's sticky bit is set, only d's owner, victim's owner, or a task
+// with CAP_FOWNER may delete victim. Compare Linux fs/namei.c:check_sticky.
+func (d *Dirent) checkSticky(ctx context.Context, victim *Dirent) error {
+	uattr, err := d.Inode.UnstableAttr(ctx)
+	if err != nil {
+		return syserror.EPERM
+	}
+	if !uattr.Perms.Sticky {
+		return nil
+	}
+
+	creds := auth.CredentialsFromContext(ctx)
+	if uattr.Owner.UID == creds.EffectiveKUID {
+		return nil
+	}
+
+	vuattr, err := victim.Inode.UnstableAttr(ctx)
+	if err != nil {
+		return syserror.EPERM
+	}
+	if vuattr.Owner.UID == creds.EffectiveKUID {
+		return nil
+	}
+	if victim.Inode.CheckCapability(ctx, linux.CAP_FOWNER) {
+		return nil
+	}
+	return syserror.EPERM
+}
+
+// MayDelete determines whether `name`, a child of `d`, can be deleted or
+// renamed by `ctx`.
+//
+// Compare Linux kernel fs/namei.c:may_delete.
+func (d *Dirent) MayDelete(ctx context.Context, root *Dirent, name string) error {
+	if err := d.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil {
+		return err
+	}
+
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	victim, err := d.walk(ctx, root, name, true /* may unlock */)
+	if err != nil {
+		return err
+	}
+	defer victim.DecRef()
+
+	return d.mayDelete(ctx, victim)
+}
+
+// mayDelete determines whether `victim`, a child of `dir`, can be deleted or
+// renamed by `ctx`.
+//
+// Preconditions: `dir` is writable and executable by `ctx`.
+func (d *Dirent) mayDelete(ctx context.Context, victim *Dirent) error { + if err := d.checkSticky(ctx, victim); err != nil { + return err + } + + if victim.IsRoot() { + return syserror.EBUSY + } + + return nil +} + +// Rename atomically converts the child of oldParent named oldName to a +// child of newParent named newName. +func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string, newParent *Dirent, newName string) error { + if root == nil { + panic("Rename: root must not be nil") + } + if oldParent == newParent && oldName == newName { + return nil + } + + // Acquire global renameMu lock, and mu locks on oldParent/newParent. + unlock, err := lockForRename(oldParent, oldName, newParent, newName) + defer unlock() + if err != nil { + return err + } + + // Do we have general permission to remove from oldParent and + // create/replace in newParent? + if err := oldParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil { + return err + } + if err := newParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil { + return err + } + + // renamed is the dirent that will be renamed to something else. + renamed, err := oldParent.walk(ctx, root, oldName, false /* may unlock */) + if err != nil { + return err + } + defer renamed.DecRef() + + // Check that the renamed dirent is deletable. + if err := oldParent.mayDelete(ctx, renamed); err != nil { + return err + } + + // Check that the renamed dirent is not a mount point. + if renamed.isMountPointLocked() { + return syscall.EBUSY + } + + // Source should not be an ancestor of the target. + if newParent.descendantOf(renamed) { + return syscall.EINVAL + } + + // Per rename(2): "... EACCES: ... or oldpath is a directory and does not + // allow write permission (needed to update the .. entry)." 
+ if IsDir(renamed.Inode.StableAttr) { + if err := renamed.Inode.CheckPermission(ctx, PermMask{Write: true}); err != nil { + return err + } + } + + // replaced is the dirent that is being overwritten by rename. + replaced, err := newParent.walk(ctx, root, newName, false /* may unlock */) + if err != nil { + if err != syserror.ENOENT { + return err + } + + // newName doesn't exist; simply create it below. + replaced = nil + } else { + // Check constraints on the dirent being replaced. + + // NOTE(b/111808347): We don't want to keep replaced alive + // across the Rename, so must call DecRef manually (no defer). + + // Check that we can delete replaced. + if err := newParent.mayDelete(ctx, replaced); err != nil { + replaced.DecRef() + return err + } + + // Target should not be an ancestor of source. + if oldParent.descendantOf(replaced) { + replaced.DecRef() + + // Note that Linux returns EINVAL if the source is an + // ancestor of target, but ENOTEMPTY if the target is + // an ancestor of source (unless RENAME_EXCHANGE flag + // is present). See fs/namei.c:renameat2. + return syscall.ENOTEMPTY + } + + // Check that replaced is not a mount point. + if replaced.isMountPointLocked() { + replaced.DecRef() + return syscall.EBUSY + } + + // Require that a directory is replaced by a directory. + oldIsDir := IsDir(renamed.Inode.StableAttr) + newIsDir := IsDir(replaced.Inode.StableAttr) + if !newIsDir && oldIsDir { + replaced.DecRef() + return syscall.ENOTDIR + } + if !oldIsDir && newIsDir { + replaced.DecRef() + return syscall.EISDIR + } + + // Allow the file system to drop extra references on replaced. + replaced.dropExtendedReference() + + // NOTE(b/31798319,b/31867149,b/31867671): Keeping a dirent + // open across renames is currently broken for multiple + // reasons, so we flush all references on the replaced node and + // its children. + replaced.Inode.Watches.Unpin(replaced) + replaced.mu.Lock() + replaced.flush() + replaced.mu.Unlock() + + // Done with replaced. 
+		replaced.DecRef()
+	}
+
+	if err := renamed.Inode.Rename(ctx, oldParent, renamed, newParent, newName, replaced != nil); err != nil {
+		return err
+	}
+
+	renamed.name = newName
+	renamed.parent = newParent
+	if oldParent != newParent {
+		// Reparent the reference held by renamed.parent. oldParent.DecRef
+		// can't destroy oldParent (and try to retake its lock) because
+		// Rename's caller must be holding a reference.
+		newParent.IncRef()
+		oldParent.DecRef()
+	}
+	if w, ok := newParent.children[newName]; ok {
+		w.Drop()
+		delete(newParent.children, newName)
+	}
+	if w, ok := oldParent.children[oldName]; ok {
+		w.Drop()
+		delete(oldParent.children, oldName)
+	}
+
+	// Add a weak reference from the new parent. This ensures that the child
+	// can still be found from the new parent if a prior hard reference is
+	// held on renamed.
+	//
+	// This is required for file lock correctness because file locks are per-Dirent
+	// and without maintaining a cached child (via a weak reference) for renamed,
+	// multiple Dirents can correspond to the same resource (by virtue of the renamed
+	// Dirent being unreachable by its parent and it being looked up).
+	newParent.children[newName] = refs.NewWeakRef(renamed, nil)
+
+	// Queue inotify events for the rename.
+	var ev uint32
+	if IsDir(renamed.Inode.StableAttr) {
+		ev |= linux.IN_ISDIR
+	}
+
+	cookie := uniqueid.InotifyCookie(ctx)
+	oldParent.Inode.Watches.Notify(oldName, ev|linux.IN_MOVED_FROM, cookie)
+	newParent.Inode.Watches.Notify(newName, ev|linux.IN_MOVED_TO, cookie)
+	// Somewhat surprisingly, self move events do not have a cookie.
+	renamed.Inode.Watches.Notify("", linux.IN_MOVE_SELF, 0)
+
+	// Allow the file system to drop extra references on renamed.
+	renamed.dropExtendedReference()
+
+	// Same as replaced.flush above.
+	renamed.mu.Lock()
+	renamed.flush()
+	renamed.mu.Unlock()
+
+	return nil
+}
diff --git a/pkg/sentry/fs/dirent_cache.go b/pkg/sentry/fs/dirent_cache.go
new file mode 100644
index 000000000..33de32c69
--- /dev/null
+++ b/pkg/sentry/fs/dirent_cache.go
@@ -0,0 +1,174 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+
+	"gvisor.dev/gvisor/pkg/sync"
+)
+
+// DirentCache is an LRU cache of Dirents. The Dirent's refCount is
+// incremented when it is added to the cache, and decremented when it is
+// removed.
+//
+// A nil DirentCache corresponds to a cache with size 0. All methods can be
+// called, but nothing is actually cached.
+//
+// +stateify savable
+type DirentCache struct {
+	// Maximum size of the cache. This must be saved manually, to handle the case
+	// when cache is nil.
+	maxSize uint64
+
+	// limit restricts the number of entries in the cache among multiple caches.
+	// It may be nil if there is no global limit for this cache.
+	limit *DirentCacheLimiter
+
+	// mu protects currentSize and direntList.
+	mu sync.Mutex `state:"nosave"`
+
+	// currentSize is the number of elements in the cache. It must be zero (i.e.
+	// the cache must be empty) on Save.
+	currentSize uint64 `state:"zerovalue"`
+
+	// list is a direntList, an ilist of Dirents. New Dirents are added
+	// to the front of the list. Old Dirents are removed from the back of
+	// the list.
It must be zerovalue (i.e. the cache must be empty) on Save. + list direntList `state:"zerovalue"` +} + +// NewDirentCache returns a new DirentCache with the given maxSize. +func NewDirentCache(maxSize uint64) *DirentCache { + return &DirentCache{ + maxSize: maxSize, + } +} + +// Add adds the element to the cache and increments the refCount. If the +// argument is already in the cache, it is moved to the front. An element is +// removed from the back if the cache is over capacity. +func (c *DirentCache) Add(d *Dirent) { + if c == nil || c.maxSize == 0 { + return + } + + c.mu.Lock() + if c.contains(d) { + // d is already in cache. Bump it to the front. + // currentSize and refCount are unaffected. + c.list.Remove(d) + c.list.PushFront(d) + c.mu.Unlock() + return + } + + // First check against the global limit. + for c.limit != nil && !c.limit.tryInc() { + if c.currentSize == 0 { + // If the global limit is reached, but there is nothing more to drop from + // this cache, there is not much else to do. + c.mu.Unlock() + return + } + c.remove(c.list.Back()) + } + + // d is not in cache. Add it and take a reference. + c.list.PushFront(d) + d.IncRef() + c.currentSize++ + + c.maybeShrink() + + c.mu.Unlock() +} + +func (c *DirentCache) remove(d *Dirent) { + if !c.contains(d) { + panic(fmt.Sprintf("trying to remove %v, which is not in the dirent cache", d)) + } + c.list.Remove(d) + d.DecRef() + c.currentSize-- + if c.limit != nil { + c.limit.dec() + } +} + +// Remove removes the element from the cache and decrements its refCount. It +// also sets the previous and next elements to nil, which allows us to +// determine if a given element is in the cache. +func (c *DirentCache) Remove(d *Dirent) { + if c == nil || c.maxSize == 0 { + return + } + c.mu.Lock() + if !c.contains(d) { + c.mu.Unlock() + return + } + c.remove(d) + c.mu.Unlock() +} + +// Size returns the number of elements in the cache. 
+func (c *DirentCache) Size() uint64 { + if c == nil { + return 0 + } + c.mu.Lock() + size := c.currentSize + c.mu.Unlock() + return size +} + +func (c *DirentCache) contains(d *Dirent) bool { + // If d has a Prev or Next element, then it is in the cache. + if d.Prev() != nil || d.Next() != nil { + return true + } + // Otherwise, d is in the cache if it is the only element (and thus the + // first element). + return c.list.Front() == d +} + +// Invalidate removes all Dirents from the cache, calling DecRef on each. +func (c *DirentCache) Invalidate() { + if c == nil { + return + } + c.mu.Lock() + for c.list.Front() != nil { + c.remove(c.list.Front()) + } + c.mu.Unlock() +} + +// setMaxSize sets cache max size. If current size is larger than max size, the +// cache shrinks to accommodate the new max. +func (c *DirentCache) setMaxSize(max uint64) { + c.mu.Lock() + c.maxSize = max + c.maybeShrink() + c.mu.Unlock() +} + +// shrink removes the oldest element until the list is under the size limit. +func (c *DirentCache) maybeShrink() { + for c.maxSize > 0 && c.currentSize > c.maxSize { + c.remove(c.list.Back()) + } +} diff --git a/pkg/sentry/fs/dirent_cache_limiter.go b/pkg/sentry/fs/dirent_cache_limiter.go new file mode 100644 index 000000000..525ee25f9 --- /dev/null +++ b/pkg/sentry/fs/dirent_cache_limiter.go @@ -0,0 +1,56 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package fs
+
+import (
+	"fmt"
+
+	"gvisor.dev/gvisor/pkg/sync"
+)
+
+// DirentCacheLimiter acts as a global limit for all dirent caches in the
+// process.
+//
+// +stateify savable
+type DirentCacheLimiter struct {
+	mu    sync.Mutex `state:"nosave"`
+	max   uint64
+	count uint64 `state:"zerovalue"`
+}
+
+// NewDirentCacheLimiter creates a new DirentCacheLimiter.
+func NewDirentCacheLimiter(max uint64) *DirentCacheLimiter {
+	return &DirentCacheLimiter{max: max}
+}
+
+// tryInc attempts to account for one more cached dirent against the global
+// limit. It returns true on success, or false if the limit has already been
+// reached (in which case the count is left unchanged).
+func (d *DirentCacheLimiter) tryInc() bool {
+	d.mu.Lock()
+	if d.count >= d.max {
+		d.mu.Unlock()
+		return false
+	}
+	d.count++
+	d.mu.Unlock()
+	return true
+}
+
+// dec releases one previously accounted cached dirent. It panics if the count
+// underflows, since that indicates an unbalanced tryInc/dec pair.
+func (d *DirentCacheLimiter) dec() {
+	d.mu.Lock()
+	if d.count == 0 {
+		panic(fmt.Sprintf("underflowing DirentCacheLimiter count: %+v", d))
+	}
+	d.count--
+	d.mu.Unlock()
+}
diff --git a/pkg/sentry/fs/dirent_cache_test.go b/pkg/sentry/fs/dirent_cache_test.go
new file mode 100644
index 000000000..395c879f5
--- /dev/null
+++ b/pkg/sentry/fs/dirent_cache_test.go
@@ -0,0 +1,247 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"testing"
+)
+
+func TestDirentCache(t *testing.T) {
+	const maxSize = 5
+
+	c := NewDirentCache(maxSize)
+
+	// Size starts at 0.
+	if got, want := c.Size(), uint64(0); got != want {
+		t.Errorf("c.Size() got %v, want %v", got, want)
+	}
+
+	// Create a Dirent d.
+	d := NewNegativeDirent("")
+
+	// c does not contain d.
+ if got, want := c.contains(d), false; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add d to the cache. + c.Add(d) + + // Size is now 1. + if got, want := c.Size(), uint64(1); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add maxSize-1 more elements. d should be oldest element. + for i := 0; i < maxSize-1; i++ { + c.Add(NewNegativeDirent("")) + } + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // "Bump" d to the front by re-adding it. + c.Add(d) + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add maxSize-1 more elements. d should again be oldest element. + for i := 0; i < maxSize-1; i++ { + c.Add(NewNegativeDirent("")) + } + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add one more element, which will bump d from the cache. + c.Add(NewNegativeDirent("")) + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c does not contain d. + if got, want := c.contains(d), false; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Invalidating causes size to be 0 and list to be empty. 
+	c.Invalidate()
+	if got, want := c.Size(), uint64(0); got != want {
+		t.Errorf("c.Size() got %v, want %v", got, want)
+	}
+	if got, want := c.list.Empty(), true; got != want {
+		t.Errorf("c.list.Empty() got %v, want %v", got, want)
+	}
+
+	// Fill cache with maxSize dirents.
+	for i := 0; i < maxSize; i++ {
+		c.Add(NewNegativeDirent(""))
+	}
+}
+
+func TestDirentCacheLimiter(t *testing.T) {
+	const (
+		globalMaxSize = 5
+		maxSize       = 3
+	)
+
+	limit := NewDirentCacheLimiter(globalMaxSize)
+	c1 := NewDirentCache(maxSize)
+	c1.limit = limit
+	c2 := NewDirentCache(maxSize)
+	c2.limit = limit
+
+	// Create a Dirent d.
+	d := NewNegativeDirent("")
+
+	// Add d to the cache.
+	c1.Add(d)
+	if got, want := c1.Size(), uint64(1); got != want {
+		t.Errorf("c1.Size() got %v, want %v", got, want)
+	}
+
+	// Add maxSize-1 more elements. d should be oldest element.
+	for i := 0; i < maxSize-1; i++ {
+		c1.Add(NewNegativeDirent(""))
+	}
+	if got, want := c1.Size(), uint64(maxSize); got != want {
+		t.Errorf("c1.Size() got %v, want %v", got, want)
+	}
+
+	// Check that d is still there.
+	if got, want := c1.contains(d), true; got != want {
+		t.Errorf("c1.contains(d) got %v want %v", got, want)
+	}
+
+	// Fill up the other cache, it will start dropping old entries from the cache
+	// when the global limit is reached.
+	for i := 0; i < maxSize; i++ {
+		c2.Add(NewNegativeDirent(""))
+	}
+
+	// Check that c2 only holds what remains of the global max.
+	if got, want := c2.Size(), globalMaxSize-maxSize; int(got) != want {
+		t.Errorf("c2.Size() got %v, want %v", got, want)
+	}
+
+	// Check that d was not dropped.
+	if got, want := c1.contains(d), true; got != want {
+		t.Errorf("c1.contains(d) got %v want %v", got, want)
+	}
+
+	// Add an entry that will eventually be dropped. Check is done later...
+	drop := NewNegativeDirent("")
+	c1.Add(drop)
+
+	// Check that d is bumped to front even when global limit is reached.
+ c1.Add(d) + if got, want := c1.contains(d), true; got != want { + t.Errorf("c1.contains(d) got %v want %v", got, want) + } + + // Add 2 more element and check that: + // - d is still in the list: to verify that d was bumped + // - d2/d3 are in the list: older entries are dropped when global limit is + // reached. + // - drop is not in the list: indeed older elements are dropped. + d2 := NewNegativeDirent("") + c1.Add(d2) + d3 := NewNegativeDirent("") + c1.Add(d3) + if got, want := c1.contains(d), true; got != want { + t.Errorf("c1.contains(d) got %v want %v", got, want) + } + if got, want := c1.contains(d2), true; got != want { + t.Errorf("c1.contains(d2) got %v want %v", got, want) + } + if got, want := c1.contains(d3), true; got != want { + t.Errorf("c1.contains(d3) got %v want %v", got, want) + } + if got, want := c1.contains(drop), false; got != want { + t.Errorf("c1.contains(drop) got %v want %v", got, want) + } + + // Drop all entries from one cache. The other will be allowed to grow. + c1.Invalidate() + c2.Add(NewNegativeDirent("")) + if got, want := c2.Size(), uint64(maxSize); got != want { + t.Errorf("c2.Size() got %v, want %v", got, want) + } +} + +// TestNilDirentCache tests that a nil cache supports all cache operations, but +// treats them as noop. +func TestNilDirentCache(t *testing.T) { + // Create a nil cache. + var c *DirentCache + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Add. + c.Add(NewNegativeDirent("")) + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Remove. + c.Remove(NewNegativeDirent("")) + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Invalidate. + c.Invalidate() + + // Size is zero. 
+ if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } +} diff --git a/pkg/sentry/fs/dirent_refs_test.go b/pkg/sentry/fs/dirent_refs_test.go new file mode 100644 index 000000000..98d69c6f2 --- /dev/null +++ b/pkg/sentry/fs/dirent_refs_test.go @@ -0,0 +1,418 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "syscall" + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/contexttest" +) + +func newMockDirInode(ctx context.Context, cache *DirentCache) *Inode { + return NewMockInode(ctx, NewMockMountSource(cache), StableAttr{Type: Directory}) +} + +func TestWalkPositive(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. 
+ + ctx := contexttest.Context(t) + root := NewDirent(ctx, newMockDirInode(ctx, nil), "root") + + if got := root.ReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + name := "d" + d, err := root.walk(ctx, root, name, false) + if err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + + if got := root.ReadRefs(); got != 2 { + t.Fatalf("root has a ref count of %d, want %d", got, 2) + } + + if got := d.ReadRefs(); got != 1 { + t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, 1) + } + + d.DecRef() + + if got := root.ReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + if got := d.ReadRefs(); got != 0 { + t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, 0) + } + + root.flush() + + if got := len(root.children); got != 0 { + t.Fatalf("root has %d children, want %d", got, 0) + } +} + +func TestWalkNegative(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. 
+ + ctx := contexttest.Context(t) + root := NewDirent(ctx, NewEmptyDir(ctx, nil), "root") + mn := root.Inode.InodeOperations.(*mockInodeOperationsLookupNegative) + + if got := root.ReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + name := "d" + for i := 0; i < 100; i++ { + _, err := root.walk(ctx, root, name, false) + if err != syscall.ENOENT { + t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, syscall.ENOENT) + } + } + + if got := root.ReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + if got := len(root.children); got != 1 { + t.Fatalf("root has %d children, want %d", got, 1) + } + + w, ok := root.children[name] + if !ok { + t.Fatalf("root wants child at %q", name) + } + + child := w.Get() + if child == nil { + t.Fatalf("root wants to resolve weak reference") + } + + if !child.(*Dirent).IsNegative() { + t.Fatalf("root found positive child at %q, want negative", name) + } + + if got := child.(*Dirent).ReadRefs(); got != 2 { + t.Fatalf("child has a ref count of %d, want %d", got, 2) + } + + child.DecRef() + + if got := child.(*Dirent).ReadRefs(); got != 1 { + t.Fatalf("child has a ref count of %d, want %d", got, 1) + } + + if got := len(root.children); got != 1 { + t.Fatalf("root has %d children, want %d", got, 1) + } + + root.DecRef() + + if got := root.ReadRefs(); got != 0 { + t.Fatalf("root has a ref count of %d, want %d", got, 0) + } + + AsyncBarrier() + + if got := mn.releaseCalled; got != true { + t.Fatalf("root.Close was called %v, want true", got) + } +} + +type mockInodeOperationsLookupNegative struct { + *MockInodeOperations + releaseCalled bool +} + +func NewEmptyDir(ctx context.Context, cache *DirentCache) *Inode { + m := NewMockMountSource(cache) + return NewInode(ctx, &mockInodeOperationsLookupNegative{ + MockInodeOperations: NewMockInodeOperations(ctx), + }, m, StableAttr{Type: Directory}) +} + +func (m *mockInodeOperationsLookupNegative) Lookup(ctx 
context.Context, dir *Inode, p string) (*Dirent, error) { + return NewNegativeDirent(p), nil +} + +func (m *mockInodeOperationsLookupNegative) Release(context.Context) { + m.releaseCalled = true +} + +func TestHashNegativeToPositive(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + root := NewDirent(ctx, NewEmptyDir(ctx, nil), "root") + + name := "d" + _, err := root.walk(ctx, root, name, false) + if err != syscall.ENOENT { + t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, syscall.ENOENT) + } + + if got := root.exists(ctx, root, name); got != false { + t.Fatalf("got %q exists, want does not exist", name) + } + + f, err := root.Create(ctx, root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(%q, _), got error %v, want nil", name, err) + } + d := f.Dirent + + if d.IsNegative() { + t.Fatalf("got negative Dirent, want positive") + } + + if got := d.ReadRefs(); got != 1 { + t.Fatalf("child %q has a ref count of %d, want %d", name, got, 1) + } + + if got := root.ReadRefs(); got != 2 { + t.Fatalf("root has a ref count of %d, want %d", got, 2) + } + + if got := len(root.children); got != 1 { + t.Fatalf("got %d children, want %d", got, 1) + } + + w, ok := root.children[name] + if !ok { + t.Fatalf("failed to find weak reference to %q", name) + } + + child := w.Get() + if child == nil { + t.Fatalf("want to resolve weak reference") + } + + if child.(*Dirent) != d { + t.Fatalf("got foreign child") + } +} + +func TestRevalidate(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + for _, test := range []struct { + // desc is the test's description. + desc string + + // Whether to make negative Dirents. 
+ makeNegative bool + }{ + { + desc: "Revalidate negative Dirent", + makeNegative: true, + }, + { + desc: "Revalidate positive Dirent", + makeNegative: false, + }, + } { + t.Run(test.desc, func(t *testing.T) { + ctx := contexttest.Context(t) + root := NewDirent(ctx, NewMockInodeRevalidate(ctx, test.makeNegative), "root") + + name := "d" + d1, err := root.walk(ctx, root, name, false) + if !test.makeNegative && err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + d2, err := root.walk(ctx, root, name, false) + if !test.makeNegative && err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + if !test.makeNegative && d1 == d2 { + t.Fatalf("revalidating walk got same *Dirent, want different") + } + if got := len(root.children); got != 1 { + t.Errorf("revalidating walk got %d children, want %d", got, 1) + } + }) + } +} + +type MockInodeOperationsRevalidate struct { + *MockInodeOperations + makeNegative bool +} + +func NewMockInodeRevalidate(ctx context.Context, makeNegative bool) *Inode { + mn := NewMockInodeOperations(ctx) + m := NewMockMountSource(nil) + m.MountSourceOperations.(*MockMountSourceOps).revalidate = true + return NewInode(ctx, &MockInodeOperationsRevalidate{MockInodeOperations: mn, makeNegative: makeNegative}, m, StableAttr{Type: Directory}) +} + +func (m *MockInodeOperationsRevalidate) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) { + if !m.makeNegative { + return m.MockInodeOperations.Lookup(ctx, dir, p) + } + return NewNegativeDirent(p), nil +} + +func TestCreateExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. + desc string + + // root is the Dirent to create from. + root *Dirent + + // expected references on walked Dirent. 
+ refs int64 + }{ + { + desc: "Create caching", + root: NewDirent(ctx, NewEmptyDir(ctx, NewDirentCache(1)), "root"), + refs: 2, + }, + { + desc: "Create not caching", + root: NewDirent(ctx, NewEmptyDir(ctx, nil), "root"), + refs: 1, + }, + } { + t.Run(test.desc, func(t *testing.T) { + name := "d" + f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(root, %q) failed: %v", name, err) + } + d := f.Dirent + + if got := d.ReadRefs(); got != test.refs { + t.Errorf("dirent has a ref count of %d, want %d", got, test.refs) + } + }) + } +} + +func TestRemoveExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. + desc string + + // root is the Dirent to make and remove from. + root *Dirent + }{ + { + desc: "Remove caching", + root: NewDirent(ctx, NewEmptyDir(ctx, NewDirentCache(1)), "root"), + }, + { + desc: "Remove not caching", + root: NewDirent(ctx, NewEmptyDir(ctx, nil), "root"), + }, + } { + t.Run(test.desc, func(t *testing.T) { + name := "d" + f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(%q, _) failed: %v", name, err) + } + d := f.Dirent + + if err := test.root.Remove(contexttest.Context(t), test.root, name, false /* dirPath */); err != nil { + t.Fatalf("root.Remove(root, %q) failed: %v", name, err) + } + + if got := d.ReadRefs(); got != 1 { + t.Fatalf("dirent has a ref count of %d, want %d", got, 1) + } + + d.DecRef() + + test.root.flush() + + if got := len(test.root.children); got != 0 { + t.Errorf("root has %d children, want %d", got, 0) + } + }) + } +} + +func TestRenameExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + for _, test := range []struct { + // desc is the test's description. 
+ desc string + + // cache of extra Dirent references, may be nil. + cache *DirentCache + }{ + { + desc: "Rename no caching", + cache: nil, + }, + { + desc: "Rename caching", + cache: NewDirentCache(5), + }, + } { + t.Run(test.desc, func(t *testing.T) { + ctx := contexttest.Context(t) + + dirAttr := StableAttr{Type: Directory} + + oldParent := NewDirent(ctx, NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "old_parent") + newParent := NewDirent(ctx, NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "new_parent") + + renamed, err := oldParent.Walk(ctx, oldParent, "old_child") + if err != nil { + t.Fatalf("Walk(oldParent, %q) got error %v, want nil", "old_child", err) + } + replaced, err := newParent.Walk(ctx, oldParent, "new_child") + if err != nil { + t.Fatalf("Walk(newParent, %q) got error %v, want nil", "new_child", err) + } + + if err := Rename(contexttest.RootContext(t), oldParent /*root */, oldParent, "old_child", newParent, "new_child"); err != nil { + t.Fatalf("Rename got error %v, want nil", err) + } + + oldParent.flush() + newParent.flush() + + // Expect to have only active references. + if got := renamed.ReadRefs(); got != 1 { + t.Errorf("renamed has ref count %d, want only active references %d", got, 1) + } + if got := replaced.ReadRefs(); got != 1 { + t.Errorf("replaced has ref count %d, want only active references %d", got, 1) + } + }) + } +} diff --git a/pkg/sentry/fs/dirent_state.go b/pkg/sentry/fs/dirent_state.go new file mode 100644 index 000000000..f623d6c0e --- /dev/null +++ b/pkg/sentry/fs/dirent_state.go @@ -0,0 +1,77 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "sync/atomic" + + "gvisor.dev/gvisor/pkg/refs" +) + +// beforeSave is invoked by stateify. +func (d *Dirent) beforeSave() { + // Refuse to save if the file is on a non-virtual file system and has + // already been deleted (but still has open fds, which is why the Dirent + // is still accessible). We know that the restore re-opening of the file + // will always fail. This condition will last until all the open fds and + // this Dirent are closed and released. + // + // Such "dangling" open files on virtual file systems (e.g., tmpfs) are + // OK to save as their restore does not require re-opening the files. + // + // Note that this is rejection rather than failure---it would be + // perfectly OK to save---we are simply disallowing it here to prevent + // generating non-restorable state dumps. As the program continues its + // execution, it may become allowed to save again. + if !d.Inode.IsVirtual() && atomic.LoadInt32(&d.deleted) != 0 { + n, _ := d.FullName(nil /* root */) + panic(ErrSaveRejection{fmt.Errorf("deleted file %q still has open fds", n)}) + } +} + +// saveChildren is invoked by stateify. +func (d *Dirent) saveChildren() map[string]*Dirent { + c := make(map[string]*Dirent) + for name, w := range d.children { + if rc := w.Get(); rc != nil { + // Drop the reference count obtained in w.Get(). + rc.DecRef() + + cd := rc.(*Dirent) + if cd.IsNegative() { + // Don't bother saving negative Dirents. + continue + } + c[name] = cd + } + } + return c +} + +// loadChildren is invoked by stateify. 
+func (d *Dirent) loadChildren(children map[string]*Dirent) { + d.children = make(map[string]*refs.WeakRef) + for name, c := range children { + d.children[name] = refs.NewWeakRef(c, nil) + } +} + +// afterLoad is invoked by stateify. +func (d *Dirent) afterLoad() { + if d.userVisible { + allDirents.add(d) + } +} diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD new file mode 100644 index 000000000..1d09e983c --- /dev/null +++ b/pkg/sentry/fs/fdpipe/BUILD @@ -0,0 +1,48 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "fdpipe", + srcs = [ + "pipe.go", + "pipe_opener.go", + "pipe_state.go", + ], + imports = ["gvisor.dev/gvisor/pkg/sentry/fs"], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/context", + "//pkg/fd", + "//pkg/fdnotifier", + "//pkg/log", + "//pkg/safemem", + "//pkg/secio", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "fdpipe_test", + size = "small", + srcs = [ + "pipe_opener_test.go", + "pipe_test.go", + ], + library = ":fdpipe", + deps = [ + "//pkg/context", + "//pkg/fd", + "//pkg/fdnotifier", + "//pkg/sentry/contexttest", + "//pkg/sentry/fs", + "//pkg/syserror", + "//pkg/usermem", + "@com_github_google_uuid//:go_default_library", + ], +) diff --git a/pkg/sentry/fs/fdpipe/pipe.go b/pkg/sentry/fs/fdpipe/pipe.go new file mode 100644 index 000000000..9fce177ad --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe.go @@ -0,0 +1,168 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fdpipe implements common namedpipe opening and accessing logic. +package fdpipe + +import ( + "os" + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/secio" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// pipeOperations are the fs.FileOperations of a host pipe. +// +// +stateify savable +type pipeOperations struct { + fsutil.FilePipeSeek `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoFsync `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.Queue `state:"nosave"` + + // flags are the flags used to open the pipe. + flags fs.FileFlags `state:".(fs.FileFlags)"` + + // opener is how the pipe was opened. + opener NonBlockingOpener `state:"wait"` + + // file represents the host pipe. + file *fd.FD `state:"nosave"` + + // mu protects readAheadBuffer access below. + mu sync.Mutex `state:"nosave"` + + // readAheadBuffer contains read bytes that have not yet been read + // by the application but need to be buffered for save-restore for correct + // opening semantics. 
The readAheadBuffer will only be non-empty when the + // pipe is first opened and will be drained by subsequent reads on the pipe. + readAheadBuffer []byte +} + +// newPipeOperations returns an implementation of fs.FileOperations for a pipe. +func newPipeOperations(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags, file *fd.FD, readAheadBuffer []byte) (*pipeOperations, error) { + pipeOps := &pipeOperations{ + flags: flags, + opener: opener, + file: file, + readAheadBuffer: readAheadBuffer, + } + if err := pipeOps.init(); err != nil { + return nil, err + } + return pipeOps, nil +} + +// init initializes p.file. +func (p *pipeOperations) init() error { + var s syscall.Stat_t + if err := syscall.Fstat(p.file.FD(), &s); err != nil { + log.Warningf("pipe: cannot stat fd %d: %v", p.file.FD(), err) + return syscall.EINVAL + } + if (s.Mode & syscall.S_IFMT) != syscall.S_IFIFO { + log.Warningf("pipe: cannot load fd %d as pipe, file type: %o", p.file.FD(), s.Mode) + return syscall.EINVAL + } + if err := syscall.SetNonblock(p.file.FD(), true); err != nil { + return err + } + return fdnotifier.AddFD(int32(p.file.FD()), &p.Queue) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (p *pipeOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + p.Queue.EventRegister(e, mask) + fdnotifier.UpdateFD(int32(p.file.FD())) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (p *pipeOperations) EventUnregister(e *waiter.Entry) { + p.Queue.EventUnregister(e) + fdnotifier.UpdateFD(int32(p.file.FD())) +} + +// Readiness returns a mask of ready events for stream. +func (p *pipeOperations) Readiness(mask waiter.EventMask) (eventMask waiter.EventMask) { + return fdnotifier.NonBlockingPoll(int32(p.file.FD()), mask) +} + +// Release implements fs.FileOperations.Release. +func (p *pipeOperations) Release() { + fdnotifier.RemoveFD(int32(p.file.FD())) + p.file.Close() + p.file = nil +} + +// Read implements fs.FileOperations.Read. 
+func (p *pipeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + // Drain the read ahead buffer, if it contains anything first. + var bufN int + var bufErr error + p.mu.Lock() + if len(p.readAheadBuffer) > 0 { + bufN, bufErr = dst.CopyOut(ctx, p.readAheadBuffer) + p.readAheadBuffer = p.readAheadBuffer[bufN:] + dst = dst.DropFirst(bufN) + } + p.mu.Unlock() + if dst.NumBytes() == 0 || bufErr != nil { + return int64(bufN), bufErr + } + + // Pipes expect full reads. + n, err := dst.CopyOutFrom(ctx, safemem.FromIOReader{secio.FullReader{p.file}}) + total := int64(bufN) + n + if err != nil && isBlockError(err) { + return total, syserror.ErrWouldBlock + } + return total, err +} + +// Write implements fs.FileOperations.Write. +func (p *pipeOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + n, err := src.CopyInTo(ctx, safemem.FromIOWriter{p.file}) + if err != nil && isBlockError(err) { + return n, syserror.ErrWouldBlock + } + return n, err +} + +// isBlockError unwraps os errors and checks if they are caused by EAGAIN or +// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock. +func isBlockError(err error) bool { + if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK { + return true + } + if pe, ok := err.(*os.PathError); ok { + return isBlockError(pe.Err) + } + return false +} diff --git a/pkg/sentry/fs/fdpipe/pipe_opener.go b/pkg/sentry/fs/fdpipe/pipe_opener.go new file mode 100644 index 000000000..0c3595998 --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_opener.go @@ -0,0 +1,193 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "io" + "os" + "syscall" + "time" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/syserror" +) + +// NonBlockingOpener is a generic host file opener used to retry opening host +// pipes if necessary. +type NonBlockingOpener interface { + // NonBlockingOpen tries to open a host pipe in a non-blocking way, + // and otherwise returns an error. Implementations should be idempotent. + NonBlockingOpen(context.Context, fs.PermMask) (*fd.FD, error) +} + +// Open blocks until a host pipe can be opened or the action was cancelled. +// On success, returns fs.FileOperations wrapping the opened host pipe. +func Open(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (fs.FileOperations, error) { + p := &pipeOpenState{} + canceled := false + for { + if file, err := p.TryOpen(ctx, opener, flags); err != syserror.ErrWouldBlock { + return file, err + } + + // Honor the cancellation request if open still blocks. + if canceled { + // If we were canceled but we have a handle to a host + // file, we need to close it. + if p.hostFile != nil { + p.hostFile.Close() + } + return nil, syserror.ErrInterrupted + } + + cancel := ctx.SleepStart() + select { + case <-cancel: + // The cancellation request received here really says + // "cancel from now on (or ASAP)". Any environmental + // changes happened before receiving it, that might have + // caused open to not block anymore, should still be + // respected. So we cannot just return here. 
We have to + // give open another try below first. + canceled = true + ctx.SleepFinish(false) + case <-time.After(100 * time.Millisecond): + // If we would block, then delay retrying for a bit, since there + // is no way to know when the pipe would be ready to be + // re-opened. This is identical to sending an event notification + // to stop blocking in Task.Block, given that this routine will + // stop retrying if a cancelation is received. + ctx.SleepFinish(true) + } + } +} + +// pipeOpenState holds state needed to open a blocking named pipe read only, for instance the +// file that has been opened but doesn't yet have a corresponding writer. +type pipeOpenState struct { + // hostFile is the read only named pipe which lacks a corresponding writer. + hostFile *fd.FD +} + +// unwrapError is needed to match against ENXIO primarily. +func unwrapError(err error) error { + if pe, ok := err.(*os.PathError); ok { + return pe.Err + } + return err +} + +// TryOpen uses a NonBlockingOpener to try to open a host pipe, respecting the fs.FileFlags. +func (p *pipeOpenState) TryOpen(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (*pipeOperations, error) { + switch { + // Reject invalid configurations so they don't accidentally succeed below. + case !flags.Read && !flags.Write: + return nil, syscall.EINVAL + + // Handle opening RDWR or with O_NONBLOCK: will never block, so try only once. + case (flags.Read && flags.Write) || flags.NonBlocking: + f, err := opener.NonBlockingOpen(ctx, fs.PermMask{Read: flags.Read, Write: flags.Write}) + if err != nil { + return nil, err + } + return newPipeOperations(ctx, opener, flags, f, nil) + + // Handle opening O_WRONLY blocking: convert ENXIO to syserror.ErrWouldBlock. + // See TryOpenWriteOnly for more details. + case flags.Write: + return p.TryOpenWriteOnly(ctx, opener) + + default: + // Handle opening O_RDONLY blocking: convert EOF from read to syserror.ErrWouldBlock. + // See TryOpenReadOnly for more details. 
+ return p.TryOpenReadOnly(ctx, opener) + } +} + +// TryOpenReadOnly tries to open a host pipe read only but only returns a fs.File when +// there is a coordinating writer. Call TryOpenReadOnly repeatedly on the same pipeOpenState +// until syserror.ErrWouldBlock is no longer returned. +// +// How it works: +// +// Opening a pipe read only will return no error, but each non zero Read will return EOF +// until a writer becomes available, then EWOULDBLOCK. This is the only state change +// available to us. We keep a read ahead buffer in case we read bytes instead of getting +// EWOULDBLOCK, to be read from on the first read request to this fs.File. +func (p *pipeOpenState) TryOpenReadOnly(ctx context.Context, opener NonBlockingOpener) (*pipeOperations, error) { + // Waiting for a blocking read only open involves reading from the host pipe until + // bytes or other writers are available, so instead of retrying opening the pipe, + // it's necessary to retry reading from the pipe. To do this we need to keep around + // the read only pipe we opened, until success or an irrecoverable read error (at + // which point it must be closed). + if p.hostFile == nil { + var err error + p.hostFile, err = opener.NonBlockingOpen(ctx, fs.PermMask{Read: true}) + if err != nil { + return nil, err + } + } + + // Try to read from the pipe to see if writers are around. + tryReadBuffer := make([]byte, 1) + n, rerr := p.hostFile.Read(tryReadBuffer) + + // No bytes were read. + if n == 0 { + // EOF means that we're not ready yet. + if rerr == nil || rerr == io.EOF { + return nil, syserror.ErrWouldBlock + } + // Any error that is not EWOULDBLOCK also means we're not + // ready yet, and probably never will be ready. In this + // case we need to close the host pipe we opened. 
+ if unwrapError(rerr) != syscall.EWOULDBLOCK { + p.hostFile.Close() + return nil, rerr + } + } + + // If any bytes were read, no matter the corresponding error, we need + // to keep them around so they can be read by the application. + var readAheadBuffer []byte + if n > 0 { + readAheadBuffer = tryReadBuffer + } + + // Successfully opened read only blocking pipe with either bytes available + // to read and/or a writer available. + return newPipeOperations(ctx, opener, fs.FileFlags{Read: true}, p.hostFile, readAheadBuffer) +} + +// TryOpenWriteOnly tries to open a host pipe write only but only returns a fs.File when +// there is a coordinating reader. Call TryOpenWriteOnly repeatedly on the same pipeOpenState +// until syserror.ErrWouldBlock is no longer returned. +// +// How it works: +// +// Opening a pipe write only will return ENXIO until readers are available. Converts the ENXIO +// to an syserror.ErrWouldBlock, to tell callers to retry. +func (*pipeOpenState) TryOpenWriteOnly(ctx context.Context, opener NonBlockingOpener) (*pipeOperations, error) { + hostFile, err := opener.NonBlockingOpen(ctx, fs.PermMask{Write: true}) + if unwrapError(err) == syscall.ENXIO { + return nil, syserror.ErrWouldBlock + } + if err != nil { + return nil, err + } + return newPipeOperations(ctx, opener, fs.FileFlags{Write: true}, hostFile, nil) +} diff --git a/pkg/sentry/fs/fdpipe/pipe_opener_test.go b/pkg/sentry/fs/fdpipe/pipe_opener_test.go new file mode 100644 index 000000000..e556da48a --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_opener_test.go @@ -0,0 +1,523 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "bytes" + "fmt" + "io" + "os" + "path" + "syscall" + "testing" + "time" + + "github.com/google/uuid" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +type hostOpener struct { + name string +} + +func (h *hostOpener) NonBlockingOpen(_ context.Context, p fs.PermMask) (*fd.FD, error) { + var flags int + switch { + case p.Read && p.Write: + flags = syscall.O_RDWR + case p.Write: + flags = syscall.O_WRONLY + case p.Read: + flags = syscall.O_RDONLY + default: + return nil, syscall.EINVAL + } + f, err := syscall.Open(h.name, flags|syscall.O_NONBLOCK, 0666) + if err != nil { + return nil, err + } + return fd.New(f), nil +} + +func pipename() string { + return fmt.Sprintf(path.Join(os.TempDir(), "test-named-pipe-%s"), uuid.New()) +} + +func mkpipe(name string) error { + return syscall.Mknod(name, syscall.S_IFIFO|0666, 0) +} + +func TestTryOpen(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // makePipe is true if the test case should create the pipe. + makePipe bool + + // flags are the fs.FileFlags used to open the pipe. + flags fs.FileFlags + + // expectFile is true if a fs.File is expected. 
+ expectFile bool + + // err is the expected error + err error + }{ + { + desc: "FileFlags lacking Read and Write are invalid", + makePipe: false, + flags: fs.FileFlags{}, /* bogus */ + expectFile: false, + err: syscall.EINVAL, + }, + { + desc: "NonBlocking Read only error returns immediately", + makePipe: false, /* causes the error */ + flags: fs.FileFlags{Read: true, NonBlocking: true}, + expectFile: false, + err: syscall.ENOENT, + }, + { + desc: "NonBlocking Read only success returns immediately", + makePipe: true, + flags: fs.FileFlags{Read: true, NonBlocking: true}, + expectFile: true, + err: nil, + }, + { + desc: "NonBlocking Write only error returns immediately", + makePipe: false, /* causes the error */ + flags: fs.FileFlags{Write: true, NonBlocking: true}, + expectFile: false, + err: syscall.ENOENT, + }, + { + desc: "NonBlocking Write only no reader error returns immediately", + makePipe: true, + flags: fs.FileFlags{Write: true, NonBlocking: true}, + expectFile: false, + err: syscall.ENXIO, + }, + { + desc: "ReadWrite error returns immediately", + makePipe: false, /* causes the error */ + flags: fs.FileFlags{Read: true, Write: true}, + expectFile: false, + err: syscall.ENOENT, + }, + { + desc: "ReadWrite returns immediately", + makePipe: true, + flags: fs.FileFlags{Read: true, Write: true}, + expectFile: true, + err: nil, + }, + { + desc: "Blocking Write only returns open error", + makePipe: false, /* causes the error */ + flags: fs.FileFlags{Write: true}, + expectFile: false, + err: syscall.ENOENT, /* from bogus perms */ + }, + { + desc: "Blocking Read only returns open error", + makePipe: false, /* causes the error */ + flags: fs.FileFlags{Read: true}, + expectFile: false, + err: syscall.ENOENT, + }, + { + desc: "Blocking Write only returns with syserror.ErrWouldBlock", + makePipe: true, + flags: fs.FileFlags{Write: true}, + expectFile: false, + err: syserror.ErrWouldBlock, + }, + { + desc: "Blocking Read only returns with syserror.ErrWouldBlock", + 
makePipe: true, + flags: fs.FileFlags{Read: true}, + expectFile: false, + err: syserror.ErrWouldBlock, + }, + } { + name := pipename() + if test.makePipe { + // Create the pipe. We do this per-test case to keep tests independent. + if err := mkpipe(name); err != nil { + t.Errorf("%s: failed to make host pipe: %v", test.desc, err) + continue + } + defer syscall.Unlink(name) + } + + // Use a host opener to keep things simple. + opener := &hostOpener{name: name} + + pipeOpenState := &pipeOpenState{} + ctx := contexttest.Context(t) + pipeOps, err := pipeOpenState.TryOpen(ctx, opener, test.flags) + if unwrapError(err) != test.err { + t.Errorf("%s: got error %v, want %v", test.desc, err, test.err) + if pipeOps != nil { + // Cleanup the state of the pipe, and remove the fd from the + // fdnotifier. Sadly this needed to maintain the correctness + // of other tests because the fdnotifier is global. + pipeOps.Release() + } + continue + } + if (pipeOps != nil) != test.expectFile { + t.Errorf("%s: got non-nil file %v, want %v", test.desc, pipeOps != nil, test.expectFile) + } + if pipeOps != nil { + // Same as above. + pipeOps.Release() + } + } +} + +func TestPipeOpenUnblocksEventually(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // partnerIsReader is true if the goroutine opening the same pipe as the test case + // should open the pipe read only. Otherwise write only. This also means that the + // test case will open the pipe in the opposite way. + partnerIsReader bool + + // partnerIsBlocking is true if the goroutine opening the same pipe as the test case + // should do so without the O_NONBLOCK flag, otherwise opens the pipe with O_NONBLOCK + // until ENXIO is not returned. 
+ partnerIsBlocking bool + }{ + { + desc: "Blocking Read with blocking writer partner opens eventually", + partnerIsReader: false, + partnerIsBlocking: true, + }, + { + desc: "Blocking Write with blocking reader partner opens eventually", + partnerIsReader: true, + partnerIsBlocking: true, + }, + { + desc: "Blocking Read with non-blocking writer partner opens eventually", + partnerIsReader: false, + partnerIsBlocking: false, + }, + { + desc: "Blocking Write with non-blocking reader partner opens eventually", + partnerIsReader: true, + partnerIsBlocking: false, + }, + } { + // Create the pipe. We do this per-test case to keep tests independent. + name := pipename() + if err := mkpipe(name); err != nil { + t.Errorf("%s: failed to make host pipe: %v", test.desc, err) + continue + } + defer syscall.Unlink(name) + + // Spawn the partner. + type fderr struct { + fd int + err error + } + errch := make(chan fderr, 1) + go func() { + var flags int + if test.partnerIsReader { + flags = syscall.O_RDONLY + } else { + flags = syscall.O_WRONLY + } + if test.partnerIsBlocking { + fd, err := syscall.Open(name, flags, 0666) + errch <- fderr{fd: fd, err: err} + } else { + var fd int + err := error(syscall.ENXIO) + for err == syscall.ENXIO { + fd, err = syscall.Open(name, flags|syscall.O_NONBLOCK, 0666) + time.Sleep(1 * time.Second) + } + errch <- fderr{fd: fd, err: err} + } + }() + + // Setup file flags for either a read only or write only open. + flags := fs.FileFlags{ + Read: !test.partnerIsReader, + Write: test.partnerIsReader, + } + + // Open the pipe in a blocking way, which should succeed eventually. + opener := &hostOpener{name: name} + ctx := contexttest.Context(t) + pipeOps, err := Open(ctx, opener, flags) + if pipeOps != nil { + // Same as TestTryOpen. + pipeOps.Release() + } + + // Check that the partner opened the file successfully. 
+ e := <-errch + if e.err != nil { + t.Errorf("%s: partner got error %v, wanted nil", test.desc, e.err) + continue + } + // If so, then close the partner fd to avoid leaking an fd. + syscall.Close(e.fd) + + // Check that our blocking open was successful. + if err != nil { + t.Errorf("%s: blocking open got error %v, wanted nil", test.desc, err) + continue + } + if pipeOps == nil { + t.Errorf("%s: blocking open got nil file, wanted non-nil", test.desc) + continue + } + } +} + +func TestCopiedReadAheadBuffer(t *testing.T) { + // Create the pipe. + name := pipename() + if err := mkpipe(name); err != nil { + t.Fatalf("failed to make host pipe: %v", err) + } + defer syscall.Unlink(name) + + // We're taking advantage of the fact that pipes opened read only always return + // success, but internally they are not deemed "opened" until we're sure that + // another writer comes along. This means we can open the same pipe write only + // with no problems + write to it, given that opener.Open already tried to open + // the pipe RDONLY and succeeded, which we know happened if TryOpen returns + // syserror.ErrWouldBlock. + // + // This simulates the open(RDONLY) <-> open(WRONLY)+write race we care about, but + // does not cause our test to be racy (which would be terrible). + opener := &hostOpener{name: name} + pipeOpenState := &pipeOpenState{} + ctx := contexttest.Context(t) + pipeOps, err := pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true}) + if pipeOps != nil { + pipeOps.Release() + t.Fatalf("open(%s, %o) got file, want nil", name, syscall.O_RDONLY) + } + if err != syserror.ErrWouldBlock { + t.Fatalf("open(%s, %o) got error %v, want %v", name, syscall.O_RDONLY, err, syserror.ErrWouldBlock) + } + + // Then open the same pipe write only and write some bytes to it. The next + // time we try to open the pipe read only again via the pipeOpenState, we should + // succeed and buffer some of the bytes written. 
+ fd, err := syscall.Open(name, syscall.O_WRONLY, 0666) + if err != nil { + t.Fatalf("open(%s, %o) got error %v, want nil", name, syscall.O_WRONLY, err) + } + defer syscall.Close(fd) + + data := []byte("hello") + if n, err := syscall.Write(fd, data); n != len(data) || err != nil { + t.Fatalf("write(%v) got (%d, %v), want (%d, nil)", data, n, err, len(data)) + } + + // Try the read again, knowing that it should succeed this time. + pipeOps, err = pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true}) + if pipeOps == nil { + t.Fatalf("open(%s, %o) got nil file, want not nil", name, syscall.O_RDONLY) + } + defer pipeOps.Release() + + if err != nil { + t.Fatalf("open(%s, %o) got error %v, want nil", name, syscall.O_RDONLY, err) + } + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, pipeOps) + + // Check that the file we opened points to a pipe with a non-empty read ahead buffer. + bufsize := len(pipeOps.readAheadBuffer) + if bufsize != 1 { + t.Fatalf("read ahead buffer got %d bytes, want %d", bufsize, 1) + } + + // Now for the final test, try to read everything in, expecting to get back all of + // the bytes that were written at once. Note that in the wild there is no atomic + // read size so expecting to get all bytes from a single writer when there are + // multiple readers is a bad expectation. + buf := make([]byte, len(data)) + ioseq := usermem.BytesIOSequence(buf) + n, err := pipeOps.Read(ctx, file, ioseq, 0) + if err != nil { + t.Fatalf("read request got error %v, want nil", err) + } + if n != int64(len(data)) { + t.Fatalf("read request got %d bytes, want %d", n, len(data)) + } + if !bytes.Equal(buf, data) { + t.Errorf("read request got bytes [%v], want [%v]", buf, data) + } +} + +func TestPipeHangup(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. 
+ desc string + + // flags control how we open our end of the pipe and must be read + // only or write only. They also dicate how a coordinating partner + // fd is opened, which is their inverse (read only -> write only, etc). + flags fs.FileFlags + + // hangupSelf if true causes the test case to close our end of the pipe + // and causes hangup errors to be asserted on our coordinating partner's + // fd. If hangupSelf is false, then our partner's fd is closed and the + // hangup errors are expected on our end of the pipe. + hangupSelf bool + }{ + { + desc: "Read only gets hangup error", + flags: fs.FileFlags{Read: true}, + }, + { + desc: "Write only gets hangup error", + flags: fs.FileFlags{Write: true}, + }, + { + desc: "Read only generates hangup error", + flags: fs.FileFlags{Read: true}, + hangupSelf: true, + }, + { + desc: "Write only generates hangup error", + flags: fs.FileFlags{Write: true}, + hangupSelf: true, + }, + } { + if test.flags.Read == test.flags.Write { + t.Errorf("%s: test requires a single reader or writer", test.desc) + continue + } + + // Create the pipe. We do this per-test case to keep tests independent. + name := pipename() + if err := mkpipe(name); err != nil { + t.Errorf("%s: failed to make host pipe: %v", test.desc, err) + continue + } + defer syscall.Unlink(name) + + // Fire off a partner routine which tries to open the same pipe blocking, + // which will synchronize with us. The channel allows us to get back the + // fd once we expect this partner routine to succeed, so we can manifest + // hangup events more directly. + fdchan := make(chan int, 1) + go func() { + // Be explicit about the flags to protect the test from + // misconfiguration. 
+ var flags int + if test.flags.Read { + flags = syscall.O_WRONLY + } else { + flags = syscall.O_RDONLY + } + fd, err := syscall.Open(name, flags, 0666) + if err != nil { + t.Logf("Open(%q, %o, 0666) partner failed: %v", name, flags, err) + } + fdchan <- fd + }() + + // Open our end in a blocking way to ensure that we coordinate. + opener := &hostOpener{name: name} + ctx := contexttest.Context(t) + pipeOps, err := Open(ctx, opener, test.flags) + if err != nil { + t.Errorf("%s: Open got error %v, want nil", test.desc, err) + continue + } + // Don't defer file.DecRef here because that causes the hangup we're + // trying to test for. + + // Expect the partner routine to have coordinated with us and get back + // its open fd. + f := <-fdchan + if f < 0 { + t.Errorf("%s: partner routine got fd %d, want > 0", test.desc, f) + pipeOps.Release() + continue + } + + if test.hangupSelf { + // Hangup self and assert that our partner got the expected hangup + // error. + pipeOps.Release() + + if test.flags.Read { + // Partner is writer. + assertWriterHungup(t, test.desc, fd.NewReadWriter(f)) + } else { + // Partner is reader. + assertReaderHungup(t, test.desc, fd.NewReadWriter(f)) + } + } else { + // Hangup our partner and expect us to get the hangup error. + syscall.Close(f) + defer pipeOps.Release() + + if test.flags.Read { + assertReaderHungup(t, test.desc, pipeOps.(*pipeOperations).file) + } else { + assertWriterHungup(t, test.desc, pipeOps.(*pipeOperations).file) + } + } + } +} + +func assertReaderHungup(t *testing.T, desc string, reader io.Reader) bool { + // Drain the pipe completely, it might have crap in it, but expect EOF eventually. 
+ var err error + for err == nil { + _, err = reader.Read(make([]byte, 10)) + } + if err != io.EOF { + t.Errorf("%s: read from self after hangup got error %v, want %v", desc, err, io.EOF) + return false + } + return true +} + +func assertWriterHungup(t *testing.T, desc string, writer io.Writer) bool { + if _, err := writer.Write([]byte("hello")); unwrapError(err) != syscall.EPIPE { + t.Errorf("%s: write to self after hangup got error %v, want %v", desc, err, syscall.EPIPE) + return false + } + return true +} diff --git a/pkg/sentry/fs/fdpipe/pipe_state.go b/pkg/sentry/fs/fdpipe/pipe_state.go new file mode 100644 index 000000000..af8230a7d --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_state.go @@ -0,0 +1,89 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "fmt" + "io/ioutil" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sync" +) + +// beforeSave is invoked by stateify. +func (p *pipeOperations) beforeSave() { + if p.flags.Read { + data, err := ioutil.ReadAll(p.file) + if err != nil && !isBlockError(err) { + panic(fmt.Sprintf("failed to read from pipe: %v", err)) + } + p.readAheadBuffer = append(p.readAheadBuffer, data...) 
+ } else if p.flags.Write { + file, err := p.opener.NonBlockingOpen(context.Background(), fs.PermMask{Write: true}) + if err != nil { + panic(fs.ErrSaveRejection{fmt.Errorf("write-only pipe end cannot be re-opened as %v: %v", p, err)}) + } + file.Close() + } +} + +// saveFlags is invoked by stateify. +func (p *pipeOperations) saveFlags() fs.FileFlags { + return p.flags +} + +// readPipeOperationsLoading is used to ensure that write-only pipe fds are +// opened after read/write and read-only pipe fds, to avoid ENXIO when +// multiple pipe fds refer to different ends of the same pipe. +var readPipeOperationsLoading sync.WaitGroup + +// loadFlags is invoked by stateify. +func (p *pipeOperations) loadFlags(flags fs.FileFlags) { + // This is a hack to ensure that readPipeOperationsLoading includes all + // readable pipe fds before any asynchronous calls to + // readPipeOperationsLoading.Wait(). + if flags.Read { + readPipeOperationsLoading.Add(1) + } + p.flags = flags +} + +// afterLoad is invoked by stateify. +func (p *pipeOperations) afterLoad() { + load := func() error { + if !p.flags.Read { + readPipeOperationsLoading.Wait() + } else { + defer readPipeOperationsLoading.Done() + } + var err error + p.file, err = p.opener.NonBlockingOpen(context.Background(), fs.PermMask{ + Read: p.flags.Read, + Write: p.flags.Write, + }) + if err != nil { + return fmt.Errorf("unable to open pipe %v: %v", p, err) + } + if err := p.init(); err != nil { + return fmt.Errorf("unable to initialize pipe %v: %v", p, err) + } + return nil + } + + // Do background opening of pipe ends. Note for write-only pipe ends we + // have to do it asynchronously to avoid blocking the restore. + fs.Async(fs.CatchError(load)) +} diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go new file mode 100644 index 000000000..a0082ecca --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_test.go @@ -0,0 +1,505 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "bytes" + "io" + "os" + "syscall" + "testing" + + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +func singlePipeFD() (int, error) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + return -1, err + } + syscall.Close(fds[1]) + return fds[0], nil +} + +func singleDirFD() (int, error) { + return syscall.Open(os.TempDir(), syscall.O_RDONLY, 0666) +} + +func mockPipeDirent(t *testing.T) *fs.Dirent { + ctx := contexttest.Context(t) + node := fs.NewMockInodeOperations(ctx) + node.UAttr = fs.UnstableAttr{ + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + } + inode := fs.NewInode(ctx, node, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + BlockSize: usermem.PageSize, + }) + return fs.NewDirent(ctx, inode, "") +} + +func TestNewPipe(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // getfd generates the fd to pass to newPipeOperations. + getfd func() (int, error) + + // flags are the fs.FileFlags passed to newPipeOperations. + flags fs.FileFlags + + // readAheadBuffer is the buffer passed to newPipeOperations. + readAheadBuffer []byte + + // err is the expected error. 
+ err error + }{ + { + desc: "Cannot make new pipe from bad fd", + getfd: func() (int, error) { return -1, nil }, + err: syscall.EINVAL, + }, + { + desc: "Cannot make new pipe from non-pipe fd", + getfd: singleDirFD, + err: syscall.EINVAL, + }, + { + desc: "Can make new pipe from pipe fd", + getfd: singlePipeFD, + flags: fs.FileFlags{Read: true}, + readAheadBuffer: []byte("hello"), + }, + } { + gfd, err := test.getfd() + if err != nil { + t.Errorf("%s: getfd got (%d, %v), want (fd, nil)", test.desc, gfd, err) + continue + } + f := fd.New(gfd) + + p, err := newPipeOperations(contexttest.Context(t), nil, test.flags, f, test.readAheadBuffer) + if p != nil { + // This is necessary to remove the fd from the global fd notifier. + defer p.Release() + } else { + // If there is no p to DecRef on, because newPipeOperations failed, then the + // file still needs to be closed. + defer f.Close() + } + + if err != test.err { + t.Errorf("%s: got error %v, want %v", test.desc, err, test.err) + continue + } + // Check the state of the pipe given that it was successfully opened. 
+ if err == nil { + if p == nil { + t.Errorf("%s: got nil pipe and nil error, want (pipe, nil)", test.desc) + continue + } + if flags := p.flags; test.flags != flags { + t.Errorf("%s: got file flags %v, want %v", test.desc, flags, test.flags) + continue + } + if len(test.readAheadBuffer) != len(p.readAheadBuffer) { + t.Errorf("%s: got read ahead buffer length %d, want %d", test.desc, len(p.readAheadBuffer), len(test.readAheadBuffer)) + continue + } + fileFlags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(p.file.FD()), syscall.F_GETFL, 0) + if errno != 0 { + t.Errorf("%s: failed to get file flags for fd %d, got %v, want 0", test.desc, p.file.FD(), errno) + continue + } + if fileFlags&syscall.O_NONBLOCK == 0 { + t.Errorf("%s: pipe is blocking, expected non-blocking", test.desc) + continue + } + if !fdnotifier.HasFD(int32(f.FD())) { + t.Errorf("%s: pipe fd %d is not registered for events", test.desc, f.FD()) + } + } + } +} + +func TestPipeDestruction(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + f := fd.New(fds[0]) + + // We don't care about the other end, just use the read end. + syscall.Close(fds[1]) + + // Test the read end, but it doesn't really matter which. + p, err := newPipeOperations(contexttest.Context(t), nil, fs.FileFlags{Read: true}, f, nil) + if err != nil { + f.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + // Drop our only reference, which should trigger the destructor. 
+ p.Release() + + if fdnotifier.HasFD(int32(fds[0])) { + t.Fatalf("after DecRef fdnotifier has fd %d, want no longer registered", fds[0]) + } + if p.file != nil { + t.Errorf("after DecRef got file, want nil") + } +} + +type Seek struct{} + +type ReadDir struct{} + +type Writev struct { + Src usermem.IOSequence +} + +type Readv struct { + Dst usermem.IOSequence +} + +type Fsync struct{} + +func TestPipeRequest(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // request to execute. + context interface{} + + // flags determines whether to use the read or write end + // of the pipe, for this test it can only be Read or Write. + flags fs.FileFlags + + // keepOpenPartner if false closes the other end of the pipe, + // otherwise this is delayed until the end of the test. + keepOpenPartner bool + + // expected error + err error + }{ + { + desc: "ReadDir on pipe returns ENOTDIR", + context: &ReadDir{}, + err: syscall.ENOTDIR, + }, + { + desc: "Fsync on pipe returns EINVAL", + context: &Fsync{}, + err: syscall.EINVAL, + }, + { + desc: "Seek on pipe returns ESPIPE", + context: &Seek{}, + err: syscall.ESPIPE, + }, + { + desc: "Readv on pipe from empty buffer returns nil", + context: &Readv{Dst: usermem.BytesIOSequence(nil)}, + flags: fs.FileFlags{Read: true}, + }, + { + desc: "Readv on pipe from non-empty buffer and closed partner returns EOF", + context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))}, + flags: fs.FileFlags{Read: true}, + err: io.EOF, + }, + { + desc: "Readv on pipe from non-empty buffer and open partner returns EWOULDBLOCK", + context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))}, + flags: fs.FileFlags{Read: true}, + keepOpenPartner: true, + err: syserror.ErrWouldBlock, + }, + { + desc: "Writev on pipe from empty buffer returns nil", + context: &Writev{Src: usermem.BytesIOSequence(nil)}, + flags: fs.FileFlags{Write: true}, + }, + { + desc: "Writev on pipe from non-empty buffer and 
closed partner returns EPIPE", + context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))}, + flags: fs.FileFlags{Write: true}, + err: syscall.EPIPE, + }, + { + desc: "Writev on pipe from non-empty buffer and open partner succeeds", + context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))}, + flags: fs.FileFlags{Write: true}, + keepOpenPartner: true, + }, + } { + if test.flags.Read && test.flags.Write { + panic("both read and write not supported for this test") + } + + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Errorf("%s: failed to create pipes: got %v, want nil", test.desc, err) + continue + } + + // Configure the fd and partner fd based on the file flags. + testFd, partnerFd := fds[0], fds[1] + if test.flags.Write { + testFd, partnerFd = fds[1], fds[0] + } + + // Configure closing the fds. + if test.keepOpenPartner { + defer syscall.Close(partnerFd) + } else { + syscall.Close(partnerFd) + } + + // Create the pipe. + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, test.flags, fd.New(testFd), nil) + if err != nil { + t.Fatalf("%s: newPipeOperations got error %v, want nil", test.desc, err) + } + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Pipe}) + file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p) + + // Issue request via the appropriate function. 
+ switch c := test.context.(type) { + case *Seek: + _, err = p.Seek(ctx, file, 0, 0) + case *ReadDir: + _, err = p.Readdir(ctx, file, nil) + case *Readv: + _, err = p.Read(ctx, file, c.Dst, 0) + case *Writev: + _, err = p.Write(ctx, file, c.Src, 0) + case *Fsync: + err = p.Fsync(ctx, file, 0, fs.FileMaxOffset, fs.SyncAll) + default: + t.Errorf("%s: unknown request type %T", test.desc, test.context) + } + + if unwrapError(err) != test.err { + t.Errorf("%s: got error %v, want %v", test.desc, err, test.err) + } + } +} + +func TestPipeReadAheadBuffer(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + rfile := fd.New(fds[0]) + + // Eventually close the write end, which is not wrapped in a pipe object. + defer syscall.Close(fds[1]) + + // Write some bytes to this end. + data := []byte("world") + if n, err := syscall.Write(fds[1], data); n != len(data) || err != nil { + rfile.Close() + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data)) + } + // Close the write end immediately, we don't care about it. + + buffered := []byte("hello ") + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, buffered) + if err != nil { + rfile.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p) + + // In total we expect to read data + buffered. + total := append(buffered, data...) 
+ + buf := make([]byte, len(total)) + iov := usermem.BytesIOSequence(buf) + n, err := p.Read(contexttest.Context(t), file, iov, 0) + if err != nil { + t.Fatalf("read request got error %v, want nil", err) + } + if n != int64(len(total)) { + t.Fatalf("read request got %d bytes, want %d", n, len(total)) + } + if !bytes.Equal(buf, total) { + t.Errorf("read request got bytes [%v], want [%v]", buf, total) + } +} + +// This is very important for pipes in general because they can return +// EWOULDBLOCK and for those that block they must continue until they have read +// all of the data (and report it as such). +func TestPipeReadsAccumulate(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + rfile := fd.New(fds[0]) + + // Eventually close the write end, it doesn't depend on a pipe object. + defer syscall.Close(fds[1]) + + // Get a new read only pipe reference. + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, nil) + if err != nil { + rfile.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + // Don't forget to remove the fd from the fd notifier. Otherwise other tests will + // likely be borked, because it's global :( + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p) + + // Write some some bytes to the pipe. + data := []byte("some message") + if n, err := syscall.Write(fds[1], data); n != len(data) || err != nil { + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data)) + } + + // Construct a segment vec that is a bit more than we have written so we + // trigger an EWOULDBLOCK. 
+ wantBytes := len(data) + 1 + readBuffer := make([]byte, wantBytes) + iov := usermem.BytesIOSequence(readBuffer) + n, err := p.Read(ctx, file, iov, 0) + total := n + iov = iov.DropFirst64(n) + if err != syserror.ErrWouldBlock { + t.Fatalf("Readv got error %v, want %v", err, syserror.ErrWouldBlock) + } + + // Write a few more bytes to allow us to read more/accumulate. + extra := []byte("extra") + if n, err := syscall.Write(fds[1], extra); n != len(extra) || err != nil { + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(extra)) + } + + // This time, using the same request, we should not block. + n, err = p.Read(ctx, file, iov, 0) + total += n + if err != nil { + t.Fatalf("Readv got error %v, want nil", err) + } + + // Assert that the result we got back is cumulative. + if total != int64(wantBytes) { + t.Fatalf("Readv sequence got %d bytes, want %d", total, wantBytes) + } + + if want := append(data, extra[0]); !bytes.Equal(readBuffer, want) { + t.Errorf("Readv sequence got %v, want %v", readBuffer, want) + } +} + +// Same as TestReadsAccumulate. +func TestPipeWritesAccumulate(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + wfile := fd.New(fds[1]) + + // Eventually close the read end, it doesn't depend on a pipe object. + defer syscall.Close(fds[0]) + + // Get a new write only pipe reference. + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, fs.FileFlags{Write: true}, wfile, nil) + if err != nil { + wfile.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + // Don't forget to remove the fd from the fd notifier. 
Otherwise other tests + // will likely be borked, because it's global :( + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(ctx, inode, "pipe"), fs.FileFlags{Read: true}, p) + + pipeSize, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(wfile.FD()), syscall.F_GETPIPE_SZ, 0) + if errno != 0 { + t.Fatalf("fcntl(F_GETPIPE_SZ) failed: %v", errno) + } + t.Logf("Pipe buffer size: %d", pipeSize) + + // Construct a segment vec that is larger than the pipe size to trigger an + // EWOULDBLOCK. + wantBytes := int(pipeSize) * 2 + writeBuffer := make([]byte, wantBytes) + for i := 0; i < wantBytes; i++ { + writeBuffer[i] = 'a' + } + iov := usermem.BytesIOSequence(writeBuffer) + n, err := p.Write(ctx, file, iov, 0) + if err != syserror.ErrWouldBlock { + t.Fatalf("Writev got error %v, want %v", err, syserror.ErrWouldBlock) + } + if n != int64(pipeSize) { + t.Fatalf("Writev partial write, got: %v, want %v", n, pipeSize) + } + total := n + iov = iov.DropFirst64(n) + + // Read the entire pipe buf size to make space for the second half. + readBuffer := make([]byte, n) + if n, err := syscall.Read(fds[0], readBuffer); n != len(readBuffer) || err != nil { + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(readBuffer)) + } + if !bytes.Equal(readBuffer, writeBuffer[:len(readBuffer)]) { + t.Fatalf("wrong data read from pipe, got: %v, want: %v", readBuffer, writeBuffer) + } + + // This time we should not block. + n, err = p.Write(ctx, file, iov, 0) + if err != nil { + t.Fatalf("Writev got error %v, want nil", err) + } + if n != int64(pipeSize) { + t.Fatalf("Writev partial write, got: %v, want %v", n, pipeSize) + } + total += n + + // Assert that the result we got back is cumulative. 
+ if total != int64(wantBytes) { + t.Fatalf("Writev sequence got %d bytes, want %d", total, wantBytes) + } +} diff --git a/pkg/sentry/fs/file.go b/pkg/sentry/fs/file.go new file mode 100644 index 000000000..ca41520b4 --- /dev/null +++ b/pkg/sentry/fs/file.go @@ -0,0 +1,593 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "math" + "sync/atomic" + "time" + + "gvisor.dev/gvisor/pkg/amutex" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/metric" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/fs/lock" + "gvisor.dev/gvisor/pkg/sentry/limits" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/uniqueid" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +var ( + // RecordWaitTime controls writing metrics for filesystem reads. + // Enabling this comes at a small CPU cost due to performing two + // monotonic clock reads per read call. + // + // Note that this is only performed in the direct read path, and may + // not be consistently applied for other forms of reads, such as + // splice. 
+ RecordWaitTime = false + + reads = metric.MustCreateNewUint64Metric("/fs/reads", false /* sync */, "Number of file reads.") + readWait = metric.MustCreateNewUint64NanosecondsMetric("/fs/read_wait", false /* sync */, "Time waiting on file reads, in nanoseconds.") +) + +// IncrementWait increments the given wait time metric, if enabled. +func IncrementWait(m *metric.Uint64Metric, start time.Time) { + if !RecordWaitTime { + return + } + m.IncrementBy(uint64(time.Since(start))) +} + +// FileMaxOffset is the maximum possible file offset. +const FileMaxOffset = math.MaxInt64 + +// File is an open file handle. It is thread-safe. +// +// File provides stronger synchronization guarantees than Linux. Linux +// synchronizes lseek(2), read(2), and write(2) with respect to the file +// offset for regular files and only for those interfaces. See +// fs/read_write.c:fdget_pos, fs.read_write.c:fdput_pos and FMODE_ATOMIC_POS. +// +// In contrast, File synchronizes any operation that could take a long time +// under a single abortable mutex which also synchronizes lseek(2), read(2), +// and write(2). +// +// FIXME(b/38451980): Split synchronization from cancellation. +// +// +stateify savable +type File struct { + refs.AtomicRefCount + + // UniqueID is the globally unique identifier of the File. + UniqueID uint64 + + // Dirent is the Dirent backing this File. This encodes the name + // of the File via Dirent.FullName() as well as its identity via the + // Dirent's Inode. The Dirent is non-nil. + // + // A File holds a reference to this Dirent. Using the returned Dirent is + // only safe as long as a reference on the File is held. The association + // between a File and a Dirent is immutable. + // + // Files that are not parented in a filesystem return a root Dirent + // that holds a reference to their Inode. + // + // The name of the Dirent may reflect parentage if the Dirent is not a + // root Dirent or the identity of the File on a pseudo filesystem (pipefs, + // sockfs, etc). 
+ // + // Multiple Files may hold a reference to the same Dirent. This is the + // common case for Files that are parented and maintain consistency with + // other files via the Dirent cache. + Dirent *Dirent + + // flagsMu protects flags and async below. + flagsMu sync.Mutex `state:"nosave"` + + // flags are the File's flags. Setting or getting flags is fully atomic + // and is not protected by mu (below). + flags FileFlags + + // async handles O_ASYNC notifications. + async FileAsync + + // saving indicates that this file is in the process of being saved. + saving bool `state:"nosave"` + + // mu is dual-purpose: first, to make read(2) and write(2) thread-safe + // in conformity with POSIX, and second, to cancel operations before they + // begin in response to interruptions (i.e. signals). + mu amutex.AbortableMutex `state:"nosave"` + + // FileOperations implements file system specific behavior for this File. + FileOperations FileOperations `state:"wait"` + + // offset is the File's offset. Updating offset is protected by mu but + // can be read atomically via File.Offset() outside of mu. + offset int64 +} + +// NewFile returns a File. It takes a reference on the Dirent and owns the +// lifetime of the FileOperations. Files that do not support reading and +// writing at an arbitrary offset should set flags.Pread and flags.Pwrite +// to false respectively. +func NewFile(ctx context.Context, dirent *Dirent, flags FileFlags, fops FileOperations) *File { + dirent.IncRef() + f := File{ + UniqueID: uniqueid.GlobalFromContext(ctx), + Dirent: dirent, + FileOperations: fops, + flags: flags, + } + f.mu.Init() + f.EnableLeakCheck("fs.File") + return &f +} + +// DecRef destroys the File when it is no longer referenced. +func (f *File) DecRef() { + f.DecRefWithDestructor(func() { + // Drop BSD style locks. + lockRng := lock.LockRange{Start: 0, End: lock.LockEOF} + f.Dirent.Inode.LockCtx.BSD.UnlockRegion(f, lockRng) + + // Release resources held by the FileOperations. 
+ f.FileOperations.Release() + + // Release a reference on the Dirent. + f.Dirent.DecRef() + + // Only unregister if we are currently registered. There is nothing + // to register if f.async is nil (this happens when async mode is + // enabled without setting an owner). Also, we unregister during + // save. + f.flagsMu.Lock() + if !f.saving && f.flags.Async && f.async != nil { + f.async.Unregister(f) + } + f.async = nil + f.flagsMu.Unlock() + }) +} + +// Flags atomically loads the File's flags. +func (f *File) Flags() FileFlags { + f.flagsMu.Lock() + flags := f.flags + f.flagsMu.Unlock() + return flags +} + +// SetFlags atomically changes the File's flags to the values contained +// in newFlags. See SettableFileFlags for values that can be set. +func (f *File) SetFlags(newFlags SettableFileFlags) { + f.flagsMu.Lock() + f.flags.Direct = newFlags.Direct + f.flags.NonBlocking = newFlags.NonBlocking + f.flags.Append = newFlags.Append + if f.async != nil { + if newFlags.Async && !f.flags.Async { + f.async.Register(f) + } + if !newFlags.Async && f.flags.Async { + f.async.Unregister(f) + } + } + f.flags.Async = newFlags.Async + f.flagsMu.Unlock() +} + +// Offset atomically loads the File's offset. +func (f *File) Offset() int64 { + return atomic.LoadInt64(&f.offset) +} + +// Readiness implements waiter.Waitable.Readiness. +func (f *File) Readiness(mask waiter.EventMask) waiter.EventMask { + return f.FileOperations.Readiness(mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (f *File) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + f.FileOperations.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (f *File) EventUnregister(e *waiter.Entry) { + f.FileOperations.EventUnregister(e) +} + +// Seek calls f.FileOperations.Seek with f as the File, updating the file +// offset to the value returned by f.FileOperations.Seek if the operation +// is successful. 
//
// Returns syserror.ErrInterrupted if seeking was interrupted.
func (f *File) Seek(ctx context.Context, whence SeekWhence, offset int64) (int64, error) {
	if !f.mu.Lock(ctx) {
		return 0, syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	newOffset, err := f.FileOperations.Seek(ctx, f, whence, offset)
	if err == nil {
		// The offset is only updated on success: a failed seek leaves the
		// file position untouched. Stored atomically so File.Offset() can
		// read it without holding mu.
		atomic.StoreInt64(&f.offset, newOffset)
	}
	return newOffset, err
}

// Readdir reads the directory entries of this File and writes them out
// to the DentrySerializer until entries can no longer be written. If even
// a single directory entry is written then Readdir returns a nil error
// and the directory offset is advanced.
//
// Readdir unconditionally updates the access time on the File's Inode,
// see fs/readdir.c:iterate_dir.
//
// Returns syserror.ErrInterrupted if reading was interrupted.
func (f *File) Readdir(ctx context.Context, serializer DentrySerializer) error {
	if !f.mu.Lock(ctx) {
		return syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	offset, err := f.FileOperations.Readdir(ctx, f, serializer)
	// Unlike Seek, the returned offset is stored even on error, so that
	// partial progress through the directory is preserved.
	atomic.StoreInt64(&f.offset, offset)
	return err
}

// Readv calls f.FileOperations.Read with f as the File, advancing the file
// offset if f.FileOperations.Read returns bytes read > 0.
//
// Returns syserror.ErrInterrupted if reading was interrupted.
func (f *File) Readv(ctx context.Context, dst usermem.IOSequence) (int64, error) {
	// Wait-time accounting is only done when RecordWaitTime is enabled,
	// to avoid two monotonic clock reads per call (see the var block above).
	var start time.Time
	if RecordWaitTime {
		start = time.Now()
	}
	if !f.mu.Lock(ctx) {
		IncrementWait(readWait, start)
		return 0, syserror.ErrInterrupted
	}

	reads.Increment()
	n, err := f.FileOperations.Read(ctx, f, dst, f.offset)
	if n > 0 && !f.flags.NonSeekable {
		// Only seekable files advance the shared offset; non-seekable
		// files (e.g. pipes, sockets) have no meaningful file position.
		atomic.AddInt64(&f.offset, n)
	}
	f.mu.Unlock()
	IncrementWait(readWait, start)
	return n, err
}

// Preadv calls f.FileOperations.Read with f as the File. It does not
// advance the file offset. If !f.Flags().Pread, Preadv should not be
// called.
//
// Otherwise same as Readv.
func (f *File) Preadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
	var start time.Time
	if RecordWaitTime {
		start = time.Now()
	}
	if !f.mu.Lock(ctx) {
		IncrementWait(readWait, start)
		return 0, syserror.ErrInterrupted
	}

	reads.Increment()
	// Read at the caller-supplied offset; the shared f.offset is neither
	// consulted nor advanced.
	n, err := f.FileOperations.Read(ctx, f, dst, offset)
	f.mu.Unlock()
	IncrementWait(readWait, start)
	return n, err
}

// Writev calls f.FileOperations.Write with f as the File, advancing the
// file offset if f.FileOperations.Write returns bytes written > 0.
//
// Writev positions the write offset at EOF if f.Flags().Append. This is
// unavoidably racy for network file systems. Writev also truncates src
// to avoid overrunning the current file size limit if necessary.
//
// Returns syserror.ErrInterrupted if writing was interrupted.
func (f *File) Writev(ctx context.Context, src usermem.IOSequence) (int64, error) {
	if !f.mu.Lock(ctx) {
		return 0, syserror.ErrInterrupted
	}
	// NOTE(review): lockAppendMu's argument selects the lock mode based on
	// whether O_APPEND is set; its exact semantics are defined elsewhere in
	// this package — confirm before relying on them.
	unlockAppendMu := f.Dirent.Inode.lockAppendMu(f.Flags().Append)
	// Handle append mode: reposition the offset at EOF before writing.
	if f.Flags().Append {
		if err := f.offsetForAppend(ctx, &f.offset); err != nil {
			unlockAppendMu()
			f.mu.Unlock()
			return 0, err
		}
	}

	// Enforce file limits (see checkLimit).
	limit, ok := f.checkLimit(ctx, f.offset)
	switch {
	case ok && limit == 0:
		// Nothing at all may be written at this offset.
		unlockAppendMu()
		f.mu.Unlock()
		return 0, syserror.ErrExceedsFileSizeLimit
	case ok:
		// Truncate the write so it stays within the limit.
		src = src.TakeFirst64(limit)
	}

	// We must hold the lock during the write.
	n, err := f.FileOperations.Write(ctx, f, src, f.offset)
	if n >= 0 && !f.flags.NonSeekable {
		atomic.StoreInt64(&f.offset, f.offset+n)
	}
	unlockAppendMu()
	f.mu.Unlock()
	return n, err
}

// Pwritev calls f.FileOperations.Write with f as the File. It does not
// advance the file offset. If !f.Flags().Pwrite, Pwritev should not be
// called.
//
// Otherwise same as Writev.
func (f *File) Pwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
	// "POSIX requires that opening a file with the O_APPEND flag should
	// have no effect on the location at which pwrite() writes data.
	// However, on Linux, if a file is opened with O_APPEND, pwrite()
	// appends data to the end of the file, regardless of the value of
	// offset."
	unlockAppendMu := f.Dirent.Inode.lockAppendMu(f.Flags().Append)
	defer unlockAppendMu()
	if f.Flags().Append {
		if err := f.offsetForAppend(ctx, &offset); err != nil {
			return 0, err
		}
	}

	// Enforce file limits (see checkLimit).
	limit, ok := f.checkLimit(ctx, offset)
	switch {
	case ok && limit == 0:
		return 0, syserror.ErrExceedsFileSizeLimit
	case ok:
		src = src.TakeFirst64(limit)
	}

	// Note that unlike Writev, f.mu is not taken here: pwrite does not
	// touch the shared file offset, so only the append mutex serializes
	// this write.
	return f.FileOperations.Write(ctx, f, src, offset)
}

// offsetForAppend atomically sets the given offset to the end of the file.
//
// Precondition: the file.Dirent.Inode.appendMu mutex should be held for
// writing.
func (f *File) offsetForAppend(ctx context.Context, offset *int64) error {
	uattr, err := f.Dirent.Inode.UnstableAttr(ctx)
	if err != nil {
		// This is an odd error, we treat it as evidence that
		// something is terribly wrong with the filesystem.
		return syserror.EIO
	}

	// Update the offset to the current file size.
	atomic.StoreInt64(offset, uattr.Size)

	return nil
}

// checkLimit checks the offset that the write will be performed at. The
// returned boolean indicates that the write must be limited. The returned
// integer indicates the new maximum write length.
func (f *File) checkLimit(ctx context.Context, offset int64) (int64, bool) {
	if IsRegular(f.Dirent.Inode.StableAttr) {
		// Enforce size limits. A limit above MaxInt64 cannot be
		// represented as an int64 offset and is treated as unlimited.
		fileSizeLimit := limits.FromContext(ctx).Get(limits.FileSize).Cur
		if fileSizeLimit <= math.MaxInt64 {
			if offset >= int64(fileSizeLimit) {
				// The offset is already at or past the limit; nothing
				// may be written.
				return 0, true
			}
			return int64(fileSizeLimit) - offset, true
		}
	}

	// No limit applies.
	return 0, false
}

// Fsync calls f.FileOperations.Fsync with f as the File.
//
// Returns syserror.ErrInterrupted if syncing was interrupted.
func (f *File) Fsync(ctx context.Context, start int64, end int64, syncType SyncType) error {
	if !f.mu.Lock(ctx) {
		return syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	return f.FileOperations.Fsync(ctx, f, start, end, syncType)
}

// Flush calls f.FileOperations.Flush with f as the File.
//
// Returns syserror.ErrInterrupted if syncing was interrupted.
func (f *File) Flush(ctx context.Context) error {
	if !f.mu.Lock(ctx) {
		return syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	return f.FileOperations.Flush(ctx, f)
}

// ConfigureMMap calls f.FileOperations.ConfigureMMap with f as the File.
//
// Returns syserror.ErrInterrupted if interrupted.
func (f *File) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
	if !f.mu.Lock(ctx) {
		return syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	return f.FileOperations.ConfigureMMap(ctx, f, opts)
}

// UnstableAttr calls f.FileOperations.UnstableAttr with f as the File.
//
// Returns syserror.ErrInterrupted if interrupted.
func (f *File) UnstableAttr(ctx context.Context) (UnstableAttr, error) {
	if !f.mu.Lock(ctx) {
		return UnstableAttr{}, syserror.ErrInterrupted
	}
	defer f.mu.Unlock()

	return f.FileOperations.UnstableAttr(ctx, f)
}

// MappedName implements memmap.MappingIdentity.MappedName.
func (f *File) MappedName(ctx context.Context) string {
	root := RootFromContext(ctx)
	if root != nil {
		defer root.DecRef()
	}
	// A failure to resolve the full name is deliberately ignored; the
	// best-effort name is returned.
	name, _ := f.Dirent.FullName(root)
	return name
}

// DeviceID implements memmap.MappingIdentity.DeviceID.
+func (f *File) DeviceID() uint64 { + return f.Dirent.Inode.StableAttr.DeviceID +} + +// InodeID implements memmap.MappingIdentity.InodeID. +func (f *File) InodeID() uint64 { + return f.Dirent.Inode.StableAttr.InodeID +} + +// Msync implements memmap.MappingIdentity.Msync. +func (f *File) Msync(ctx context.Context, mr memmap.MappableRange) error { + return f.Fsync(ctx, int64(mr.Start), int64(mr.End-1), SyncData) +} + +// A FileAsync sends signals to its owner when w is ready for IO. +type FileAsync interface { + Register(w waiter.Waitable) + Unregister(w waiter.Waitable) +} + +// Async gets the stored FileAsync or creates a new one with the supplied +// function. If the supplied function is nil, no FileAsync is created and the +// current value is returned. +func (f *File) Async(newAsync func() FileAsync) FileAsync { + f.flagsMu.Lock() + defer f.flagsMu.Unlock() + if f.async == nil && newAsync != nil { + f.async = newAsync() + if f.flags.Async { + f.async.Register(f) + } + } + return f.async +} + +// lockedReader implements io.Reader and io.ReaderAt. +// +// Note this reads the underlying file using the file operations directly. It +// is the responsibility of the caller to ensure that locks are appropriately +// held and offsets updated if required. This should be used only by internal +// functions that perform these operations and checks at other times. +type lockedReader struct { + // Ctx is the context for the file reader. + Ctx context.Context + + // File is the file to read from. + File *File + + // Offset is the offset to start at. + // + // This applies only to Read, not ReadAt. + Offset int64 +} + +// Read implements io.Reader.Read. +func (r *lockedReader) Read(buf []byte) (int, error) { + if r.Ctx.Interrupted() { + return 0, syserror.ErrInterrupted + } + n, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), r.Offset) + r.Offset += n + return int(n), err +} + +// ReadAt implements io.Reader.ReadAt. 
+func (r *lockedReader) ReadAt(buf []byte, offset int64) (int, error) { + if r.Ctx.Interrupted() { + return 0, syserror.ErrInterrupted + } + n, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), offset) + return int(n), err +} + +// lockedWriter implements io.Writer and io.WriterAt. +// +// The same constraints as lockedReader apply; see above. +type lockedWriter struct { + // Ctx is the context for the file writer. + Ctx context.Context + + // File is the file to write to. + File *File + + // Offset is the offset to start at. + // + // This applies only to Write, not WriteAt. + Offset int64 +} + +// Write implements io.Writer.Write. +func (w *lockedWriter) Write(buf []byte) (int, error) { + if w.Ctx.Interrupted() { + return 0, syserror.ErrInterrupted + } + n, err := w.WriteAt(buf, w.Offset) + w.Offset += int64(n) + return int(n), err +} + +// WriteAt implements io.Writer.WriteAt. +func (w *lockedWriter) WriteAt(buf []byte, offset int64) (int, error) { + var ( + written int + err error + ) + // The io.Writer contract requires that Write writes all available + // bytes and does not return short writes. This causes errors with + // io.Copy, since our own Write interface does not have this same + // contract. Enforce that here. + for written < len(buf) { + if w.Ctx.Interrupted() { + return written, syserror.ErrInterrupted + } + var n int64 + n, err = w.File.FileOperations.Write(w.Ctx, w.File, usermem.BytesIOSequence(buf[written:]), offset+int64(written)) + if n > 0 { + written += int(n) + } + if err != nil { + break + } + } + return written, err +} diff --git a/pkg/sentry/fs/file_operations.go b/pkg/sentry/fs/file_operations.go new file mode 100644 index 000000000..beba0f771 --- /dev/null +++ b/pkg/sentry/fs/file_operations.go @@ -0,0 +1,175 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "io" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// SpliceOpts define how a splice works. +type SpliceOpts struct { + // Length is the length of the splice operation. + Length int64 + + // SrcOffset indicates whether the existing source file offset should + // be used. If this is true, then the Start value below is used. + // + // When passed to FileOperations object, this should always be true as + // the offset will be provided by a layer above, unless the object in + // question is a pipe or socket. This value can be relied upon for such + // an indicator. + SrcOffset bool + + // SrcStart is the start of the source file. This is used only if + // SrcOffset is false. + SrcStart int64 + + // Dup indicates that the contents should not be consumed from the + // source (e.g. in the case of a socket or a pipe), but duplicated. + Dup bool + + // DstOffset indicates that the destination file offset should be used. + // + // See SrcOffset for additional information. + DstOffset bool + + // DstStart is the start of the destination file. This is used only if + // DstOffset is false. + DstStart int64 +} + +// FileOperations are operations on a File that diverge per file system. +// +// Operations that take a *File may use only the following interfaces: +// +// - File.UniqueID: Operations may only read this value. +// - File.Dirent: Operations must not take or drop a reference. 
+// - File.Offset(): This value is guaranteed to not change for the
+// duration of the operation.
+// - File.Flags(): This value may change during the operation.
+type FileOperations interface {
+	// Release releases resources held by FileOperations.
+	Release()
+
+	// Waitable defines how this File can be waited on for read and
+	// write readiness.
+	waiter.Waitable
+
+	// Seek seeks to offset based on SeekWhence. Returns the new
+	// offset or no change in the offset and an error.
+	Seek(ctx context.Context, file *File, whence SeekWhence, offset int64) (int64, error)
+
+	// Readdir reads the directory entries of file and serializes them
+	// using serializer.
+	//
+	// Returns the new directory offset or no change in the offset and
+	// an error. The offset returned must not be less than file.Offset().
+	//
+	// Serialization of directory entries must not happen asynchronously.
+	Readdir(ctx context.Context, file *File, serializer DentrySerializer) (int64, error)
+
+	// Read reads from file into dst at offset and returns the number
+	// of bytes read which must be greater than or equal to 0. File
+	// systems that do not support reading at an offset, (i.e. pipefs,
+	// sockfs) may ignore the offset. These file systems are expected
+	// to construct Files with !FileFlags.Pread.
+	//
+	// Read may return a nil error and only partially fill dst (at or
+	// before EOF). If the file represents a symlink, Read reads the target
+	// value of the symlink.
+	//
+	// Read does not check permissions nor flags.
+	//
+	// Read must not be called if !FileFlags.Read.
+	Read(ctx context.Context, file *File, dst usermem.IOSequence, offset int64) (int64, error)
+
+	// WriteTo is a variant of read that takes another file as a
+	// destination. For a splice (copy or move from one file to another),
+	// first a WriteTo on the source is attempted, followed by a ReadFrom
+	// on the destination, followed by a buffered copy with standard Read
+	// and Write operations.
+	//
+	// If dup is set, the data should be duplicated into the destination
+	// and retained.
+	//
+	// The same preconditions as Read apply.
+	WriteTo(ctx context.Context, file *File, dst io.Writer, count int64, dup bool) (int64, error)
+
+	// Write writes src to file at offset and returns the number of bytes
+	// written which must be greater than or equal to 0. Like Read, file
+	// systems that do not support writing at an offset (i.e. pipefs, sockfs)
+	// may ignore the offset. These file systems are expected to construct
+	// Files with !FileFlags.Pwrite.
+	//
+	// If only part of src could be written, Write must return an error
+	// indicating why (e.g. syserror.ErrWouldBlock).
+	//
+	// Write does not check permissions nor flags.
+	//
+	// Write must not be called if !FileFlags.Write.
+	Write(ctx context.Context, file *File, src usermem.IOSequence, offset int64) (int64, error)
+
+	// ReadFrom is a variant of write that takes another file as a
+	// source. See WriteTo for details regarding how this is called.
+	//
+	// The same preconditions as Write apply; FileFlags.Write must be set.
+	ReadFrom(ctx context.Context, file *File, src io.Reader, count int64) (int64, error)
+
+	// Fsync writes buffered modifications of file and/or flushes in-flight
+	// operations to backing storage based on syncType. The range to sync is
+	// [start, end]. The end is inclusive so that the last byte of a maximally
+	// sized file can be synced.
+	Fsync(ctx context.Context, file *File, start, end int64, syncType SyncType) error
+
+	// Flush this file's buffers/state (on close(2)).
+	Flush(ctx context.Context, file *File) error
+
+	// ConfigureMMap mutates opts to implement mmap(2) for the file. Most
+	// implementations can either embed fsutil.FileNoMMap (if they don't support
+	// memory mapping) or call fsutil.GenericConfigureMMap with the appropriate
+	// memmap.Mappable.
+ ConfigureMMap(ctx context.Context, file *File, opts *memmap.MMapOpts) error + + // UnstableAttr returns the "unstable" attributes of the inode represented + // by the file. Most implementations can embed + // fsutil.FileUseInodeUnstableAttr, which delegates to + // InodeOperations.UnstableAttr. + UnstableAttr(ctx context.Context, file *File) (UnstableAttr, error) + + // Ioctl implements the ioctl(2) linux syscall. + // + // io provides access to the virtual memory space to which pointers in args + // refer. + // + // Preconditions: The AddressSpace (if any) that io refers to is activated. + Ioctl(ctx context.Context, file *File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) +} + +// FifoSizer is an interface for setting and getting the size of a pipe. +type FifoSizer interface { + // FifoSize returns the pipe capacity in bytes. + FifoSize(ctx context.Context, file *File) (int64, error) + + // SetFifoSize sets the new pipe capacity in bytes. + // + // The new size is returned (which may be capped). + SetFifoSize(size int64) (int64, error) +} diff --git a/pkg/sentry/fs/file_overlay.go b/pkg/sentry/fs/file_overlay.go new file mode 100644 index 000000000..dcc1df38f --- /dev/null +++ b/pkg/sentry/fs/file_overlay.go @@ -0,0 +1,556 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "io" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// overlayFile gets a handle to a file from the upper or lower filesystem +// in an overlay. The caller is responsible for calling File.DecRef on +// the returned file. +func overlayFile(ctx context.Context, inode *Inode, flags FileFlags) (*File, error) { + // Do a song and dance to eventually get to: + // + // File -> single reference + // Dirent -> single reference + // Inode -> multiple references + // + // So that File.DecRef() -> File.destroy -> Dirent.DecRef -> Dirent.destroy, + // and both the transitory File and Dirent can be GC'ed but the Inode + // remains. + + // Take another reference on the Inode. + inode.IncRef() + + // Start with a single reference on the Dirent. It inherits the reference + // we just took on the Inode above. + dirent := NewTransientDirent(inode) + + // Get a File. This will take another reference on the Dirent. + f, err := inode.GetFile(ctx, dirent, flags) + + // Drop the extra reference on the Dirent. Now there's only one reference + // on the dirent, either owned by f (if non-nil), or the Dirent is about + // to be destroyed (if GetFile failed). + dirent.DecRef() + + return f, err +} + +// overlayFileOperations implements FileOperations for a file in an overlay. +// +// +stateify savable +type overlayFileOperations struct { + // upperMu protects upper below. In contrast lower is stable. + upperMu sync.Mutex `state:"nosave"` + + // We can't share Files in upper and lower filesystems between all Files + // in an overlay because some file systems expect to get distinct handles + // that are not consistent with each other on open(2). 
+ // + // So we lazily acquire an upper File when the overlayEntry acquires an + // upper Inode (it might have one from the start). This synchronizes with + // copy up. + // + // If upper is non-nil and this is not a directory, then lower is ignored. + // + // For directories, upper and lower are ignored because it is always + // necessary to acquire new directory handles so that the directory cursors + // of the upper and lower Files are not exhausted. + upper *File + lower *File + + // dirCursor is a directory cursor for a directory in an overlay. It is + // protected by File.mu of the owning file, which is held during + // Readdir and Seek calls. + dirCursor string +} + +// Release implements FileOperations.Release. +func (f *overlayFileOperations) Release() { + if f.upper != nil { + f.upper.DecRef() + } + if f.lower != nil { + f.lower.DecRef() + } +} + +// EventRegister implements FileOperations.EventRegister. +func (f *overlayFileOperations) EventRegister(we *waiter.Entry, mask waiter.EventMask) { + f.upperMu.Lock() + defer f.upperMu.Unlock() + if f.upper != nil { + f.upper.EventRegister(we, mask) + return + } + f.lower.EventRegister(we, mask) +} + +// EventUnregister implements FileOperations.Unregister. +func (f *overlayFileOperations) EventUnregister(we *waiter.Entry) { + f.upperMu.Lock() + defer f.upperMu.Unlock() + if f.upper != nil { + f.upper.EventUnregister(we) + return + } + f.lower.EventUnregister(we) +} + +// Readiness implements FileOperations.Readiness. +func (f *overlayFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + f.upperMu.Lock() + defer f.upperMu.Unlock() + if f.upper != nil { + return f.upper.Readiness(mask) + } + return f.lower.Readiness(mask) +} + +// Seek implements FileOperations.Seek. 
+func (f *overlayFileOperations) Seek(ctx context.Context, file *File, whence SeekWhence, offset int64) (int64, error) { + f.upperMu.Lock() + defer f.upperMu.Unlock() + + var seekDir bool + var n int64 + if f.upper != nil { + var err error + if n, err = f.upper.FileOperations.Seek(ctx, file, whence, offset); err != nil { + return n, err + } + seekDir = IsDir(f.upper.Dirent.Inode.StableAttr) + } else { + var err error + if n, err = f.lower.FileOperations.Seek(ctx, file, whence, offset); err != nil { + return n, err + } + seekDir = IsDir(f.lower.Dirent.Inode.StableAttr) + } + + // If this was a seek on a directory, we must update the cursor. + if seekDir && whence == SeekSet && offset == 0 { + // Currently only seeking to 0 on a directory is supported. + // FIXME(b/33075855): Lift directory seeking limitations. + f.dirCursor = "" + } + return n, nil +} + +// Readdir implements FileOperations.Readdir. +func (f *overlayFileOperations) Readdir(ctx context.Context, file *File, serializer DentrySerializer) (int64, error) { + root := RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + + dirCtx := &DirCtx{ + Serializer: serializer, + DirCursor: &f.dirCursor, + } + return DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset()) +} + +// IterateDir implements DirIterator.IterateDir. +func (f *overlayFileOperations) IterateDir(ctx context.Context, d *Dirent, dirCtx *DirCtx, offset int) (int, error) { + o := d.Inode.overlay + + if !d.Inode.MountSource.CacheReaddir() { + // Can't use the dirCache. Simply read the entries. + entries, err := readdirEntries(ctx, o) + if err != nil { + return offset, err + } + n, err := GenericReaddir(dirCtx, entries) + return offset + n, err + } + + // Otherwise, use or create cached entries. 
+ + o.dirCacheMu.RLock() + if o.dirCache != nil { + n, err := GenericReaddir(dirCtx, o.dirCache) + o.dirCacheMu.RUnlock() + return offset + n, err + } + o.dirCacheMu.RUnlock() + + // readdirEntries holds o.copyUpMu to ensure that copy-up does not + // occur while calculating the readdir results. + // + // However, it is possible for a copy-up to occur after the call to + // readdirEntries, but before setting o.dirCache. This is OK, since + // copy-up does not change the children in a way that would affect the + // children returned in dirCache. Copy-up only moves files/directories + // between layers in the overlay. + // + // We must hold dirCacheMu around both readdirEntries and setting + // o.dirCache to synchronize with dirCache invalidations done by + // Create, Remove, Rename. + o.dirCacheMu.Lock() + + // We expect dirCache to be nil (we just checked above), but there is a + // chance that a racing call managed to just set it, in which case we + // can use that new value. + if o.dirCache == nil { + dirCache, err := readdirEntries(ctx, o) + if err != nil { + o.dirCacheMu.Unlock() + return offset, err + } + o.dirCache = dirCache + } + + o.dirCacheMu.DowngradeLock() + n, err := GenericReaddir(dirCtx, o.dirCache) + o.dirCacheMu.RUnlock() + + return offset + n, err +} + +// onTop performs the given operation on the top-most available layer. +func (f *overlayFileOperations) onTop(ctx context.Context, file *File, fn func(*File, FileOperations) error) error { + file.Dirent.Inode.overlay.copyMu.RLock() + defer file.Dirent.Inode.overlay.copyMu.RUnlock() + + // Only lower layer is available. + if file.Dirent.Inode.overlay.upper == nil { + return fn(f.lower, f.lower.FileOperations) + } + + f.upperMu.Lock() + if f.upper == nil { + upper, err := overlayFile(ctx, file.Dirent.Inode.overlay.upper, file.Flags()) + if err != nil { + // Something very wrong; return a generic filesystem + // error to avoid propagating internals. 
+ f.upperMu.Unlock() + return syserror.EIO + } + + // Save upper file. + f.upper = upper + } + f.upperMu.Unlock() + + return fn(f.upper, f.upper.FileOperations) +} + +// Read implements FileOperations.Read. +func (f *overlayFileOperations) Read(ctx context.Context, file *File, dst usermem.IOSequence, offset int64) (n int64, err error) { + err = f.onTop(ctx, file, func(file *File, ops FileOperations) error { + n, err = ops.Read(ctx, file, dst, offset) + return err // Will overwrite itself. + }) + return +} + +// WriteTo implements FileOperations.WriteTo. +func (f *overlayFileOperations) WriteTo(ctx context.Context, file *File, dst io.Writer, count int64, dup bool) (n int64, err error) { + err = f.onTop(ctx, file, func(file *File, ops FileOperations) error { + n, err = ops.WriteTo(ctx, file, dst, count, dup) + return err // Will overwrite itself. + }) + return +} + +// Write implements FileOperations.Write. +func (f *overlayFileOperations) Write(ctx context.Context, file *File, src usermem.IOSequence, offset int64) (int64, error) { + // f.upper must be non-nil. See inode_overlay.go:overlayGetFile, where the + // file is copied up and opened in the upper filesystem if FileFlags.Write. + // Write cannot be called if !FileFlags.Write, see FileOperations.Write. + return f.upper.FileOperations.Write(ctx, f.upper, src, offset) +} + +// ReadFrom implements FileOperations.ReadFrom. +func (f *overlayFileOperations) ReadFrom(ctx context.Context, file *File, src io.Reader, count int64) (n int64, err error) { + // See above; f.upper must be non-nil. + return f.upper.FileOperations.ReadFrom(ctx, f.upper, src, count) +} + +// Fsync implements FileOperations.Fsync. +func (f *overlayFileOperations) Fsync(ctx context.Context, file *File, start, end int64, syncType SyncType) (err error) { + f.upperMu.Lock() + if f.upper != nil { + err = f.upper.FileOperations.Fsync(ctx, f.upper, start, end, syncType) + } + f.upperMu.Unlock() + if err == nil && f.lower != nil { + // N.B. 
Fsync on the lower filesystem can cause writes of file + // attributes (i.e. access time) despite the fact that we must + // treat the lower filesystem as read-only. + // + // This matches the semantics of fsync(2) in Linux overlayfs. + err = f.lower.FileOperations.Fsync(ctx, f.lower, start, end, syncType) + } + return err +} + +// Flush implements FileOperations.Flush. +func (f *overlayFileOperations) Flush(ctx context.Context, file *File) (err error) { + // Flush whatever handles we have. + f.upperMu.Lock() + if f.upper != nil { + err = f.upper.FileOperations.Flush(ctx, f.upper) + } + f.upperMu.Unlock() + if err == nil && f.lower != nil { + err = f.lower.FileOperations.Flush(ctx, f.lower) + } + return err +} + +// ConfigureMMap implements FileOperations.ConfigureMMap. +func (*overlayFileOperations) ConfigureMMap(ctx context.Context, file *File, opts *memmap.MMapOpts) error { + o := file.Dirent.Inode.overlay + + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + // If there is no lower inode, the overlay will never need to do a + // copy-up, and thus will never need to invalidate any mappings. We can + // call ConfigureMMap directly on the upper file. + if o.lower == nil { + f := file.FileOperations.(*overlayFileOperations) + if err := f.upper.ConfigureMMap(ctx, opts); err != nil { + return err + } + + // ConfigureMMap will set the MappableIdentity to the upper + // file and take a reference on it, but we must also hold a + // reference to the overlay file during the lifetime of the + // Mappable. If we do not do this, the overlay file can be + // Released before the upper file is Released, and we will be + // unable to traverse to the upper file during Save, thus + // preventing us from saving a proper inode mapping for the + // file. + file.IncRef() + id := overlayMappingIdentity{ + id: opts.MappingIdentity, + overlayFile: file, + } + id.EnableLeakCheck("fs.overlayMappingIdentity") + + // Swap out the old MappingIdentity for the wrapped one. 
+ opts.MappingIdentity = &id + return nil + } + + if !o.isMappableLocked() { + return syserror.ENODEV + } + + // FIXME(jamieliu): This is a copy/paste of fsutil.GenericConfigureMMap, + // which we can't use because the overlay implementation is in package fs, + // so depending on fs/fsutil would create a circular dependency. Move + // overlay to fs/overlay. + opts.Mappable = o + opts.MappingIdentity = file + file.IncRef() + return nil +} + +// UnstableAttr implements fs.FileOperations.UnstableAttr. +func (f *overlayFileOperations) UnstableAttr(ctx context.Context, file *File) (UnstableAttr, error) { + // Hot path. Avoid defers. + f.upperMu.Lock() + if f.upper != nil { + attr, err := f.upper.UnstableAttr(ctx) + f.upperMu.Unlock() + return attr, err + } + f.upperMu.Unlock() + + // It's possible that copy-up has occurred, but we haven't opened a upper + // file yet. If this is the case, just use the upper inode's UnstableAttr + // rather than opening a file. + o := file.Dirent.Inode.overlay + o.copyMu.RLock() + if o.upper != nil { + attr, err := o.upper.UnstableAttr(ctx) + o.copyMu.RUnlock() + return attr, err + } + o.copyMu.RUnlock() + + return f.lower.UnstableAttr(ctx) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (f *overlayFileOperations) Ioctl(ctx context.Context, overlayFile *File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + f.upperMu.Lock() + defer f.upperMu.Unlock() + + if f.upper == nil { + // It's possible that ioctl changes the file. Since we don't know all + // possible ioctls, only allow them to propagate to the upper. Triggering a + // copy up on any ioctl would be too drastic. In the future, it can have a + // list of ioctls that are safe to send to lower and a list that triggers a + // copy up. + return 0, syserror.ENOTTY + } + return f.upper.FileOperations.Ioctl(ctx, f.upper, io, args) +} + +// FifoSize implements FifoSizer.FifoSize. 
+func (f *overlayFileOperations) FifoSize(ctx context.Context, overlayFile *File) (rv int64, err error) { + err = f.onTop(ctx, overlayFile, func(file *File, ops FileOperations) error { + sz, ok := ops.(FifoSizer) + if !ok { + return syserror.EINVAL + } + rv, err = sz.FifoSize(ctx, file) + return err + }) + return +} + +// SetFifoSize implements FifoSizer.SetFifoSize. +func (f *overlayFileOperations) SetFifoSize(size int64) (rv int64, err error) { + f.upperMu.Lock() + defer f.upperMu.Unlock() + + if f.upper == nil { + // Named pipes cannot be copied up and changes to the lower are prohibited. + return 0, syserror.EINVAL + } + sz, ok := f.upper.FileOperations.(FifoSizer) + if !ok { + return 0, syserror.EINVAL + } + return sz.SetFifoSize(size) +} + +// readdirEntries returns a sorted map of directory entries from the +// upper and/or lower filesystem. +func readdirEntries(ctx context.Context, o *overlayEntry) (*SortedDentryMap, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + // Assert that there is at least one upper or lower entry. + if o.upper == nil && o.lower == nil { + panic("invalid overlayEntry, needs at least one Inode") + } + entries := make(map[string]DentAttr) + + // Try the upper filesystem first. + if o.upper != nil { + var err error + entries, err = readdirOne(ctx, NewTransientDirent(o.upper)) + if err != nil { + return nil, err + } + } + + // Try the lower filesystem next. + if o.lower != nil { + lowerEntries, err := readdirOne(ctx, NewTransientDirent(o.lower)) + if err != nil { + return nil, err + } + for name, entry := range lowerEntries { + // Skip this name if it is a negative entry in the + // upper or there exists a whiteout for it. + if o.upper != nil { + if overlayHasWhiteout(ctx, o.upper, name) { + continue + } + } + // Prefer the entries from the upper filesystem + // when names overlap. + if _, ok := entries[name]; !ok { + entries[name] = entry + } + } + } + + // Sort and return the entries. 
+	return NewSortedDentryMap(entries), nil
+}
+
+// readdirOne reads all of the directory entries from d.
+func readdirOne(ctx context.Context, d *Dirent) (map[string]DentAttr, error) {
+	dir, err := d.Inode.GetFile(ctx, d, FileFlags{Read: true})
+	if err != nil {
+		return nil, err
+	}
+	defer dir.DecRef()
+
+	// Use a stub serializer to read the entries into memory.
+	stubSerializer := &CollectEntriesSerializer{}
+	if err := dir.Readdir(ctx, stubSerializer); err != nil {
+		return nil, err
+	}
+	// The "." and ".." entries are from the overlay Inode's Dirent, not the stub.
+	delete(stubSerializer.Entries, ".")
+	delete(stubSerializer.Entries, "..")
+	return stubSerializer.Entries, nil
+}
+
+// overlayMappingIdentity wraps a MappingIdentity, and also holds a reference
+// on a file during its lifetime.
+//
+// +stateify savable
+type overlayMappingIdentity struct {
+	refs.AtomicRefCount
+	id          memmap.MappingIdentity
+	overlayFile *File
+}
+
+// DecRef implements AtomicRefCount.DecRef.
+func (omi *overlayMappingIdentity) DecRef() {
+	omi.AtomicRefCount.DecRefWithDestructor(func() {
+		omi.overlayFile.DecRef()
+		omi.id.DecRef()
+	})
+}
+
+// DeviceID implements MappingIdentity.DeviceID using the device id from the
+// overlayFile.
+func (omi *overlayMappingIdentity) DeviceID() uint64 {
+	return omi.overlayFile.Dirent.Inode.StableAttr.DeviceID
+}
+
+// InodeID implements MappingIdentity.InodeID using the inode id from the
+// overlayFile.
+func (omi *overlayMappingIdentity) InodeID() uint64 {
+	return omi.overlayFile.Dirent.Inode.StableAttr.InodeID
+}
+
+// MappedName implements MappingIdentity.MappedName.
+func (omi *overlayMappingIdentity) MappedName(ctx context.Context) string {
+	root := RootFromContext(ctx)
+	if root != nil {
+		defer root.DecRef()
+	}
+	name, _ := omi.overlayFile.Dirent.FullName(root)
+	return name
+}
+
+// Msync implements MappingIdentity.Msync.
+func (omi *overlayMappingIdentity) Msync(ctx context.Context, mr memmap.MappableRange) error { + return omi.id.Msync(ctx, mr) +} diff --git a/pkg/sentry/fs/file_overlay_test.go b/pkg/sentry/fs/file_overlay_test.go new file mode 100644 index 000000000..1971cc680 --- /dev/null +++ b/pkg/sentry/fs/file_overlay_test.go @@ -0,0 +1,192 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs_test + +import ( + "reflect" + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" +) + +func TestReaddir(t *testing.T) { + ctx := contexttest.Context(t) + ctx = &rootContext{ + Context: ctx, + root: fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root"), + } + for _, test := range []struct { + // Test description. + desc string + + // Lookup parameters. + dir *fs.Inode + + // Want from lookup. 
+ err error + names []string + }{ + { + desc: "no upper, lower has entries", + dir: fs.NewTestOverlayDir(ctx, + nil, /* upper */ + newTestRamfsDir(ctx, []dirContent{ + {name: "a"}, + {name: "b"}, + }, nil), /* lower */ + false /* revalidate */), + names: []string{".", "..", "a", "b"}, + }, + { + desc: "upper has entries, no lower", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + {name: "a"}, + {name: "b"}, + }, nil), /* upper */ + nil, /* lower */ + false /* revalidate */), + names: []string{".", "..", "a", "b"}, + }, + { + desc: "upper and lower, entries combine", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + {name: "a"}, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + {name: "b"}, + }, nil), /* lower */ + false /* revalidate */), + names: []string{".", "..", "a", "b"}, + }, + { + desc: "upper and lower, entries combine, none are masked", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + {name: "a"}, + }, []string{"b"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + {name: "c"}, + }, nil), /* lower */ + false /* revalidate */), + names: []string{".", "..", "a", "c"}, + }, + { + desc: "upper and lower, entries combine, upper masks some of lower", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + {name: "a"}, + }, []string{"b"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + {name: "b"}, /* will be masked */ + {name: "c"}, + }, nil), /* lower */ + false /* revalidate */), + names: []string{".", "..", "a", "c"}, + }, + } { + t.Run(test.desc, func(t *testing.T) { + openDir, err := test.dir.GetFile(ctx, fs.NewDirent(ctx, test.dir, "stub"), fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("GetFile got error %v, want nil", err) + } + stubSerializer := &fs.CollectEntriesSerializer{} + err = openDir.Readdir(ctx, stubSerializer) + if err != test.err { + t.Fatalf("Readdir got error %v, want nil", err) + } + if err != nil { + return + } + if 
!reflect.DeepEqual(stubSerializer.Order, test.names) { + t.Errorf("Readdir got names %v, want %v", stubSerializer.Order, test.names) + } + }) + } +} + +func TestReaddirRevalidation(t *testing.T) { + ctx := contexttest.Context(t) + ctx = &rootContext{ + Context: ctx, + root: fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root"), + } + + // Create an overlay with two directories, each with one file. + upper := newTestRamfsDir(ctx, []dirContent{{name: "a"}}, nil) + lower := newTestRamfsDir(ctx, []dirContent{{name: "b"}}, nil) + overlay := fs.NewTestOverlayDir(ctx, upper, lower, true /* revalidate */) + + // Get a handle to the dirent in the upper filesystem so that we can + // modify it without going through the dirent. + upperDir := upper.InodeOperations.(*dir).InodeOperations.(*ramfs.Dir) + + // Check that overlay returns the files from both upper and lower. + openDir, err := overlay.GetFile(ctx, fs.NewDirent(ctx, overlay, "stub"), fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("GetFile got error %v, want nil", err) + } + ser := &fs.CollectEntriesSerializer{} + if err := openDir.Readdir(ctx, ser); err != nil { + t.Fatalf("Readdir got error %v, want nil", err) + } + got, want := ser.Order, []string{".", "..", "a", "b"} + if !reflect.DeepEqual(got, want) { + t.Errorf("Readdir got names %v, want %v", got, want) + } + + // Remove "a" from the upper and add "c". + if err := upperDir.Remove(ctx, upper, "a"); err != nil { + t.Fatalf("error removing child: %v", err) + } + upperDir.AddChild(ctx, "c", fs.NewInode(ctx, fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermissions{}, 0), + upper.MountSource, fs.StableAttr{Type: fs.RegularFile})) + + // Seek to beginning of the directory and do the readdir again. 
+ if _, err := openDir.Seek(ctx, fs.SeekSet, 0); err != nil { + t.Fatalf("error seeking to beginning of dir: %v", err) + } + ser = &fs.CollectEntriesSerializer{} + if err := openDir.Readdir(ctx, ser); err != nil { + t.Fatalf("Readdir got error %v, want nil", err) + } + + // Readdir should return the updated children. + got, want = ser.Order, []string{".", "..", "b", "c"} + if !reflect.DeepEqual(got, want) { + t.Errorf("Readdir got names %v, want %v", got, want) + } +} + +type rootContext struct { + context.Context + root *fs.Dirent +} + +// Value implements context.Context. +func (r *rootContext) Value(key interface{}) interface{} { + switch key { + case fs.CtxRoot: + r.root.IncRef() + return r.root + default: + return r.Context.Value(key) + } +} diff --git a/pkg/sentry/fs/file_state.go b/pkg/sentry/fs/file_state.go new file mode 100644 index 000000000..523182d59 --- /dev/null +++ b/pkg/sentry/fs/file_state.go @@ -0,0 +1,31 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// beforeSave is invoked by stateify. +func (f *File) beforeSave() { + f.saving = true + if f.flags.Async && f.async != nil { + f.async.Unregister(f) + } +} + +// afterLoad is invoked by stateify. 
+func (f *File) afterLoad() { + f.mu.Init() + if f.flags.Async && f.async != nil { + f.async.Register(f) + } +} diff --git a/pkg/sentry/fs/filesystems.go b/pkg/sentry/fs/filesystems.go new file mode 100644 index 000000000..d41f30bbb --- /dev/null +++ b/pkg/sentry/fs/filesystems.go @@ -0,0 +1,160 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "sort" + "strings" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sync" +) + +// FilesystemFlags matches include/linux/fs.h:file_system_type.fs_flags. +type FilesystemFlags int + +const ( + // FilesystemRequiresDev indicates that the file system requires a device name + // on mount. It is used to construct the output of /proc/filesystems. + FilesystemRequiresDev FilesystemFlags = 1 + + // Currently other flags are not used, but can be pulled in from + // include/linux/fs.h:file_system_type as needed. +) + +// Filesystem is a mountable file system. +type Filesystem interface { + // Name is the unique identifier of the file system. It corresponds to the + // filesystemtype argument of sys_mount and will appear in the output of + // /proc/filesystems. + Name() string + + // Flags indicate common properties of the file system. + Flags() FilesystemFlags + + // Mount generates a mountable Inode backed by device and configured + // using file system independent flags and file system dependent + // data options. 
+ // + // Mount may return arbitrary errors. They do not need syserr translations. + Mount(ctx context.Context, device string, flags MountSourceFlags, data string, dataObj interface{}) (*Inode, error) + + // AllowUserMount determines whether mount(2) is allowed to mount a + // file system of this type. + AllowUserMount() bool + + // AllowUserList determines whether this filesystem is listed in + // /proc/filesystems + AllowUserList() bool +} + +// filesystems is the global set of registered file systems. It does not need +// to be saved. Packages registering and unregistering file systems must do so +// before calling save/restore methods. +var filesystems = struct { + // mu protects registered below. + mu sync.Mutex + + // registered is a set of registered Filesystems. + registered map[string]Filesystem +}{ + registered: make(map[string]Filesystem), +} + +// RegisterFilesystem registers a new file system that is visible to mount and +// the /proc/filesystems list. Packages implementing Filesystem should call +// RegisterFilesystem in init(). +func RegisterFilesystem(f Filesystem) { + filesystems.mu.Lock() + defer filesystems.mu.Unlock() + + if _, ok := filesystems.registered[f.Name()]; ok { + panic(fmt.Sprintf("filesystem already registered at %q", f.Name())) + } + filesystems.registered[f.Name()] = f +} + +// FindFilesystem returns a Filesystem registered at name or (nil, false) if name +// is not a file system type that can be found in /proc/filesystems. +func FindFilesystem(name string) (Filesystem, bool) { + filesystems.mu.Lock() + defer filesystems.mu.Unlock() + + f, ok := filesystems.registered[name] + return f, ok +} + +// GetFilesystems returns the set of registered filesystems in a consistent order. 
+func GetFilesystems() []Filesystem { + filesystems.mu.Lock() + defer filesystems.mu.Unlock() + + var ss []Filesystem + for _, s := range filesystems.registered { + ss = append(ss, s) + } + sort.Slice(ss, func(i, j int) bool { return ss[i].Name() < ss[j].Name() }) + return ss +} + +// MountSourceFlags represents all mount option flags as a struct. +// +// +stateify savable +type MountSourceFlags struct { + // ReadOnly corresponds to mount(2)'s "MS_RDONLY" and indicates that + // the filesystem should be mounted read-only. + ReadOnly bool + + // NoAtime corresponds to mount(2)'s "MS_NOATIME" and indicates that + // the filesystem should not update access time in-place. + NoAtime bool + + // ForcePageCache causes all filesystem I/O operations to use the page + // cache, even when the platform supports direct mapped I/O. This + // doesn't correspond to any Linux mount options. + ForcePageCache bool + + // NoExec corresponds to mount(2)'s "MS_NOEXEC" and indicates that + // binaries from this file system can't be executed. + NoExec bool +} + +// GenericMountSourceOptions splits a string containing comma separated tokens of the +// format 'key=value' or 'key' into a map of keys and values. For example: +// +// data = "key0=value0,key1,key2=value2" -> map{'key0':'value0','key1':'','key2':'value2'} +// +// If data contains duplicate keys, then the last token wins. +func GenericMountSourceOptions(data string) map[string]string { + options := make(map[string]string) + if len(data) == 0 { + // Don't return a nil map, callers might not be expecting that. + return options + } + + // Parse options and skip empty ones. 
+ for _, opt := range strings.Split(data, ",") { + if len(opt) > 0 { + res := strings.SplitN(opt, "=", 2) + if len(res) == 2 { + options[res[0]] = res[1] + } else { + options[opt] = "" + } + } + } + return options +} diff --git a/pkg/sentry/fs/filetest/BUILD b/pkg/sentry/fs/filetest/BUILD new file mode 100644 index 000000000..a8000e010 --- /dev/null +++ b/pkg/sentry/fs/filetest/BUILD @@ -0,0 +1,19 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "filetest", + testonly = 1, + srcs = ["filetest.go"], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/context", + "//pkg/sentry/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/usermem", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/fs/filetest/filetest.go b/pkg/sentry/fs/filetest/filetest.go new file mode 100644 index 000000000..8049538f2 --- /dev/null +++ b/pkg/sentry/fs/filetest/filetest.go @@ -0,0 +1,61 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filetest provides a test implementation of an fs.File. 
+package filetest + +import ( + "fmt" + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/anon" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// TestFileOperations is an implementation of the File interface. It provides all +// required methods. +type TestFileOperations struct { + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FilePipeSeek `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoFsync `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +// NewTestFile creates and initializes a new test file. +func NewTestFile(tb testing.TB) *fs.File { + ctx := contexttest.Context(tb) + dirent := fs.NewDirent(ctx, anon.NewInode(ctx), "test") + return fs.NewFile(ctx, dirent, fs.FileFlags{}, &TestFileOperations{}) +} + +// Read just fails the request. +func (*TestFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, fmt.Errorf("Readv not implemented") +} + +// Write just fails the request. +func (*TestFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, fmt.Errorf("Writev not implemented") +} diff --git a/pkg/sentry/fs/flags.go b/pkg/sentry/fs/flags.go new file mode 100644 index 000000000..4338ae1fa --- /dev/null +++ b/pkg/sentry/fs/flags.go @@ -0,0 +1,138 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" +) + +// FileFlags encodes file flags. +// +// +stateify savable +type FileFlags struct { + // Direct indicates that I/O should be done directly. + Direct bool + + // NonBlocking indicates that I/O should not block. + NonBlocking bool + + // DSync indicates that each write will flush data and metadata required to + // read the file's contents. + DSync bool + + // Sync indicates that each write will flush data and all file metadata. + Sync bool + + // Append indicates this file is append only. + Append bool + + // Read indicates this file is readable. + Read bool + + // Write indicates this file is writeable. + Write bool + + // Pread indicates this file is readable at an arbitrary offset. + Pread bool + + // Pwrite indicates this file is writable at an arbitrary offset. + Pwrite bool + + // Directory indicates that this file must be a directory. + Directory bool + + // Async indicates that this file sends signals on IO events. + Async bool + + // LargeFile indicates that this file should be opened even if it has + // size greater than linux's off_t. When running in 64-bit mode, + // Linux sets this flag for all files. Since gVisor is only compatible + // with 64-bit Linux, it also sets this flag for all files. + LargeFile bool + + // NonSeekable indicates that file.offset isn't used. + NonSeekable bool + + // Truncate indicates that the file should be truncated before opened. + // This is only applicable if the file is regular. 
+ Truncate bool +} + +// SettableFileFlags is a subset of FileFlags above that can be changed +// via fcntl(2) using the F_SETFL command. +type SettableFileFlags struct { + // Direct indicates that I/O should be done directly. + Direct bool + + // NonBlocking indicates that I/O should not block. + NonBlocking bool + + // Append indicates this file is append only. + Append bool + + // Async indicates that this file sends signals on IO events. + Async bool +} + +// Settable returns the subset of f that are settable. +func (f FileFlags) Settable() SettableFileFlags { + return SettableFileFlags{ + Direct: f.Direct, + NonBlocking: f.NonBlocking, + Append: f.Append, + Async: f.Async, + } +} + +// ToLinux converts a FileFlags object to a Linux representation. +func (f FileFlags) ToLinux() (mask uint) { + if f.Direct { + mask |= linux.O_DIRECT + } + if f.NonBlocking { + mask |= linux.O_NONBLOCK + } + if f.DSync { + mask |= linux.O_DSYNC + } + if f.Sync { + mask |= linux.O_SYNC + } + if f.Append { + mask |= linux.O_APPEND + } + if f.Directory { + mask |= linux.O_DIRECTORY + } + if f.Async { + mask |= linux.O_ASYNC + } + if f.LargeFile { + mask |= linux.O_LARGEFILE + } + if f.Truncate { + mask |= linux.O_TRUNC + } + + switch { + case f.Read && f.Write: + mask |= linux.O_RDWR + case f.Write: + mask |= linux.O_WRONLY + case f.Read: + mask |= linux.O_RDONLY + } + return +} diff --git a/pkg/sentry/fs/fs.go b/pkg/sentry/fs/fs.go new file mode 100644 index 000000000..d2dbff268 --- /dev/null +++ b/pkg/sentry/fs/fs.go @@ -0,0 +1,161 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fs implements a virtual filesystem layer. +// +// Specific filesystem implementations must implement the InodeOperations +// interface (inode.go). +// +// The MountNamespace (mounts.go) is used to create a collection of mounts in +// a filesystem rooted at a given Inode. +// +// MountSources (mount.go) form a tree, with each mount holding pointers to its +// parent and children. +// +// Dirents (dirents.go) wrap Inodes in a caching layer. +// +// When multiple locks are to be held at the same time, they should be acquired +// in the following order. +// +// Either: +// File.mu +// Locks in FileOperations implementations +// goto Dirent-Locks +// +// Or: +// MountNamespace.mu +// goto Dirent-Locks +// +// Dirent-Locks: +// renameMu +// Dirent.dirMu +// Dirent.mu +// DirentCache.mu +// Inode.Watches.mu (see `Inotify` for other lock ordering) +// MountSource.mu +// Inode.appendMu +// Locks in InodeOperations implementations or overlayEntry +// +// If multiple Dirent or MountSource locks must be taken, locks in the parent must be +// taken before locks in their children. +// +// If locks must be taken on multiple unrelated Dirents, renameMu must be taken +// first. See lockForRename. +package fs + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sync" +) + +var ( + // workMu is used to synchronize pending asynchronous work. Async work + // runs with the lock held for reading. 
AsyncBarrier will take the lock + // for writing, thus ensuring that all Async work completes before + // AsyncBarrier returns. + workMu sync.RWMutex + + // asyncError is used to store up to one asynchronous execution error. + asyncError = make(chan error, 1) +) + +// AsyncBarrier waits for all outstanding asynchronous work to complete. +func AsyncBarrier() { + workMu.Lock() + workMu.Unlock() +} + +// Async executes a function asynchronously. +// +// Async must not be called recursively. +func Async(f func()) { + workMu.RLock() + go func() { // S/R-SAFE: AsyncBarrier must be called. + defer workMu.RUnlock() // Ensure RUnlock in case of panic. + f() + }() +} + +// AsyncWithContext is just like Async, except that it calls the asynchronous +// function with the given context as argument. This function exists to avoid +// needing to allocate an extra function on the heap in a hot path. +func AsyncWithContext(ctx context.Context, f func(context.Context)) { + workMu.RLock() + go func() { // S/R-SAFE: AsyncBarrier must be called. + defer workMu.RUnlock() // Ensure RUnlock in case of panic. + f(ctx) + }() +} + +// AsyncErrorBarrier waits for all outstanding asynchronous work to complete, or +// the first async error to arrive. Other unfinished async executions will +// continue in the background. Other past and future async errors are ignored. +func AsyncErrorBarrier() error { + wait := make(chan struct{}, 1) + go func() { // S/R-SAFE: Does not touch persistent state. + AsyncBarrier() + wait <- struct{}{} + }() + select { + case <-wait: + select { + case err := <-asyncError: + return err + default: + return nil + } + case err := <-asyncError: + return err + } +} + +// CatchError tries to capture the potential async error returned by the +// function. At most one async error will be captured globally so excessive +// errors will be dropped. 
+func CatchError(f func() error) func() { + return func() { + if err := f(); err != nil { + select { + case asyncError <- err: + default: + log.Warningf("excessive async error dropped: %v", err) + } + } + } +} + +// ErrSaveRejection indicates a failed save due to unsupported file system state +// such as dangling open fd, etc. +type ErrSaveRejection struct { + // Err is the wrapped error. + Err error +} + +// Error returns a sensible description of the save rejection error. +func (e ErrSaveRejection) Error() string { + return "save rejected due to unsupported file system state: " + e.Err.Error() +} + +// ErrCorruption indicates a failed restore due to external file system state in +// corruption. +type ErrCorruption struct { + // Err is the wrapped error. + Err error +} + +// Error returns a sensible description of the restore error. +func (e ErrCorruption) Error() string { + return "restore failed due to external file system state in corruption: " + e.Err.Error() +} diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD new file mode 100644 index 000000000..789369220 --- /dev/null +++ b/pkg/sentry/fs/fsutil/BUILD @@ -0,0 +1,118 @@ +load("//tools:defs.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +package(licenses = ["notice"]) + +go_template_instance( + name = "dirty_set_impl", + out = "dirty_set_impl.go", + imports = { + "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap", + "platform": "gvisor.dev/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "Dirty", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "memmap.MappableRange", + "Value": "DirtyInfo", + "Functions": "dirtySetFunctions", + }, +) + +go_template_instance( + name = "frame_ref_set_impl", + out = "frame_ref_set_impl.go", + imports = { + "platform": "gvisor.dev/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "FrameRef", + template = "//pkg/segment:generic_set", + types = { + 
"Key": "uint64", + "Range": "platform.FileRange", + "Value": "uint64", + "Functions": "FrameRefSetFunctions", + }, +) + +go_template_instance( + name = "file_range_set_impl", + out = "file_range_set_impl.go", + imports = { + "memmap": "gvisor.dev/gvisor/pkg/sentry/memmap", + "platform": "gvisor.dev/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "FileRange", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "memmap.MappableRange", + "Value": "uint64", + "Functions": "FileRangeSetFunctions", + }, +) + +go_library( + name = "fsutil", + srcs = [ + "dirty_set.go", + "dirty_set_impl.go", + "file.go", + "file_range_set.go", + "file_range_set_impl.go", + "frame_ref_set.go", + "frame_ref_set_impl.go", + "fsutil.go", + "host_file_mapper.go", + "host_file_mapper_state.go", + "host_file_mapper_unsafe.go", + "host_mappable.go", + "inode.go", + "inode_cached.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/log", + "//pkg/safemem", + "//pkg/sentry/arch", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/pgalloc", + "//pkg/sentry/platform", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/usage", + "//pkg/state", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "fsutil_test", + size = "small", + srcs = [ + "dirty_set_test.go", + "inode_cached_test.go", + ], + library = ":fsutil", + deps = [ + "//pkg/context", + "//pkg/safemem", + "//pkg/sentry/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/syserror", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/fsutil/README.md b/pkg/sentry/fs/fsutil/README.md new file mode 100644 index 000000000..8be367334 --- /dev/null +++ b/pkg/sentry/fs/fsutil/README.md @@ -0,0 +1,207 @@ +This package provides utilities for implementing virtual filesystem 
objects. + +[TOC] + +## Page cache + +`CachingInodeOperations` implements a page cache for files that cannot use the +host page cache. Normally these are files that store their data in a remote +filesystem. This also applies to files that are accessed on a platform that does +not support directly memory mapping host file descriptors (e.g. the ptrace +platform). + +A `CachingInodeOperations` buffers regions of a single file into memory. It is +owned by an `fs.Inode`, the in-memory representation of a file (all open file +descriptors are backed by an `fs.Inode`). The `fs.Inode` provides operations for +reading memory into a `CachingInodeOperations`, to represent the contents of +the file in-memory, and for writing memory out, to relieve memory pressure on +the kernel and to synchronize in-memory changes to filesystems. + +A `CachingInodeOperations` enables readable and/or writable memory access to +file content. Files can be mapped shared or private, see mmap(2). When a file is +mapped shared, changes to the file via write(2) and truncate(2) are reflected in +the shared memory region. Conversely, when the shared memory region is modified, +changes to the file are visible via read(2). Multiple shared mappings of the +same file are coherent with each other. This is consistent with Linux. + +When a file is mapped private, updates to the mapped memory are not visible to +other memory mappings. Updates to the mapped memory are also not reflected in +the file content as seen by read(2). If the file is changed after a private +mapping is created, for instance by write(2), the change to the file may or may +not be reflected in the private mapping. This is consistent with Linux. + +A `CachingInodeOperations` keeps track of ranges of memory that were modified +(or "dirtied"). When the file is explicitly synced via fsync(2), only the dirty +ranges are written out to the filesystem.
Any error returned indicates a failure +to write all dirty memory of an `CachingInodeOperations` to the filesystem. In +this case the filesystem may be in an inconsistent state. The same operation can +be performed on the shared memory itself using msync(2). If neither fsync(2) nor +msync(2) is performed, then the dirty memory is written out in accordance with +the `CachingInodeOperations` eviction strategy (see below) and there is no +guarantee that memory will be written out successfully in full. + +### Memory allocation and eviction + +An `CachingInodeOperations` implements the following allocation and eviction +strategy: + +- Memory is allocated and brought up to date with the contents of a file when + a region of mapped memory is accessed (or "faulted on"). + +- Dirty memory is written out to filesystems when an fsync(2) or msync(2) + operation is performed on a memory mapped file, for all memory mapped files + when saved, and/or when there are no longer any memory mappings of a range + of a file, see munmap(2). As the latter implies, in the absence of a panic + or SIGKILL, dirty memory is written out for all memory mapped files when an + application exits. + +- Memory is freed when there are no longer any memory mappings of a range of a + file (e.g. when an application exits). This behavior is consistent with + Linux for shared memory that has been locked via mlock(2). + +Notably, memory is not allocated for read(2) or write(2) operations. This means +that reads and writes to the file are only accelerated by an +`CachingInodeOperations` if the file being read or written has been memory +mapped *and* if the shared memory has been accessed at the region being read or +written. This diverges from Linux which buffers memory into a page cache on +read(2) proactively (i.e. readahead) and delays writing it out to filesystems on +write(2) (i.e. writeback). 
The absence of these optimizations is not visible to +applications beyond less than optimal performance when repeatedly reading and/or +writing to the same region of a file. See [Future Work](#future-work) for plans to +implement these optimizations. + +Additionally, memory held by `CachingInodeOperationss` is currently unbounded in +size. A `CachingInodeOperations` does not write out dirty memory and free it +under system memory pressure. This can cause pathological memory usage. + +When memory is written back, a `CachingInodeOperations` may write regions of +shared memory that were never modified. This is due to the strategy of +minimizing page faults (see below) and handling only a subset of memory write +faults. In the absence of an application or sentry crash, it is guaranteed that +if a region of shared memory was written to, it is written back to a filesystem. + +### Life of a shared memory mapping + +A file is memory mapped via mmap(2). For example, if `A` is an address, an +application may execute: + +``` +mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); +``` + +This creates a shared mapping of fd that reflects 4k of the contents of fd +starting at offset 0, accessible at address `A`. This in turn creates a virtual +memory area region ("vma") which indicates that [`A`, `A`+0x1000) is now a valid +address range for this application to access. + +At this point, memory has not been allocated in the file's +`CachingInodeOperations`. It is also the case that the address range [`A`, +`A`+0x1000) has not been mapped on the host on behalf of the application. If the +application then tries to modify 8 bytes of the shared memory: + +``` +char buffer[] = "aaaaaaaa"; +memcpy(A, buffer, 8); +``` + +The host then sends a `SIGSEGV` to the sentry because the address range [`A`, +`A`+8) is not mapped on the host. The `SIGSEGV` indicates that the memory was +accessed writable.
The sentry looks up the vma associated with [`A`, `A`+8), +finds the file that was mapped and its `CachingInodeOperations`. It then calls +`CachingInodeOperations.Translate` which allocates memory to back [`A`, `A`+8). +It may choose to allocate more memory (i.e. do "readahead") to minimize +subsequent faults. + +Memory that is allocated comes from a host tmpfs file (see +`pgalloc.MemoryFile`). The host tmpfs file memory is brought up to date with the +contents of the mapped file on its filesystem. The region of the host tmpfs file +that reflects the mapped file is then mapped into the host address space of the +application so that subsequent memory accesses do not repeatedly generate a +`SIGSEGV`. + +The range that was allocated, including any extra memory allocation to minimize +faults, is marked dirty due to the write fault. This overcounts dirty memory if +the extra memory allocated is never modified. + +To make the scenario more interesting, imagine that this application spawns +another process and maps the same file in the exact same way: + +``` +mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); +``` + +Imagine that this process then tries to modify the file again but with only 4 +bytes: + +``` +char buffer[] = "bbbb"; +memcpy(A, buffer, 4); +``` + +Since the first process has already mapped and accessed the same region of the +file writable, `CachingInodeOperations.Translate` is called but returns the +memory that has already been allocated rather than allocating new memory. The +address range [`A`, `A`+0x1000) reflects the same cached view of the file as the +first process sees. For example, reading 8 bytes from the file from either +process via read(2) starting at offset 0 returns a consistent "bbbbaaaa". 
+ +When this process no longer needs the shared memory, it may do: + +``` +munmap(A, 0x1000); +``` + +At this point, the modified memory cached by the `CachingInodeOperations` is not +written back to the file because it is still in use by the first process that +mapped it. When the first process also does: + +``` +munmap(A, 0x1000); +``` + +Then the last memory mapping of the file at the range [0, 0x1000) is gone. The +file's `CachingInodeOperations` then starts writing back memory marked dirty to +the file on its filesystem. Once writing completes, regardless of whether it was +successful, the `CachingInodeOperations` frees the memory cached at the range +[0, 0x1000). + +Subsequent read(2) or write(2) operations on the file go directly to the +filesystem since there no longer exists memory for it in its +`CachingInodeOperations`. + +## Future Work + +### Page cache + +The sentry does not yet implement the readahead and writeback optimizations for +read(2) and write(2) respectively. To do so, on read(2) and/or write(2) the +sentry must ensure that memory is allocated in a page cache to read or write +into. However, the sentry cannot boundlessly allocate memory. If it did, the +host would eventually OOM-kill the sentry+application process. This means that +the sentry must implement a page cache memory allocation strategy that is +bounded by a global user or container imposed limit. When this limit is +approached, the sentry must decide from which page cache memory should be freed +so that it can allocate more memory. If it makes a poor decision, the sentry may +end up freeing and re-allocating memory to back regions of files that are +frequently used, nullifying the optimization (and in some cases causing worse +performance due to the overhead of memory allocation and general management). +This is a form of "cache thrashing". + +In Linux, much research has been done to select and implement a lightweight but +optimal page cache eviction algorithm. 
Linux makes use of hardware page bits to +keep track of whether memory has been accessed. The sentry does not have direct +access to hardware. Implementing a similarly lightweight and optimal page cache +eviction algorithm will need to either introduce a kernel interface to obtain +these page bits or find a suitable alternative proxy for access events. + +In Linux, readahead happens by default but is not always ideal. For instance, +for files that are not read sequentially, it would be more ideal to simply read +from only those regions of the file rather than to optimistically cache some +number of bytes ahead of the read (up to 2MB in Linux) if the bytes cached won't +be accessed. Linux implements the fadvise64(2) system call for applications to +specify that a range of a file will not be accessed sequentially. The advice bit +FADV_RANDOM turns off the readahead optimization for the given range in the +given file. However fadvise64 is rarely used by applications so Linux implements +a readahead backoff strategy if reads are not sequential. To ensure that +application performance is not degraded, the sentry must implement a similar +backoff strategy. diff --git a/pkg/sentry/fs/fsutil/dirty_set.go b/pkg/sentry/fs/fsutil/dirty_set.go new file mode 100644 index 000000000..c6cd45087 --- /dev/null +++ b/pkg/sentry/fs/fsutil/dirty_set.go @@ -0,0 +1,237 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fsutil + +import ( + "math" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/platform" + "gvisor.dev/gvisor/pkg/usermem" +) + +// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to +// implement Mappables that cache data from another source. +// +// type DirtySet <generated by go_generics> + +// DirtyInfo is the value type of DirtySet, and represents information about a +// Mappable offset that is dirty (the cached data for that offset is newer than +// its source). +// +// +stateify savable +type DirtyInfo struct { + // Keep is true if the represented offset is concurrently writable, such + // that writing the data for that offset back to the source does not + // guarantee that the offset is clean (since it may be concurrently + // rewritten after the writeback). + Keep bool +} + +// dirtySetFunctions implements segment.Functions for DirtySet. +type dirtySetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (dirtySetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (dirtySetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (dirtySetFunctions) ClearValue(val *DirtyInfo) { +} + +// Merge implements segment.Functions.Merge. +func (dirtySetFunctions) Merge(_ memmap.MappableRange, val1 DirtyInfo, _ memmap.MappableRange, val2 DirtyInfo) (DirtyInfo, bool) { + if val1 != val2 { + return DirtyInfo{}, false + } + return val1, true +} + +// Split implements segment.Functions.Split. +func (dirtySetFunctions) Split(_ memmap.MappableRange, val DirtyInfo, _ uint64) (DirtyInfo, DirtyInfo) { + return val, val +} + +// MarkClean marks all offsets in mr as not dirty, except for those to which +// KeepDirty has been applied. 
+func (ds *DirtySet) MarkClean(mr memmap.MappableRange) { + seg := ds.LowerBoundSegment(mr.Start) + for seg.Ok() && seg.Start() < mr.End { + if seg.Value().Keep { + seg = seg.NextSegment() + continue + } + seg = ds.Isolate(seg, mr) + seg = ds.Remove(seg).NextSegment() + } +} + +// KeepClean marks all offsets in mr as not dirty, even those that were +// previously kept dirty by KeepDirty. +func (ds *DirtySet) KeepClean(mr memmap.MappableRange) { + ds.RemoveRange(mr) +} + +// MarkDirty marks all offsets in mr as dirty. +func (ds *DirtySet) MarkDirty(mr memmap.MappableRange) { + ds.setDirty(mr, false) +} + +// KeepDirty marks all offsets in mr as dirty and prevents them from being +// marked as clean by MarkClean. +func (ds *DirtySet) KeepDirty(mr memmap.MappableRange) { + ds.setDirty(mr, true) +} + +func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) { + var changedAny bool + defer func() { + if changedAny { + // Merge segments split by Isolate to reduce cost of iteration. + ds.MergeRange(mr) + } + }() + seg, gap := ds.Find(mr.Start) + for { + switch { + case seg.Ok() && seg.Start() < mr.End: + if keep && !seg.Value().Keep { + changedAny = true + seg = ds.Isolate(seg, mr) + seg.ValuePtr().Keep = true + } + seg, gap = seg.NextNonEmpty() + + case gap.Ok() && gap.Start() < mr.End: + changedAny = true + seg = ds.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep}) + seg, gap = seg.NextNonEmpty() + + default: + return + } + } +} + +// AllowClean allows MarkClean to mark offsets in mr as not dirty, ending the +// effect of a previous call to KeepDirty. (It does not itself mark those +// offsets as not dirty.) +func (ds *DirtySet) AllowClean(mr memmap.MappableRange) { + var changedAny bool + defer func() { + if changedAny { + // Merge segments split by Isolate to reduce cost of iteration. 
+ ds.MergeRange(mr) + } + }() + for seg := ds.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() { + if seg.Value().Keep { + changedAny = true + seg = ds.Isolate(seg, mr) + seg.ValuePtr().Keep = false + } + } +} + +// SyncDirty passes pages in the range mr that are stored in cache and +// identified as dirty to writeAt, updating dirty to reflect successful writes. +// If writeAt returns a successful partial write, SyncDirty will call it +// repeatedly until all bytes have been written. max is the true size of the +// cached object; offsets beyond max will not be passed to writeAt, even if +// they are marked dirty. +func SyncDirty(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, dirty *DirtySet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error { + var changedDirty bool + defer func() { + if changedDirty { + // Merge segments split by Isolate to reduce cost of iteration. + dirty.MergeRange(mr) + } + }() + dseg := dirty.LowerBoundSegment(mr.Start) + for dseg.Ok() && dseg.Start() < mr.End { + var dr memmap.MappableRange + if dseg.Value().Keep { + dr = dseg.Range().Intersect(mr) + } else { + changedDirty = true + dseg = dirty.Isolate(dseg, mr) + dr = dseg.Range() + } + if err := syncDirtyRange(ctx, dr, cache, max, mem, writeAt); err != nil { + return err + } + if dseg.Value().Keep { + dseg = dseg.NextSegment() + } else { + dseg = dirty.Remove(dseg).NextSegment() + } + } + return nil +} + +// SyncDirtyAll passes all pages stored in cache identified as dirty to +// writeAt, updating dirty to reflect successful writes. If writeAt returns a +// successful partial write, SyncDirtyAll will call it repeatedly until all +// bytes have been written. max is the true size of the cached object; offsets +// beyond max will not be passed to writeAt, even if they are marked dirty. 
+func SyncDirtyAll(ctx context.Context, cache *FileRangeSet, dirty *DirtySet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error { + dseg := dirty.FirstSegment() + for dseg.Ok() { + if err := syncDirtyRange(ctx, dseg.Range(), cache, max, mem, writeAt); err != nil { + return err + } + if dseg.Value().Keep { + dseg = dseg.NextSegment() + } else { + dseg = dirty.Remove(dseg).NextSegment() + } + } + return nil +} + +// Preconditions: mr must be page-aligned. +func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error { + for cseg := cache.LowerBoundSegment(mr.Start); cseg.Ok() && cseg.Start() < mr.End; cseg = cseg.NextSegment() { + wbr := cseg.Range().Intersect(mr) + if max < wbr.Start { + break + } + ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read) + if err != nil { + return err + } + if max < wbr.End { + ims = ims.TakeFirst64(max - wbr.Start) + } + offset := wbr.Start + for !ims.IsEmpty() { + n, err := writeAt(ctx, ims, offset) + if err != nil { + return err + } + offset += n + ims = ims.DropFirst64(n) + } + } + return nil +} diff --git a/pkg/sentry/fs/fsutil/dirty_set_test.go b/pkg/sentry/fs/fsutil/dirty_set_test.go new file mode 100644 index 000000000..e3579c23c --- /dev/null +++ b/pkg/sentry/fs/fsutil/dirty_set_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "reflect" + "testing" + + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/usermem" +) + +func TestDirtySet(t *testing.T) { + var set DirtySet + set.MarkDirty(memmap.MappableRange{0, 2 * usermem.PageSize}) + set.KeepDirty(memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize}) + set.MarkClean(memmap.MappableRange{0, 2 * usermem.PageSize}) + want := &DirtySegmentDataSlices{ + Start: []uint64{usermem.PageSize}, + End: []uint64{2 * usermem.PageSize}, + Values: []DirtyInfo{{Keep: true}}, + } + if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) { + t.Errorf("set:\n\tgot %v,\n\twant %v", got, want) + } +} diff --git a/pkg/sentry/fs/fsutil/file.go b/pkg/sentry/fs/fsutil/file.go new file mode 100644 index 000000000..08695391c --- /dev/null +++ b/pkg/sentry/fs/fsutil/file.go @@ -0,0 +1,396 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "io" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// FileNoopRelease implements fs.FileOperations.Release for files that have no +// resources to release. 
+type FileNoopRelease struct{} + +// Release is a no-op. +func (FileNoopRelease) Release() {} + +// SeekWithDirCursor is used to implement fs.FileOperations.Seek. If dirCursor +// is not nil and the seek was on a directory, the cursor will be updated. +// +// Currently only seeking to 0 on a directory is supported. +// +// FIXME(b/33075855): Lift directory seeking limitations. +func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64, dirCursor *string) (int64, error) { + inode := file.Dirent.Inode + current := file.Offset() + + // Does the Inode represents a non-seekable type? + if fs.IsPipe(inode.StableAttr) || fs.IsSocket(inode.StableAttr) { + return current, syserror.ESPIPE + } + + // Does the Inode represent a character device? + if fs.IsCharDevice(inode.StableAttr) { + // Ignore seek requests. + // + // FIXME(b/34716638): This preserves existing + // behavior but is not universally correct. + return 0, nil + } + + // Otherwise compute the new offset. + switch whence { + case fs.SeekSet: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.SpecialFile, fs.BlockDevice: + if offset < 0 { + return current, syserror.EINVAL + } + return offset, nil + case fs.Directory, fs.SpecialDirectory: + if offset != 0 { + return current, syserror.EINVAL + } + // SEEK_SET to 0 moves the directory "cursor" to the beginning. + if dirCursor != nil { + *dirCursor = "" + } + return 0, nil + default: + return current, syserror.EINVAL + } + case fs.SeekCurrent: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.SpecialFile, fs.BlockDevice: + if current+offset < 0 { + return current, syserror.EINVAL + } + return current + offset, nil + case fs.Directory, fs.SpecialDirectory: + if offset != 0 { + return current, syserror.EINVAL + } + return current, nil + default: + return current, syserror.EINVAL + } + case fs.SeekEnd: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.BlockDevice: + // Allow the file to determine the end. 
+ uattr, err := inode.UnstableAttr(ctx) + if err != nil { + return current, err + } + sz := uattr.Size + if sz+offset < 0 { + return current, syserror.EINVAL + } + return sz + offset, nil + // FIXME(b/34778850): This is not universally correct. + // Remove SpecialDirectory. + case fs.SpecialDirectory: + if offset != 0 { + return current, syserror.EINVAL + } + // SEEK_END to 0 moves the directory "cursor" to the end. + // + // FIXME(b/35442290): The ensures that after the seek, + // reading on the directory will get EOF. But it is not + // correct in general because the directory can grow in + // size; attempting to read those new entries will be + // futile (EOF will always be the result). + return fs.FileMaxOffset, nil + default: + return current, syserror.EINVAL + } + } + + // Not a valid seek request. + return current, syserror.EINVAL +} + +// FileGenericSeek implements fs.FileOperations.Seek for files that use a +// generic seek implementation. +type FileGenericSeek struct{} + +// Seek implements fs.FileOperations.Seek. +func (FileGenericSeek) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return SeekWithDirCursor(ctx, file, whence, offset, nil) +} + +// FileZeroSeek implements fs.FileOperations.Seek for files that maintain a +// constant zero-value offset and require a no-op Seek. +type FileZeroSeek struct{} + +// Seek implements fs.FileOperations.Seek. +func (FileZeroSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) { + return 0, nil +} + +// FileNoSeek implements fs.FileOperations.Seek to return EINVAL. +type FileNoSeek struct{} + +// Seek implements fs.FileOperations.Seek. +func (FileNoSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) { + return 0, syserror.EINVAL +} + +// FilePipeSeek implements fs.FileOperations.Seek and can be used for files +// that behave like pipes (seeking is not supported). 
+type FilePipeSeek struct{} + +// Seek implements fs.FileOperations.Seek. +func (FilePipeSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) { + return 0, syserror.ESPIPE +} + +// FileNotDirReaddir implements fs.FileOperations.Readdir for non-directories. +type FileNotDirReaddir struct{} + +// Readdir implements fs.FileOperations.FileNotDirReaddir. +func (FileNotDirReaddir) Readdir(context.Context, *fs.File, fs.DentrySerializer) (int64, error) { + return 0, syserror.ENOTDIR +} + +// FileNoFsync implements fs.FileOperations.Fsync for files that don't support +// syncing. +type FileNoFsync struct{} + +// Fsync implements fs.FileOperations.Fsync. +func (FileNoFsync) Fsync(context.Context, *fs.File, int64, int64, fs.SyncType) error { + return syserror.EINVAL +} + +// FileNoopFsync implements fs.FileOperations.Fsync for files that don't need +// to synced. +type FileNoopFsync struct{} + +// Fsync implements fs.FileOperations.Fsync. +func (FileNoopFsync) Fsync(context.Context, *fs.File, int64, int64, fs.SyncType) error { + return nil +} + +// FileNoopFlush implements fs.FileOperations.Flush as a no-op. +type FileNoopFlush struct{} + +// Flush implements fs.FileOperations.Flush. +func (FileNoopFlush) Flush(context.Context, *fs.File) error { + return nil +} + +// FileNoMMap implements fs.FileOperations.Mappable for files that cannot +// be memory mapped. +type FileNoMMap struct{} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (FileNoMMap) ConfigureMMap(context.Context, *fs.File, *memmap.MMapOpts) error { + return syserror.ENODEV +} + +// GenericConfigureMMap implements fs.FileOperations.ConfigureMMap for most +// filesystems that support memory mapping. 
+func GenericConfigureMMap(file *fs.File, m memmap.Mappable, opts *memmap.MMapOpts) error { + opts.Mappable = m + opts.MappingIdentity = file + file.IncRef() + return nil +} + +// FileNoIoctl implements fs.FileOperations.Ioctl for files that don't +// implement the ioctl syscall. +type FileNoIoctl struct{} + +// Ioctl implements fs.FileOperations.Ioctl. +func (FileNoIoctl) Ioctl(context.Context, *fs.File, usermem.IO, arch.SyscallArguments) (uintptr, error) { + return 0, syserror.ENOTTY +} + +// FileNoSplice implements fs.FileOperations.ReadFrom and +// fs.FileOperations.WriteTo for files that don't support splice. +type FileNoSplice struct{} + +// WriteTo implements fs.FileOperations.WriteTo. +func (FileNoSplice) WriteTo(context.Context, *fs.File, io.Writer, int64, bool) (int64, error) { + return 0, syserror.ENOSYS +} + +// ReadFrom implements fs.FileOperations.ReadFrom. +func (FileNoSplice) ReadFrom(context.Context, *fs.File, io.Reader, int64) (int64, error) { + return 0, syserror.ENOSYS +} + +// DirFileOperations implements most of fs.FileOperations for directories, +// except for Readdir and UnstableAttr which the embedding type must implement. +type DirFileOperations struct { + waiter.AlwaysReady + FileGenericSeek + FileNoIoctl + FileNoMMap + FileNoopFlush + FileNoopFsync + FileNoopRelease + FileNoSplice +} + +// Read implements fs.FileOperations.Read +func (*DirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EISDIR +} + +// Write implements fs.FileOperations.Write. +func (*DirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EISDIR +} + +// StaticDirFileOperations implements fs.FileOperations for directories with +// static children. 
+// +// +stateify savable +type StaticDirFileOperations struct { + DirFileOperations `state:"nosave"` + FileUseInodeUnstableAttr `state:"nosave"` + + // dentryMap is a SortedDentryMap used to implement Readdir. + dentryMap *fs.SortedDentryMap + + // dirCursor contains the name of the last directory entry that was + // serialized. + dirCursor string +} + +// NewStaticDirFileOperations returns a new StaticDirFileOperations that will +// iterate the given denty map. +func NewStaticDirFileOperations(dentries *fs.SortedDentryMap) *StaticDirFileOperations { + return &StaticDirFileOperations{ + dentryMap: dentries, + } +} + +// IterateDir implements DirIterator.IterateDir. +func (sdfo *StaticDirFileOperations) IterateDir(ctx context.Context, d *fs.Dirent, dirCtx *fs.DirCtx, offset int) (int, error) { + n, err := fs.GenericReaddir(dirCtx, sdfo.dentryMap) + return offset + n, err +} + +// Readdir implements fs.FileOperations.Readdir. +func (sdfo *StaticDirFileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &sdfo.dirCursor, + } + return fs.DirentReaddir(ctx, file.Dirent, sdfo, root, dirCtx, file.Offset()) +} + +// NoReadWriteFile is a file that does not support reading or writing. 
+// +// +stateify savable +type NoReadWriteFile struct { + waiter.AlwaysReady `state:"nosave"` + FileGenericSeek `state:"nosave"` + FileNoIoctl `state:"nosave"` + FileNoMMap `state:"nosave"` + FileNoopFsync `state:"nosave"` + FileNoopFlush `state:"nosave"` + FileNoopRelease `state:"nosave"` + FileNoRead `state:"nosave"` + FileNoWrite `state:"nosave"` + FileNotDirReaddir `state:"nosave"` + FileUseInodeUnstableAttr `state:"nosave"` + FileNoSplice `state:"nosave"` +} + +var _ fs.FileOperations = (*NoReadWriteFile)(nil) + +// FileStaticContentReader is a helper to implement fs.FileOperations.Read with +// static content. +// +// +stateify savable +type FileStaticContentReader struct { + // content is immutable. + content []byte +} + +// NewFileStaticContentReader initializes a FileStaticContentReader with the +// given content. +func NewFileStaticContentReader(b []byte) FileStaticContentReader { + return FileStaticContentReader{ + content: b, + } +} + +// Read implements fs.FileOperations.Read. +func (scr *FileStaticContentReader) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + if offset >= int64(len(scr.content)) { + return 0, nil + } + n, err := dst.CopyOut(ctx, scr.content[offset:]) + return int64(n), err +} + +// FileNoopWrite implements fs.FileOperations.Write as a noop. +type FileNoopWrite struct{} + +// Write implements fs.FileOperations.Write. +func (FileNoopWrite) Write(_ context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + return src.NumBytes(), nil +} + +// FileNoRead implements fs.FileOperations.Read to return EINVAL. +type FileNoRead struct{} + +// Read implements fs.FileOperations.Read. +func (FileNoRead) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EINVAL +} + +// FileNoWrite implements fs.FileOperations.Write to return EINVAL. 
+type FileNoWrite struct{} + +// Write implements fs.FileOperations.Write. +func (FileNoWrite) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EINVAL +} + +// FileNoopRead implement fs.FileOperations.Read as a noop. +type FileNoopRead struct{} + +// Read implements fs.FileOperations.Read. +func (FileNoopRead) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, nil +} + +// FileUseInodeUnstableAttr implements fs.FileOperations.UnstableAttr by calling +// InodeOperations.UnstableAttr. +type FileUseInodeUnstableAttr struct{} + +// UnstableAttr implements fs.FileOperations.UnstableAttr. +func (FileUseInodeUnstableAttr) UnstableAttr(ctx context.Context, file *fs.File) (fs.UnstableAttr, error) { + return file.Dirent.Inode.UnstableAttr(ctx) +} diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go new file mode 100644 index 000000000..5643cdac9 --- /dev/null +++ b/pkg/sentry/fs/fsutil/file_range_set.go @@ -0,0 +1,209 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fsutil + +import ( + "fmt" + "io" + "math" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/pgalloc" + "gvisor.dev/gvisor/pkg/sentry/platform" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/usermem" +) + +// FileRangeSet maps offsets into a memmap.Mappable to offsets into a +// platform.File. It is used to implement Mappables that store data in +// sparsely-allocated memory. +// +// type FileRangeSet <generated by go_generics> + +// FileRangeSetFunctions implements segment.Functions for FileRangeSet. +type FileRangeSetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (FileRangeSetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (FileRangeSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (FileRangeSetFunctions) ClearValue(_ *uint64) { +} + +// Merge implements segment.Functions.Merge. +func (FileRangeSetFunctions) Merge(mr1 memmap.MappableRange, frstart1 uint64, _ memmap.MappableRange, frstart2 uint64) (uint64, bool) { + if frstart1+mr1.Length() != frstart2 { + return 0, false + } + return frstart1, true +} + +// Split implements segment.Functions.Split. +func (FileRangeSetFunctions) Split(mr memmap.MappableRange, frstart uint64, split uint64) (uint64, uint64) { + return frstart, frstart + (split - mr.Start) +} + +// FileRange returns the FileRange mapped by seg. +func (seg FileRangeIterator) FileRange() platform.FileRange { + return seg.FileRangeOf(seg.Range()) +} + +// FileRangeOf returns the FileRange mapped by mr. +// +// Preconditions: seg.Range().IsSupersetOf(mr). mr.Length() != 0. 
+func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileRange { + frstart := seg.Value() + (mr.Start - seg.Start()) + return platform.FileRange{frstart, frstart + mr.Length()} +} + +// Fill attempts to ensure that all memmap.Mappable offsets in required are +// mapped to a platform.File offset, by allocating from mf with the given +// memory usage kind and invoking readAt to store data into memory. (If readAt +// returns a successful partial read, Fill will call it repeatedly until all +// bytes have been read.) EOF is handled consistently with the requirements of +// mmap(2): bytes after EOF on the same page are zeroed; pages after EOF are +// invalid. +// +// Fill may read offsets outside of required, but will never read offsets +// outside of optional. It returns a non-nil error if any error occurs, even +// if the error only affects offsets in optional, but not in required. +// +// Preconditions: required.Length() > 0. optional.IsSupersetOf(required). +// required and optional must be page-aligned. +func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error { + gap := frs.LowerBoundGap(required.Start) + for gap.Ok() && gap.Start() < required.End { + if gap.Range().Length() == 0 { + gap = gap.NextGap() + continue + } + gr := gap.Range().Intersect(optional) + + // Read data into the gap. + fr, err := mf.AllocateAndFill(gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) { + var done uint64 + for !dsts.IsEmpty() { + n, err := readAt(ctx, dsts, gr.Start+done) + done += n + dsts = dsts.DropFirst64(n) + if err != nil { + if err == io.EOF { + // MemoryFile.AllocateAndFill truncates down to a page + // boundary, but FileRangeSet.Fill is supposed to + // zero-fill to the end of the page in this case. 
+ donepgaddr, ok := usermem.Addr(done).RoundUp() + if donepg := uint64(donepgaddr); ok && donepg != done { + dsts.DropFirst64(donepg - done) + done = donepg + if dsts.IsEmpty() { + return done, nil + } + } + } + return done, err + } + } + return done, nil + })) + + // Store anything we managed to read into the cache. + if done := fr.Length(); done != 0 { + gr.End = gr.Start + done + gap = frs.Insert(gap, gr, fr.Start).NextGap() + } + + if err != nil { + return err + } + } + return nil +} + +// Drop removes segments for memmap.Mappable offsets in mr, freeing the +// corresponding platform.FileRanges. +// +// Preconditions: mr must be page-aligned. +func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) { + seg := frs.LowerBoundSegment(mr.Start) + for seg.Ok() && seg.Start() < mr.End { + seg = frs.Isolate(seg, mr) + mf.DecRef(seg.FileRange()) + seg = frs.Remove(seg).NextSegment() + } +} + +// DropAll removes all segments in mr, freeing the corresponding +// platform.FileRanges. +func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) { + for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + mf.DecRef(seg.FileRange()) + } + frs.RemoveAll() +} + +// Truncate updates frs to reflect Mappable truncation to the given length: +// bytes after the new EOF on the same page are zeroed, and pages after the new +// EOF are freed. +func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) { + pgendaddr, ok := usermem.Addr(end).RoundUp() + if ok { + pgend := uint64(pgendaddr) + + // Free truncated pages. + frs.SplitAt(pgend) + seg := frs.LowerBoundSegment(pgend) + for seg.Ok() { + mf.DecRef(seg.FileRange()) + seg = frs.Remove(seg).NextSegment() + } + + if end == pgend { + return + } + } + + // Here we know end < end.RoundUp(). If the new EOF lands in the + // middle of a page that we have, zero out its contents beyond the new + // length. 
+ seg := frs.FindSegment(end) + if seg.Ok() { + fr := seg.FileRange() + fr.Start += end - seg.Start() + ims, err := mf.MapInternal(fr, usermem.Write) + if err != nil { + // There's no good recourse from here. This means + // that we can't keep cached memory consistent with + // the new end of file. The caller may have already + // updated the file size on their backing file system. + // + // We don't want to risk blindly continuing onward, + // so in the extremely rare cases this does happen, + // we abandon ship. + panic(fmt.Sprintf("Failed to map %v: %v", fr, err)) + } + if _, err := safemem.ZeroSeq(ims); err != nil { + panic(fmt.Sprintf("Zeroing %v failed: %v", fr, err)) + } + } +} diff --git a/pkg/sentry/fs/fsutil/frame_ref_set.go b/pkg/sentry/fs/fsutil/frame_ref_set.go new file mode 100644 index 000000000..dd6f5aba6 --- /dev/null +++ b/pkg/sentry/fs/fsutil/frame_ref_set.go @@ -0,0 +1,91 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "math" + + "gvisor.dev/gvisor/pkg/sentry/platform" + "gvisor.dev/gvisor/pkg/sentry/usage" +) + +// FrameRefSetFunctions implements segment.Functions for FrameRefSet. +type FrameRefSetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (FrameRefSetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. 
+func (FrameRefSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (FrameRefSetFunctions) ClearValue(val *uint64) { +} + +// Merge implements segment.Functions.Merge. +func (FrameRefSetFunctions) Merge(_ platform.FileRange, val1 uint64, _ platform.FileRange, val2 uint64) (uint64, bool) { + if val1 != val2 { + return 0, false + } + return val1, true +} + +// Split implements segment.Functions.Split. +func (FrameRefSetFunctions) Split(_ platform.FileRange, val uint64, _ uint64) (uint64, uint64) { + return val, val +} + +// IncRefAndAccount adds a reference on the range fr. All newly inserted segments +// are accounted as host page cache memory mappings. +func (refs *FrameRefSet) IncRefAndAccount(fr platform.FileRange) { + seg, gap := refs.Find(fr.Start) + for { + switch { + case seg.Ok() && seg.Start() < fr.End: + seg = refs.Isolate(seg, fr) + seg.SetValue(seg.Value() + 1) + seg, gap = seg.NextNonEmpty() + case gap.Ok() && gap.Start() < fr.End: + newRange := gap.Range().Intersect(fr) + usage.MemoryAccounting.Inc(newRange.Length(), usage.Mapped) + seg, gap = refs.InsertWithoutMerging(gap, newRange, 1).NextNonEmpty() + default: + refs.MergeAdjacent(fr) + return + } + } +} + +// DecRefAndAccount removes a reference on the range fr and untracks segments +// that are removed from memory accounting. 
+func (refs *FrameRefSet) DecRefAndAccount(fr platform.FileRange) {
+	seg := refs.FindSegment(fr.Start)
+
+	for seg.Ok() && seg.Start() < fr.End {
+		// Only the part of seg overlapping fr loses a reference.
+		seg = refs.Isolate(seg, fr)
+		if old := seg.Value(); old == 1 {
+			// Last reference: the range is no longer mapped, so stop
+			// accounting it and drop the segment.
+			usage.MemoryAccounting.Dec(seg.Range().Length(), usage.Mapped)
+			seg = refs.Remove(seg).NextSegment()
+		} else {
+			seg.SetValue(old - 1)
+			seg = seg.NextSegment()
+		}
+	}
+	// Re-merge any segments that Isolate split apart.
+	refs.MergeAdjacent(fr)
+}
diff --git a/pkg/sentry/fs/fsutil/fsutil.go b/pkg/sentry/fs/fsutil/fsutil.go
new file mode 100644
index 000000000..c9587b1d9
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/fsutil.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fsutil provides utilities for implementing fs.InodeOperations
+// and fs.FileOperations:
+//
+// - For embeddable utilities, see inode.go and file.go.
+//
+// - For fs.Inodes that require a page cache to be memory mapped, see
+// inode_cache.go.
+//
+// - For anon fs.Inodes, see anon.go.
+package fsutil
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
new file mode 100644
index 000000000..e82afd112
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -0,0 +1,242 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"fmt"
+	"syscall"
+
+	"gvisor.dev/gvisor/pkg/log"
+	"gvisor.dev/gvisor/pkg/safemem"
+	"gvisor.dev/gvisor/pkg/sentry/memmap"
+	"gvisor.dev/gvisor/pkg/sentry/platform"
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/usermem"
+)
+
+// HostFileMapper caches mappings of an arbitrary host file descriptor. It is
+// used by implementations of memmap.Mappable that represent a host file
+// descriptor.
+//
+// +stateify savable
+type HostFileMapper struct {
+	// HostFile conceptually breaks the file into pieces called chunks, of
+	// size and alignment chunkSize, and caches mappings of the file on a chunk
+	// granularity.
+
+	refsMu sync.Mutex `state:"nosave"`
+
+	// refs maps chunk start offsets to the sum of reference counts for all
+	// pages in that chunk. refs is protected by refsMu.
+	refs map[uint64]int32
+
+	mapsMu sync.Mutex `state:"nosave"`
+
+	// mappings maps chunk start offsets to mappings of those chunks,
+	// obtained by calling syscall.Mmap. mappings is protected by
+	// mapsMu. Not saved: mappings are host state and are recreated lazily
+	// after restore (see host_file_mapper_state.go).
+	mappings map[uint64]mapping `state:"nosave"`
+}
+
+const (
+	// Chunks are huge-page-sized and -aligned.
+	chunkShift = usermem.HugePageShift
+	chunkSize  = 1 << chunkShift
+	chunkMask  = chunkSize - 1
+)
+
+// pagesInChunk returns the number of pages of mr that fall within the chunk
+// starting at chunkStart.
+func pagesInChunk(mr memmap.MappableRange, chunkStart uint64) int32 {
+	return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / usermem.PageSize)
+}
+
+// mapping records one cached host mmap of a chunk.
+type mapping struct {
+	addr     uintptr
+	writable bool
+}
+
+// Init must be called on zero-value HostFileMappers before first use.
+func (f *HostFileMapper) Init() {
+	f.refs = make(map[uint64]int32)
+	f.mappings = make(map[uint64]mapping)
+}
+
+// NewHostFileMapper returns an initialized HostFileMapper allocated on the
+// heap with no references or cached mappings.
+func NewHostFileMapper() *HostFileMapper {
+	f := &HostFileMapper{}
+	f.Init()
+	return f
+}
+
+// IncRefOn increments the reference count on all offsets in mr.
+//
+// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned.
+func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
+	f.refsMu.Lock()
+	defer f.refsMu.Unlock()
+	for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+		refs := f.refs[chunkStart]
+		pgs := pagesInChunk(mr, chunkStart)
+		if refs+pgs < refs {
+			// Would overflow.
+			panic(fmt.Sprintf("HostFileMapper.IncRefOn(%v): adding %d page references to chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
+		}
+		f.refs[chunkStart] = refs + pgs
+	}
+}
+
+// DecRefOn decrements the reference count on all offsets in mr.
+//
+// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned.
+func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
+	f.refsMu.Lock()
+	defer f.refsMu.Unlock()
+	for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+		refs := f.refs[chunkStart]
+		pgs := pagesInChunk(mr, chunkStart)
+		switch {
+		case refs > pgs:
+			f.refs[chunkStart] = refs - pgs
+		case refs == pgs:
+			// Chunk's last reference: drop any cached host mapping as well.
+			// Lock order: refsMu is held, so take mapsMu inside it.
+			f.mapsMu.Lock()
+			delete(f.refs, chunkStart)
+			if m, ok := f.mappings[chunkStart]; ok {
+				f.unmapAndRemoveLocked(chunkStart, m)
+			}
+			f.mapsMu.Unlock()
+		case refs < pgs:
+			panic(fmt.Sprintf("HostFileMapper.DecRefOn(%v): removing %d page references from chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
+		}
+	}
+}
+
+// MapInternal returns a mapping of offsets in fr from fd. The returned
+// safemem.BlockSeq is valid as long as at least one reference is held on all
+// offsets in fr or until the next call to UnmapAll.
+//
+// Preconditions: The caller must hold a reference on all offsets in fr.
+func (f *HostFileMapper) MapInternal(fr platform.FileRange, fd int, write bool) (safemem.BlockSeq, error) {
+	chunks := ((fr.End + chunkMask) >> chunkShift) - (fr.Start >> chunkShift)
+	f.mapsMu.Lock()
+	defer f.mapsMu.Unlock()
+	if chunks == 1 {
+		// Avoid an unnecessary slice allocation.
+		var seq safemem.BlockSeq
+		err := f.forEachMappingBlockLocked(fr, fd, write, func(b safemem.Block) {
+			seq = safemem.BlockSeqOf(b)
+		})
+		return seq, err
+	}
+	blocks := make([]safemem.Block, 0, chunks)
+	err := f.forEachMappingBlockLocked(fr, fd, write, func(b safemem.Block) {
+		blocks = append(blocks, b)
+	})
+	return safemem.BlockSeqFromSlice(blocks), err
+}
+
+// forEachMappingBlockLocked invokes fn on a block for each chunk overlapping
+// fr, creating or upgrading cached host mappings as needed.
+//
+// Preconditions: f.mapsMu must be locked.
+func (f *HostFileMapper) forEachMappingBlockLocked(fr platform.FileRange, fd int, write bool, fn func(safemem.Block)) error {
+	prot := syscall.PROT_READ
+	if write {
+		prot |= syscall.PROT_WRITE
+	}
+	for chunkStart := fr.Start &^ chunkMask; chunkStart < fr.End; chunkStart += chunkSize {
+		m, ok := f.mappings[chunkStart]
+		if !ok {
+			// No cached mapping for this chunk yet: create one.
+			addr, _, errno := syscall.Syscall6(
+				syscall.SYS_MMAP,
+				0,
+				chunkSize,
+				uintptr(prot),
+				syscall.MAP_SHARED,
+				uintptr(fd),
+				uintptr(chunkStart))
+			if errno != 0 {
+				return errno
+			}
+			m = mapping{addr, write}
+			f.mappings[chunkStart] = m
+		} else if write && !m.writable {
+			// Cached mapping is read-only but a writable one is needed:
+			// remap in place (MAP_FIXED) with PROT_WRITE added.
+			addr, _, errno := syscall.Syscall6(
+				syscall.SYS_MMAP,
+				m.addr,
+				chunkSize,
+				uintptr(prot),
+				syscall.MAP_SHARED|syscall.MAP_FIXED,
+				uintptr(fd),
+				uintptr(chunkStart))
+			if errno != 0 {
+				return errno
+			}
+			m = mapping{addr, write}
+			f.mappings[chunkStart] = m
+		}
+		// Trim the chunk-sized block down to the part overlapping fr.
+		var startOff uint64
+		if chunkStart < fr.Start {
+			startOff = fr.Start - chunkStart
+		}
+		endOff := uint64(chunkSize)
+		if chunkStart+chunkSize > fr.End {
+			endOff = fr.End - chunkStart
+		}
+		fn(f.unsafeBlockFromChunkMapping(m.addr).TakeFirst64(endOff).DropFirst64(startOff))
+	}
+	return nil
+}
+
+// UnmapAll unmaps all cached mappings. Callers are responsible for
+// synchronization with mappings returned by previous calls to MapInternal.
+func (f *HostFileMapper) UnmapAll() {
+	f.mapsMu.Lock()
+	defer f.mapsMu.Unlock()
+	for chunkStart, m := range f.mappings {
+		f.unmapAndRemoveLocked(chunkStart, m)
+	}
+}
+
+// unmapAndRemoveLocked munmaps m and forgets it.
+//
+// Preconditions: f.mapsMu must be locked. f.mappings[chunkStart] == m.
+func (f *HostFileMapper) unmapAndRemoveLocked(chunkStart uint64, m mapping) {
+	if _, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, m.addr, chunkSize, 0); errno != 0 {
+		// This leaks address space and is unexpected, but is otherwise
+		// harmless, so complain but don't panic.
+		log.Warningf("HostFileMapper: failed to unmap mapping %#x for chunk %#x: %v", m.addr, chunkStart, errno)
+	}
+	delete(f.mappings, chunkStart)
+}
+
+// RegenerateMappings must be called when the file description mapped by f
+// changes, to replace existing mappings of the previous file description.
+func (f *HostFileMapper) RegenerateMappings(fd int) error {
+	f.mapsMu.Lock()
+	defer f.mapsMu.Unlock()
+
+	for chunkStart, m := range f.mappings {
+		// Remap each cached chunk in place (MAP_FIXED) against the new fd,
+		// preserving its existing protection.
+		prot := syscall.PROT_READ
+		if m.writable {
+			prot |= syscall.PROT_WRITE
+		}
+		_, _, errno := syscall.Syscall6(
+			syscall.SYS_MMAP,
+			m.addr,
+			chunkSize,
+			uintptr(prot),
+			syscall.MAP_SHARED|syscall.MAP_FIXED,
+			uintptr(fd),
+			uintptr(chunkStart))
+		if errno != 0 {
+			return errno
+		}
+	}
+	return nil
+}
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper_state.go b/pkg/sentry/fs/fsutil/host_file_mapper_state.go
new file mode 100644
index 000000000..576d2a3df
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_file_mapper_state.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+// afterLoad is invoked by stateify.
+func (f *HostFileMapper) afterLoad() {
+	// mappings is not saved (state:"nosave"); recreate the empty map so
+	// chunks are remapped lazily after restore.
+	f.mappings = make(map[uint64]mapping)
+}
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go b/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go
new file mode 100644
index 000000000..2d4778d64
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+// unsafeBlockFromChunkMapping wraps the chunk-sized host mapping at addr in a
+// safemem.Block.
+func (*HostFileMapper) unsafeBlockFromChunkMapping(addr uintptr) safemem.Block {
+	// We don't control the host file's length, so touching its mappings may
+	// raise SIGBUS. Thus accesses to it must use safecopy.
+	return safemem.BlockFromUnsafePointer((unsafe.Pointer)(addr), chunkSize)
+}
diff --git a/pkg/sentry/fs/fsutil/host_mappable.go b/pkg/sentry/fs/fsutil/host_mappable.go
new file mode 100644
index 000000000..78fec553e
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_mappable.go
@@ -0,0 +1,214 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"math"
+
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/safemem"
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	"gvisor.dev/gvisor/pkg/sentry/memmap"
+	"gvisor.dev/gvisor/pkg/sentry/platform"
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/usermem"
+)
+
+// HostMappable implements memmap.Mappable and platform.File over a
+// CachedFileObject.
+//
+// Lock order (compare the lock order model in mm/mm.go):
+//   truncateMu ("fs locks")
+//     mu ("memmap.Mappable locks not taken by Translate")
+//       ("platform.File locks")
+//         backingFile ("CachedFileObject locks")
+//
+// +stateify savable
+type HostMappable struct {
+	hostFileMapper *HostFileMapper
+
+	backingFile CachedFileObject
+
+	mu sync.Mutex `state:"nosave"`
+
+	// mappings tracks mappings of the cached file object into
+	// memmap.MappingSpaces so it can be invalidated upon save. Protected by mu.
+	mappings memmap.MappingSet
+
+	// truncateMu protects writes and truncations. See Truncate() for details.
+	truncateMu sync.RWMutex `state:"nosave"`
+}
+
+// NewHostMappable creates a new mappable that maps directly to host FD.
+func NewHostMappable(backingFile CachedFileObject) *HostMappable {
+	return &HostMappable{
+		hostFileMapper: NewHostFileMapper(),
+		backingFile:    backingFile,
+	}
+}
+
+// AddMapping implements memmap.Mappable.AddMapping.
+func (h *HostMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error {
+	// Hot path. Avoid defers.
+	h.mu.Lock()
+	mapped := h.mappings.AddMapping(ms, ar, offset, writable)
+	for _, r := range mapped {
+		h.hostFileMapper.IncRefOn(r)
+	}
+	h.mu.Unlock()
+	return nil
+}
+
+// RemoveMapping implements memmap.Mappable.RemoveMapping.
+func (h *HostMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) {
+	// Hot path. Avoid defers.
+	h.mu.Lock()
+	unmapped := h.mappings.RemoveMapping(ms, ar, offset, writable)
+	for _, r := range unmapped {
+		h.hostFileMapper.DecRefOn(r)
+	}
+	h.mu.Unlock()
+}
+
+// CopyMapping implements memmap.Mappable.CopyMapping.
+func (h *HostMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error {
+	// A copy is just an additional mapping of the same backing offsets.
+	return h.AddMapping(ctx, ms, dstAR, offset, writable)
+}
+
+// Translate implements memmap.Mappable.Translate. The whole optional range is
+// translated 1:1 to offsets in h itself.
+func (h *HostMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+	return []memmap.Translation{
+		{
+			Source: optional,
+			File:   h,
+			Offset: optional.Start,
+			Perms:  usermem.AnyAccess,
+		},
+	}, nil
+}
+
+// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
+func (h *HostMappable) InvalidateUnsavable(_ context.Context) error {
+	h.mu.Lock()
+	h.mappings.InvalidateAll(memmap.InvalidateOpts{})
+	h.mu.Unlock()
+	return nil
+}
+
+// NotifyChangeFD must be called after the file description represented by
+// CachedFileObject.FD() changes.
+func (h *HostMappable) NotifyChangeFD() error {
+	// Update existing sentry mappings to refer to the new file description.
+	if err := h.hostFileMapper.RegenerateMappings(h.backingFile.FD()); err != nil {
+		return err
+	}
+
+	// Shoot down existing application mappings of the old file description;
+	// they will be remapped with the new file description on demand.
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.mappings.InvalidateAll(memmap.InvalidateOpts{})
+	return nil
+}
+
+// MapInternal implements platform.File.MapInternal.
+func (h *HostMappable) MapInternal(fr platform.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) {
+	return h.hostFileMapper.MapInternal(fr, h.backingFile.FD(), at.Write)
+}
+
+// FD implements platform.File.FD.
+func (h *HostMappable) FD() int {
+	return h.backingFile.FD()
+}
+
+// IncRef implements platform.File.IncRef.
+func (h *HostMappable) IncRef(fr platform.FileRange) {
+	mr := memmap.MappableRange{Start: fr.Start, End: fr.End}
+	h.hostFileMapper.IncRefOn(mr)
+}
+
+// DecRef implements platform.File.DecRef.
+func (h *HostMappable) DecRef(fr platform.FileRange) {
+	mr := memmap.MappableRange{Start: fr.Start, End: fr.End}
+	h.hostFileMapper.DecRefOn(mr)
+}
+
+// Truncate truncates the file, invalidating any mapping that may have been
+// removed after the size change.
+//
+// Truncation and writes are synchronized to prevent races where writes make the
+// file grow between truncation and invalidation below:
+//   T1: Calls SetMaskedAttributes and stalls
+//   T2: Appends to file causing it to grow
+//   T2: Writes to mapped pages and COW happens
+//   T1: Continues and wrongly invalidates the page mapped in step above.
+func (h *HostMappable) Truncate(ctx context.Context, newSize int64) error {
+	h.truncateMu.Lock()
+	defer h.truncateMu.Unlock()
+
+	mask := fs.AttrMask{Size: true}
+	attr := fs.UnstableAttr{Size: newSize}
+	if err := h.backingFile.SetMaskedAttributes(ctx, mask, attr, false); err != nil {
+		return err
+	}
+
+	// Invalidate COW mappings that may exist beyond the new size in case the file
+	// is being shrunk. Other mappings don't need to be invalidated because
+	// translate will just return identical mappings after invalidation anyway,
+	// and SIGBUS will be raised and handled when the mappings are touched.
+	//
+	// Compare Linux's mm/truncate.c:truncate_setsize() =>
+	// truncate_pagecache() =>
+	// mm/memory.c:unmap_mapping_range(evencows=1).
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	mr := memmap.MappableRange{
+		Start: fs.OffsetPageEnd(newSize),
+		End:   fs.OffsetPageEnd(math.MaxInt64),
+	}
+	h.mappings.Invalidate(mr, memmap.InvalidateOpts{InvalidatePrivate: true})
+
+	return nil
+}
+
+// Allocate reserves space in the backing file.
+func (h *HostMappable) Allocate(ctx context.Context, offset int64, length int64) error {
+	// Read-lock truncateMu: allocations may run concurrently with each other
+	// and with writes, but not with Truncate.
+	h.truncateMu.RLock()
+	err := h.backingFile.Allocate(ctx, offset, length)
+	h.truncateMu.RUnlock()
+	return err
+}
+
+// Write writes to the file backing this mappable.
+func (h *HostMappable) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
+	// Read-lock truncateMu so the write cannot interleave with a concurrent
+	// Truncate (see the race described on Truncate).
+	h.truncateMu.RLock()
+	n, err := src.CopyInTo(ctx, &writer{ctx: ctx, hostMappable: h, off: offset})
+	h.truncateMu.RUnlock()
+	return n, err
+}
+
+// writer adapts HostMappable's backing file to safemem.Writer for use with
+// IOSequence.CopyInTo, tracking the current file offset across calls.
+type writer struct {
+	ctx          context.Context
+	hostMappable *HostMappable
+	off          int64
+}
+
+// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
+func (w *writer) WriteFromBlocks(src safemem.BlockSeq) (uint64, error) {
+	n, err := w.hostMappable.backingFile.WriteFromBlocksAt(w.ctx, src, uint64(w.off))
+	w.off += int64(n)
+	return n, err
+}
diff --git a/pkg/sentry/fs/fsutil/inode.go b/pkg/sentry/fs/fsutil/inode.go
new file mode 100644
index 000000000..1922ff08c
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/inode.go
@@ -0,0 +1,531 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"gvisor.dev/gvisor/pkg/abi/linux"
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
+	"gvisor.dev/gvisor/pkg/sentry/memmap"
+	"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/syserror"
+	"gvisor.dev/gvisor/pkg/waiter"
+)
+
+// SimpleFileInode is a simple implementation of InodeOperations.
+// +// +stateify savable +type SimpleFileInode struct { + InodeGenericChecker `state:"nosave"` + InodeNoExtendedAttributes `state:"nosave"` + InodeNoopRelease `state:"nosave"` + InodeNoopWriteOut `state:"nosave"` + InodeNotAllocatable `state:"nosave"` + InodeNotDirectory `state:"nosave"` + InodeNotMappable `state:"nosave"` + InodeNotOpenable `state:"nosave"` + InodeNotSocket `state:"nosave"` + InodeNotSymlink `state:"nosave"` + InodeNotTruncatable `state:"nosave"` + InodeNotVirtual `state:"nosave"` + + InodeSimpleAttributes +} + +// NewSimpleFileInode returns a new SimpleFileInode. +func NewSimpleFileInode(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, typ uint64) *SimpleFileInode { + return &SimpleFileInode{ + InodeSimpleAttributes: NewInodeSimpleAttributes(ctx, owner, perms, typ), + } +} + +// NoReadWriteFileInode is an implementation of InodeOperations that supports +// opening files that are not readable or writeable. +// +// +stateify savable +type NoReadWriteFileInode struct { + InodeGenericChecker `state:"nosave"` + InodeNoExtendedAttributes `state:"nosave"` + InodeNoopRelease `state:"nosave"` + InodeNoopWriteOut `state:"nosave"` + InodeNotAllocatable `state:"nosave"` + InodeNotDirectory `state:"nosave"` + InodeNotMappable `state:"nosave"` + InodeNotSocket `state:"nosave"` + InodeNotSymlink `state:"nosave"` + InodeNotTruncatable `state:"nosave"` + InodeNotVirtual `state:"nosave"` + + InodeSimpleAttributes +} + +// NewNoReadWriteFileInode returns a new NoReadWriteFileInode. +func NewNoReadWriteFileInode(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, typ uint64) *NoReadWriteFileInode { + return &NoReadWriteFileInode{ + InodeSimpleAttributes: NewInodeSimpleAttributes(ctx, owner, perms, typ), + } +} + +// GetFile implements fs.InodeOperations.GetFile. 
+func (*NoReadWriteFileInode) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &NoReadWriteFile{}), nil +} + +// InodeSimpleAttributes implements methods for updating in-memory unstable +// attributes. +// +// +stateify savable +type InodeSimpleAttributes struct { + // fsType is the immutable filesystem type that will be returned by + // StatFS. + fsType uint64 + + // mu protects unstable. + mu sync.RWMutex `state:"nosave"` + unstable fs.UnstableAttr +} + +// NewInodeSimpleAttributes returns a new InodeSimpleAttributes with the given +// owner and permissions, and all timestamps set to the current time. +func NewInodeSimpleAttributes(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, typ uint64) InodeSimpleAttributes { + return NewInodeSimpleAttributesWithUnstable(fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: owner, + Perms: perms, + }), typ) +} + +// NewInodeSimpleAttributesWithUnstable returns a new InodeSimpleAttributes +// with the given unstable attributes. +func NewInodeSimpleAttributesWithUnstable(uattr fs.UnstableAttr, typ uint64) InodeSimpleAttributes { + return InodeSimpleAttributes{ + fsType: typ, + unstable: uattr, + } +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (i *InodeSimpleAttributes) UnstableAttr(ctx context.Context, _ *fs.Inode) (fs.UnstableAttr, error) { + i.mu.RLock() + u := i.unstable + i.mu.RUnlock() + return u, nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (i *InodeSimpleAttributes) SetPermissions(ctx context.Context, _ *fs.Inode, p fs.FilePermissions) bool { + i.mu.Lock() + i.unstable.SetPermissions(ctx, p) + i.mu.Unlock() + return true +} + +// SetOwner implements fs.InodeOperations.SetOwner. 
+func (i *InodeSimpleAttributes) SetOwner(ctx context.Context, _ *fs.Inode, owner fs.FileOwner) error { + i.mu.Lock() + i.unstable.SetOwner(ctx, owner) + i.mu.Unlock() + return nil +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (i *InodeSimpleAttributes) SetTimestamps(ctx context.Context, _ *fs.Inode, ts fs.TimeSpec) error { + i.mu.Lock() + i.unstable.SetTimestamps(ctx, ts) + i.mu.Unlock() + return nil +} + +// AddLink implements fs.InodeOperations.AddLink. +func (i *InodeSimpleAttributes) AddLink() { + i.mu.Lock() + i.unstable.Links++ + i.mu.Unlock() +} + +// DropLink implements fs.InodeOperations.DropLink. +func (i *InodeSimpleAttributes) DropLink() { + i.mu.Lock() + i.unstable.Links-- + i.mu.Unlock() +} + +// StatFS implements fs.InodeOperations.StatFS. +func (i *InodeSimpleAttributes) StatFS(context.Context) (fs.Info, error) { + if i.fsType == 0 { + return fs.Info{}, syserror.ENOSYS + } + return fs.Info{Type: i.fsType}, nil +} + +// NotifyAccess updates the access time. +func (i *InodeSimpleAttributes) NotifyAccess(ctx context.Context) { + i.mu.Lock() + i.unstable.AccessTime = ktime.NowFromContext(ctx) + i.mu.Unlock() +} + +// NotifyModification updates the modification time. +func (i *InodeSimpleAttributes) NotifyModification(ctx context.Context) { + i.mu.Lock() + i.unstable.ModificationTime = ktime.NowFromContext(ctx) + i.mu.Unlock() +} + +// NotifyStatusChange updates the status change time. +func (i *InodeSimpleAttributes) NotifyStatusChange(ctx context.Context) { + i.mu.Lock() + i.unstable.StatusChangeTime = ktime.NowFromContext(ctx) + i.mu.Unlock() +} + +// NotifyModificationAndStatusChange updates the modification and status change +// times. 
+func (i *InodeSimpleAttributes) NotifyModificationAndStatusChange(ctx context.Context) { + i.mu.Lock() + now := ktime.NowFromContext(ctx) + i.unstable.ModificationTime = now + i.unstable.StatusChangeTime = now + i.mu.Unlock() +} + +// InodeSimpleExtendedAttributes implements +// fs.InodeOperations.{Get,Set,List}Xattr. +// +// +stateify savable +type InodeSimpleExtendedAttributes struct { + // mu protects xattrs. + mu sync.RWMutex `state:"nosave"` + xattrs map[string]string +} + +// GetXattr implements fs.InodeOperations.GetXattr. +func (i *InodeSimpleExtendedAttributes) GetXattr(_ context.Context, _ *fs.Inode, name string, _ uint64) (string, error) { + i.mu.RLock() + value, ok := i.xattrs[name] + i.mu.RUnlock() + if !ok { + return "", syserror.ENOATTR + } + return value, nil +} + +// SetXattr implements fs.InodeOperations.SetXattr. +func (i *InodeSimpleExtendedAttributes) SetXattr(_ context.Context, _ *fs.Inode, name, value string, flags uint32) error { + i.mu.Lock() + defer i.mu.Unlock() + if i.xattrs == nil { + if flags&linux.XATTR_REPLACE != 0 { + return syserror.ENODATA + } + i.xattrs = make(map[string]string) + } + + _, ok := i.xattrs[name] + if ok && flags&linux.XATTR_CREATE != 0 { + return syserror.EEXIST + } + if !ok && flags&linux.XATTR_REPLACE != 0 { + return syserror.ENODATA + } + + i.xattrs[name] = value + return nil +} + +// ListXattr implements fs.InodeOperations.ListXattr. +func (i *InodeSimpleExtendedAttributes) ListXattr(context.Context, *fs.Inode, uint64) (map[string]struct{}, error) { + i.mu.RLock() + names := make(map[string]struct{}, len(i.xattrs)) + for name := range i.xattrs { + names[name] = struct{}{} + } + i.mu.RUnlock() + return names, nil +} + +// RemoveXattr implements fs.InodeOperations.RemoveXattr. 
+func (i *InodeSimpleExtendedAttributes) RemoveXattr(_ context.Context, _ *fs.Inode, name string) error { + i.mu.Lock() + defer i.mu.Unlock() + if _, ok := i.xattrs[name]; ok { + delete(i.xattrs, name) + return nil + } + return syserror.ENOATTR +} + +// staticFile is a file with static contents. It is returned by +// InodeStaticFileGetter.GetFile. +// +// +stateify savable +type staticFile struct { + FileGenericSeek `state:"nosave"` + FileNoIoctl `state:"nosave"` + FileNoMMap `state:"nosave"` + FileNoSplice `state:"nosave"` + FileNoopFsync `state:"nosave"` + FileNoopFlush `state:"nosave"` + FileNoopRelease `state:"nosave"` + FileNoopWrite `state:"nosave"` + FileNotDirReaddir `state:"nosave"` + FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + FileStaticContentReader +} + +// InodeNoStatFS implement StatFS by retuning ENOSYS. +type InodeNoStatFS struct{} + +// StatFS implements fs.InodeOperations.StatFS. +func (InodeNoStatFS) StatFS(context.Context) (fs.Info, error) { + return fs.Info{}, syserror.ENOSYS +} + +// InodeStaticFileGetter implements GetFile for a file with static contents. +// +// +stateify savable +type InodeStaticFileGetter struct { + Contents []byte +} + +// GetFile implements fs.InodeOperations.GetFile. +func (i *InodeStaticFileGetter) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &staticFile{ + FileStaticContentReader: NewFileStaticContentReader(i.Contents), + }), nil +} + +// InodeNotMappable returns a nil memmap.Mappable. +type InodeNotMappable struct{} + +// Mappable implements fs.InodeOperations.Mappable. +func (InodeNotMappable) Mappable(*fs.Inode) memmap.Mappable { + return nil +} + +// InodeNoopWriteOut is a no-op implementation of fs.InodeOperations.WriteOut. +type InodeNoopWriteOut struct{} + +// WriteOut is a no-op. 
+func (InodeNoopWriteOut) WriteOut(context.Context, *fs.Inode) error { + return nil +} + +// InodeNotDirectory can be used by Inodes that are not directories. +type InodeNotDirectory struct{} + +// Lookup implements fs.InodeOperations.Lookup. +func (InodeNotDirectory) Lookup(context.Context, *fs.Inode, string) (*fs.Dirent, error) { + return nil, syserror.ENOTDIR +} + +// Create implements fs.InodeOperations.Create. +func (InodeNotDirectory) Create(context.Context, *fs.Inode, string, fs.FileFlags, fs.FilePermissions) (*fs.File, error) { + return nil, syserror.ENOTDIR +} + +// CreateLink implements fs.InodeOperations.CreateLink. +func (InodeNotDirectory) CreateLink(context.Context, *fs.Inode, string, string) error { + return syserror.ENOTDIR +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +func (InodeNotDirectory) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error { + return syserror.ENOTDIR +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (InodeNotDirectory) CreateDirectory(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return syserror.ENOTDIR +} + +// Bind implements fs.InodeOperations.Bind. +func (InodeNotDirectory) Bind(context.Context, *fs.Inode, string, transport.BoundEndpoint, fs.FilePermissions) (*fs.Dirent, error) { + return nil, syserror.ENOTDIR +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (InodeNotDirectory) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return syserror.ENOTDIR +} + +// Remove implements fs.InodeOperations.Remove. +func (InodeNotDirectory) Remove(context.Context, *fs.Inode, string) error { + return syserror.ENOTDIR +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +func (InodeNotDirectory) RemoveDirectory(context.Context, *fs.Inode, string) error { + return syserror.ENOTDIR +} + +// Rename implements fs.FileOperations.Rename. 
+func (InodeNotDirectory) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error { + return syserror.EINVAL +} + +// InodeNotSocket can be used by Inodes that are not sockets. +type InodeNotSocket struct{} + +// BoundEndpoint implements fs.InodeOperations.BoundEndpoint. +func (InodeNotSocket) BoundEndpoint(*fs.Inode, string) transport.BoundEndpoint { + return nil +} + +// InodeNotTruncatable can be used by Inodes that cannot be truncated. +type InodeNotTruncatable struct{} + +// Truncate implements fs.InodeOperations.Truncate. +func (InodeNotTruncatable) Truncate(context.Context, *fs.Inode, int64) error { + return syserror.EINVAL +} + +// InodeIsDirTruncate implements fs.InodeOperations.Truncate for directories. +type InodeIsDirTruncate struct{} + +// Truncate implements fs.InodeOperations.Truncate. +func (InodeIsDirTruncate) Truncate(context.Context, *fs.Inode, int64) error { + return syserror.EISDIR +} + +// InodeNoopTruncate implements fs.InodeOperations.Truncate as a noop. +type InodeNoopTruncate struct{} + +// Truncate implements fs.InodeOperations.Truncate. +func (InodeNoopTruncate) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// InodeNotRenameable can be used by Inodes that cannot be truncated. +type InodeNotRenameable struct{} + +// Rename implements fs.InodeOperations.Rename. +func (InodeNotRenameable) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error { + return syserror.EINVAL +} + +// InodeNotOpenable can be used by Inodes that cannot be opened. +type InodeNotOpenable struct{} + +// GetFile implements fs.InodeOperations.GetFile. +func (InodeNotOpenable) GetFile(context.Context, *fs.Dirent, fs.FileFlags) (*fs.File, error) { + return nil, syserror.EIO +} + +// InodeNotVirtual can be used by Inodes that are not virtual. +type InodeNotVirtual struct{} + +// IsVirtual implements fs.InodeOperations.IsVirtual. 
+func (InodeNotVirtual) IsVirtual() bool { + return false +} + +// InodeVirtual can be used by Inodes that are virtual. +type InodeVirtual struct{} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (InodeVirtual) IsVirtual() bool { + return true +} + +// InodeNotSymlink can be used by Inodes that are not symlinks. +type InodeNotSymlink struct{} + +// Readlink implements fs.InodeOperations.Readlink. +func (InodeNotSymlink) Readlink(context.Context, *fs.Inode) (string, error) { + return "", syserror.ENOLINK +} + +// Getlink implements fs.InodeOperations.Getlink. +func (InodeNotSymlink) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + return nil, syserror.ENOLINK +} + +// InodeNoExtendedAttributes can be used by Inodes that do not support +// extended attributes. +type InodeNoExtendedAttributes struct{} + +// GetXattr implements fs.InodeOperations.GetXattr. +func (InodeNoExtendedAttributes) GetXattr(context.Context, *fs.Inode, string, uint64) (string, error) { + return "", syserror.EOPNOTSUPP +} + +// SetXattr implements fs.InodeOperations.SetXattr. +func (InodeNoExtendedAttributes) SetXattr(context.Context, *fs.Inode, string, string, uint32) error { + return syserror.EOPNOTSUPP +} + +// ListXattr implements fs.InodeOperations.ListXattr. +func (InodeNoExtendedAttributes) ListXattr(context.Context, *fs.Inode, uint64) (map[string]struct{}, error) { + return nil, syserror.EOPNOTSUPP +} + +// RemoveXattr implements fs.InodeOperations.RemoveXattr. +func (InodeNoExtendedAttributes) RemoveXattr(context.Context, *fs.Inode, string) error { + return syserror.EOPNOTSUPP +} + +// InodeNoopRelease implements fs.InodeOperations.Release as a noop. +type InodeNoopRelease struct{} + +// Release implements fs.InodeOperations.Release. +func (InodeNoopRelease) Release(context.Context) {} + +// InodeGenericChecker implements fs.InodeOperations.Check with a generic +// implementation. 
+type InodeGenericChecker struct{}
+
+// Check implements fs.InodeOperations.Check.
+func (InodeGenericChecker) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
+	return fs.ContextCanAccessFile(ctx, inode, p)
+}
+
+// InodeDenyWriteChecker implements fs.InodeOperations.Check which denies all
+// write operations.
+type InodeDenyWriteChecker struct{}
+
+// Check implements fs.InodeOperations.Check.
+func (InodeDenyWriteChecker) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
+	if p.Write {
+		return false
+	}
+	return fs.ContextCanAccessFile(ctx, inode, p)
+}
+
+// InodeNotAllocatable can be used by Inodes that do not support Allocate().
+type InodeNotAllocatable struct{}
+
+func (InodeNotAllocatable) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
+	return syserror.EOPNOTSUPP
+}
+
+// InodeNoopAllocate implements fs.InodeOperations.Allocate as a noop.
+type InodeNoopAllocate struct{}
+
+// Allocate implements fs.InodeOperations.Allocate.
+func (InodeNoopAllocate) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
+	return nil
+}
+
+// InodeIsDirAllocate implements fs.InodeOperations.Allocate for directories.
+type InodeIsDirAllocate struct{}
+
+// Allocate implements fs.InodeOperations.Allocate.
+func (InodeIsDirAllocate) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
+	return syserror.EISDIR
+}
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
new file mode 100644
index 000000000..800c8b4e1
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -0,0 +1,1061 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "fmt" + "io" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/time" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/pgalloc" + "gvisor.dev/gvisor/pkg/sentry/platform" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/usermem" +) + +// Lock order (compare the lock order model in mm/mm.go): +// +// CachingInodeOperations.attrMu ("fs locks") +// CachingInodeOperations.mapsMu ("memmap.Mappable locks not taken by Translate") +// CachingInodeOperations.dataMu ("memmap.Mappable locks taken by Translate") +// CachedFileObject locks + +// CachingInodeOperations caches the metadata and content of a CachedFileObject. +// It implements a subset of InodeOperations. As a utility it can be used to +// implement the full set of InodeOperations. Generally it should not be +// embedded to avoid unexpected inherited behavior. +// +// CachingInodeOperations implements Mappable for the CachedFileObject: +// +// - If CachedFileObject.FD returns a value >= 0 then the file descriptor +// will be memory mapped on the host. +// +// - Otherwise, the contents of CachedFileObject are buffered into memory +// managed by the CachingInodeOperations. +// +// Implementations of FileOperations for a CachedFileObject must read and +// write through CachingInodeOperations using Read and Write respectively. 
+// +// Implementations of InodeOperations.WriteOut must call Sync to write out +// in-memory modifications of data and metadata to the CachedFileObject. +// +// +stateify savable +type CachingInodeOperations struct { + // backingFile is a handle to a cached file object. + backingFile CachedFileObject + + // mfp is used to allocate memory that caches backingFile's contents. + mfp pgalloc.MemoryFileProvider + + // opts contains options. opts is immutable. + opts CachingInodeOperationsOptions + + attrMu sync.Mutex `state:"nosave"` + + // attr is unstable cached metadata. + // + // attr is protected by attrMu. attr.Size is protected by both attrMu and + // dataMu; reading it requires locking either mutex, while mutating it + // requires locking both. + attr fs.UnstableAttr + + // dirtyAttr is metadata that was updated in-place but hasn't yet + // been successfully written out. + // + // dirtyAttr is protected by attrMu. + dirtyAttr fs.AttrMask + + mapsMu sync.Mutex `state:"nosave"` + + // mappings tracks mappings of the cached file object into + // memmap.MappingSpaces. + // + // mappings is protected by mapsMu. + mappings memmap.MappingSet + + dataMu sync.RWMutex `state:"nosave"` + + // cache maps offsets into the cached file to offsets into + // mfp.MemoryFile() that store the file's data. + // + // cache is protected by dataMu. + cache FileRangeSet + + // dirty tracks dirty segments in cache. + // + // dirty is protected by dataMu. + dirty DirtySet + + // hostFileMapper caches internal mappings of backingFile.FD(). + hostFileMapper *HostFileMapper + + // refs tracks active references to data in the cache. + // + // refs is protected by dataMu. + refs FrameRefSet +} + +// CachingInodeOperationsOptions configures a CachingInodeOperations. +// +// +stateify savable +type CachingInodeOperationsOptions struct { + // If ForcePageCache is true, use the sentry page cache even if a host file + // descriptor is available. 
+ ForcePageCache bool + + // If LimitHostFDTranslation is true, apply maxFillRange() constraints to + // host file descriptor mappings returned by + // CachingInodeOperations.Translate(). + LimitHostFDTranslation bool +} + +// CachedFileObject is a file that may require caching. +type CachedFileObject interface { + // ReadToBlocksAt reads up to dsts.NumBytes() bytes from the file to dsts, + // starting at offset, and returns the number of bytes read. ReadToBlocksAt + // may return a partial read without an error. + ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) + + // WriteFromBlocksAt writes up to srcs.NumBytes() bytes from srcs to the + // file, starting at offset, and returns the number of bytes written. + // WriteFromBlocksAt may return a partial write without an error. + WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) + + // SetMaskedAttributes sets the attributes in attr that are true in + // mask on the backing file. If the mask contains only ATime or MTime + // and the CachedFileObject has an FD to the file, then this operation + // is a noop unless forceSetTimestamps is true. This avoids an extra + // RPC to the gofer in the open-read/write-close case, when the + // timestamps on the file will be updated by the host kernel for us. + // + // SetMaskedAttributes may be called at any point, regardless of whether + // the file was opened. + SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr, forceSetTimestamps bool) error + + // Allocate allows the caller to reserve disk space for the inode. + // It's equivalent to fallocate(2) with 'mode=0'. + Allocate(ctx context.Context, offset int64, length int64) error + + // Sync instructs the remote filesystem to sync the file to stable storage. + Sync(ctx context.Context) error + + // FD returns a host file descriptor. 
If it is possible for + // CachingInodeOperations.AddMapping to have ever been called with writable + // = true, the FD must have been opened O_RDWR; otherwise, it may have been + // opened O_RDONLY or O_RDWR. (mmap unconditionally requires that mapped + // files are readable.) If no host file descriptor is available, FD returns + // a negative number. + // + // For any given CachedFileObject, if FD() ever succeeds (returns a + // non-negative number), it must always succeed. + // + // FD is called iff the file has been memory mapped. This implies that + // the file was opened (see fs.InodeOperations.GetFile). + FD() int +} + +// NewCachingInodeOperations returns a new CachingInodeOperations backed by +// a CachedFileObject and its initial unstable attributes. +func NewCachingInodeOperations(ctx context.Context, backingFile CachedFileObject, uattr fs.UnstableAttr, opts CachingInodeOperationsOptions) *CachingInodeOperations { + mfp := pgalloc.MemoryFileProviderFromContext(ctx) + if mfp == nil { + panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider)) + } + return &CachingInodeOperations{ + backingFile: backingFile, + mfp: mfp, + opts: opts, + attr: uattr, + hostFileMapper: NewHostFileMapper(), + } +} + +// Release implements fs.InodeOperations.Release. +func (c *CachingInodeOperations) Release() { + c.mapsMu.Lock() + defer c.mapsMu.Unlock() + c.dataMu.Lock() + defer c.dataMu.Unlock() + + // Something has gone terribly wrong if we're releasing an inode that is + // still memory-mapped. + if !c.mappings.IsEmpty() { + panic(fmt.Sprintf("Releasing CachingInodeOperations with mappings:\n%s", &c.mappings)) + } + + // Drop any cached pages that are still awaiting MemoryFile eviction. (This + // means that MemoryFile no longer needs to evict them.) 
+ mf := c.mfp.MemoryFile() + mf.MarkAllUnevictable(c) + if err := SyncDirtyAll(context.Background(), &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil { + panic(fmt.Sprintf("Failed to writeback cached data: %v", err)) + } + c.cache.DropAll(mf) + c.dirty.RemoveAll() +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (c *CachingInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + c.attrMu.Lock() + attr := c.attr + c.attrMu.Unlock() + return attr, nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (c *CachingInodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, perms fs.FilePermissions) bool { + c.attrMu.Lock() + defer c.attrMu.Unlock() + + now := ktime.NowFromContext(ctx) + masked := fs.AttrMask{Perms: true} + if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{Perms: perms}, false); err != nil { + return false + } + c.attr.Perms = perms + c.touchStatusChangeTimeLocked(now) + return true +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (c *CachingInodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + if !owner.UID.Ok() && !owner.GID.Ok() { + return nil + } + + c.attrMu.Lock() + defer c.attrMu.Unlock() + + now := ktime.NowFromContext(ctx) + masked := fs.AttrMask{ + UID: owner.UID.Ok(), + GID: owner.GID.Ok(), + } + if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{Owner: owner}, false); err != nil { + return err + } + if owner.UID.Ok() { + c.attr.Owner.UID = owner.UID + } + if owner.GID.Ok() { + c.attr.Owner.GID = owner.GID + } + c.touchStatusChangeTimeLocked(now) + return nil +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. 
+func (c *CachingInodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + + c.attrMu.Lock() + defer c.attrMu.Unlock() + + // Replace requests to use the "system time" with the current time to + // ensure that cached timestamps remain consistent with the remote + // filesystem. + now := ktime.NowFromContext(ctx) + if ts.ATimeSetSystemTime { + ts.ATime = now + } + if ts.MTimeSetSystemTime { + ts.MTime = now + } + masked := fs.AttrMask{ + AccessTime: !ts.ATimeOmit, + ModificationTime: !ts.MTimeOmit, + } + // Call SetMaskedAttributes with forceSetTimestamps = true to make sure + // the timestamp is updated. + if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{AccessTime: ts.ATime, ModificationTime: ts.MTime}, true); err != nil { + return err + } + if !ts.ATimeOmit { + c.attr.AccessTime = ts.ATime + } + if !ts.MTimeOmit { + c.attr.ModificationTime = ts.MTime + } + c.touchStatusChangeTimeLocked(now) + return nil +} + +// Truncate implements fs.InodeOperations.Truncate. +func (c *CachingInodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + c.attrMu.Lock() + defer c.attrMu.Unlock() + + // c.attr.Size is protected by both c.attrMu and c.dataMu. + c.dataMu.Lock() + now := ktime.NowFromContext(ctx) + masked := fs.AttrMask{Size: true} + attr := fs.UnstableAttr{Size: size} + if err := c.backingFile.SetMaskedAttributes(ctx, masked, attr, false); err != nil { + c.dataMu.Unlock() + return err + } + oldSize := c.attr.Size + c.attr.Size = size + c.touchModificationAndStatusChangeTimeLocked(now) + + // We drop c.dataMu here so that we can lock c.mapsMu and invalidate + // mappings below. This allows concurrent calls to Read/Translate/etc. + // These functions synchronize with an in-progress Truncate by refusing to + // use cache contents beyond the new c.attr.Size. (We are still holding + // c.attrMu, so we can't race with Truncate/Write.) 
+ c.dataMu.Unlock() + + // Nothing left to do unless shrinking the file. + if size >= oldSize { + return nil + } + + oldpgend := fs.OffsetPageEnd(oldSize) + newpgend := fs.OffsetPageEnd(size) + + // Invalidate past translations of truncated pages. + if newpgend != oldpgend { + c.mapsMu.Lock() + c.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{ + // Compare Linux's mm/truncate.c:truncate_setsize() => + // truncate_pagecache() => + // mm/memory.c:unmap_mapping_range(evencows=1). + InvalidatePrivate: true, + }) + c.mapsMu.Unlock() + } + + // We are now guaranteed that there are no translations of truncated pages, + // and can remove them from the cache. Since truncated pages have been + // removed from the backing file, they should be dropped without being + // written back. + c.dataMu.Lock() + defer c.dataMu.Unlock() + c.cache.Truncate(uint64(size), c.mfp.MemoryFile()) + c.dirty.KeepClean(memmap.MappableRange{uint64(size), oldpgend}) + + return nil +} + +// Allocate implements fs.InodeOperations.Allocate. +func (c *CachingInodeOperations) Allocate(ctx context.Context, offset, length int64) error { + newSize := offset + length + + // c.attr.Size is protected by both c.attrMu and c.dataMu. + c.attrMu.Lock() + defer c.attrMu.Unlock() + c.dataMu.Lock() + defer c.dataMu.Unlock() + + if newSize <= c.attr.Size { + return nil + } + + now := ktime.NowFromContext(ctx) + if err := c.backingFile.Allocate(ctx, offset, length); err != nil { + return err + } + + c.attr.Size = newSize + c.touchModificationAndStatusChangeTimeLocked(now) + return nil +} + +// WriteOut implements fs.InodeOperations.WriteOut. +func (c *CachingInodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error { + c.attrMu.Lock() + + // Write dirty pages back. 
+	c.dataMu.Lock()
+	err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.mfp.MemoryFile(), c.backingFile.WriteFromBlocksAt)
+	c.dataMu.Unlock()
+	if err != nil {
+		c.attrMu.Unlock()
+		return err
+	}
+
+	// SyncDirtyAll above would have grown the file if needed. On shrinks, the
+	// backing file is called directly, so the size never needs to be updated.
+	c.dirtyAttr.Size = false
+
+	// Write out cached attributes.
+	if err := c.backingFile.SetMaskedAttributes(ctx, c.dirtyAttr, c.attr, false); err != nil {
+		c.attrMu.Unlock()
+		return err
+	}
+	c.dirtyAttr = fs.AttrMask{}
+
+	c.attrMu.Unlock()
+
+	// Fsync the remote file.
+	return c.backingFile.Sync(ctx)
+}
+
+// IncLinks increases the link count and updates cached modification time.
+func (c *CachingInodeOperations) IncLinks(ctx context.Context) {
+	c.attrMu.Lock()
+	c.attr.Links++
+	c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))
+	c.attrMu.Unlock()
+}
+
+// DecLinks decreases the link count and updates cached modification time.
+func (c *CachingInodeOperations) DecLinks(ctx context.Context) {
+	c.attrMu.Lock()
+	c.attr.Links--
+	c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx))
+	c.attrMu.Unlock()
+}
+
+// TouchAccessTime updates the cached access time in-place to the
+// current time. It does not update status change time in-place. See
+// mm/filemap.c:do_generic_file_read -> include/linux/fs.h:file_accessed.
+func (c *CachingInodeOperations) TouchAccessTime(ctx context.Context, inode *fs.Inode) {
+	if inode.MountSource.Flags.NoAtime {
+		return
+	}
+
+	c.attrMu.Lock()
+	c.touchAccessTimeLocked(ktime.NowFromContext(ctx))
+	c.attrMu.Unlock()
+}
+
+// touchAccessTimeLocked updates the cached access time in-place to the current
+// time.
+//
+// Preconditions: c.attrMu is locked for writing.
+func (c *CachingInodeOperations) touchAccessTimeLocked(now time.Time) { + c.attr.AccessTime = now + c.dirtyAttr.AccessTime = true +} + +// TouchModificationAndStatusChangeTime updates the cached modification and +// status change times in-place to the current time. +func (c *CachingInodeOperations) TouchModificationAndStatusChangeTime(ctx context.Context) { + c.attrMu.Lock() + c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx)) + c.attrMu.Unlock() +} + +// touchModificationAndStatusChangeTimeLocked updates the cached modification +// and status change times in-place to the current time. +// +// Preconditions: c.attrMu is locked for writing. +func (c *CachingInodeOperations) touchModificationAndStatusChangeTimeLocked(now time.Time) { + c.attr.ModificationTime = now + c.dirtyAttr.ModificationTime = true + c.attr.StatusChangeTime = now + c.dirtyAttr.StatusChangeTime = true +} + +// TouchStatusChangeTime updates the cached status change time in-place to the +// current time. +func (c *CachingInodeOperations) TouchStatusChangeTime(ctx context.Context) { + c.attrMu.Lock() + c.touchStatusChangeTimeLocked(ktime.NowFromContext(ctx)) + c.attrMu.Unlock() +} + +// touchStatusChangeTimeLocked updates the cached status change time +// in-place to the current time. +// +// Preconditions: c.attrMu is locked for writing. +func (c *CachingInodeOperations) touchStatusChangeTimeLocked(now time.Time) { + c.attr.StatusChangeTime = now + c.dirtyAttr.StatusChangeTime = true +} + +// UpdateUnstable updates the cached unstable attributes. Only non-dirty +// attributes are updated. +func (c *CachingInodeOperations) UpdateUnstable(attr fs.UnstableAttr) { + // All attributes are protected by attrMu. 
+ c.attrMu.Lock() + + if !c.dirtyAttr.Usage { + c.attr.Usage = attr.Usage + } + if !c.dirtyAttr.Perms { + c.attr.Perms = attr.Perms + } + if !c.dirtyAttr.UID { + c.attr.Owner.UID = attr.Owner.UID + } + if !c.dirtyAttr.GID { + c.attr.Owner.GID = attr.Owner.GID + } + if !c.dirtyAttr.AccessTime { + c.attr.AccessTime = attr.AccessTime + } + if !c.dirtyAttr.ModificationTime { + c.attr.ModificationTime = attr.ModificationTime + } + if !c.dirtyAttr.StatusChangeTime { + c.attr.StatusChangeTime = attr.StatusChangeTime + } + if !c.dirtyAttr.Links { + c.attr.Links = attr.Links + } + + // Size requires holding attrMu and dataMu. + c.dataMu.Lock() + if !c.dirtyAttr.Size { + c.attr.Size = attr.Size + } + c.dataMu.Unlock() + + c.attrMu.Unlock() +} + +// Read reads from frames and otherwise directly from the backing file +// into dst starting at offset until dst is full, EOF is reached, or an +// error is encountered. +// +// Read may partially fill dst and return a nil error. +func (c *CachingInodeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if dst.NumBytes() == 0 { + return 0, nil + } + + // Have we reached EOF? We check for this again in + // inodeReadWriter.ReadToBlocks to avoid holding c.attrMu (which would + // serialize reads) or c.dataMu (which would violate lock ordering), but + // check here first (before calling into MM) since reading at EOF is + // common: getting a return value of 0 from a read syscall is the only way + // to detect EOF. + // + // TODO(jamieliu): Separate out c.attr.Size and use atomics instead of + // c.dataMu. + c.dataMu.RLock() + size := c.attr.Size + c.dataMu.RUnlock() + if offset >= size { + return 0, io.EOF + } + + n, err := dst.CopyOutFrom(ctx, &inodeReadWriter{ctx, c, offset}) + // Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed(). 
+ c.TouchAccessTime(ctx, file.Dirent.Inode) + return n, err +} + +// Write writes to frames and otherwise directly to the backing file +// from src starting at offset and until src is empty or an error is +// encountered. +// +// If Write partially fills src, a non-nil error is returned. +func (c *CachingInodeOperations) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + // Hot path. Avoid defers. + if src.NumBytes() == 0 { + return 0, nil + } + + c.attrMu.Lock() + // Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time(). + c.touchModificationAndStatusChangeTimeLocked(ktime.NowFromContext(ctx)) + n, err := src.CopyInTo(ctx, &inodeReadWriter{ctx, c, offset}) + c.attrMu.Unlock() + return n, err +} + +type inodeReadWriter struct { + ctx context.Context + c *CachingInodeOperations + offset int64 +} + +// ReadToBlocks implements safemem.Reader.ReadToBlocks. +func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { + mem := rw.c.mfp.MemoryFile() + fillCache := !rw.c.useHostPageCache() && mem.ShouldCacheEvictable() + + // Hot path. Avoid defers. + var unlock func() + if fillCache { + rw.c.dataMu.Lock() + unlock = rw.c.dataMu.Unlock + } else { + rw.c.dataMu.RLock() + unlock = rw.c.dataMu.RUnlock + } + + // Compute the range to read. + if rw.offset >= rw.c.attr.Size { + unlock() + return 0, io.EOF + } + end := fs.ReadEndOffset(rw.offset, int64(dsts.NumBytes()), rw.c.attr.Size) + if end == rw.offset { // dsts.NumBytes() == 0? + unlock() + return 0, nil + } + + var done uint64 + seg, gap := rw.c.cache.Find(uint64(rw.offset)) + for rw.offset < end { + mr := memmap.MappableRange{uint64(rw.offset), uint64(end)} + switch { + case seg.Ok(): + // Get internal mappings from the cache. + ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + if err != nil { + unlock() + return done, err + } + + // Copy from internal mappings. 
+			n, err := safemem.CopySeq(dsts, ims)
+			done += n
+			rw.offset += int64(n)
+			dsts = dsts.DropFirst64(n)
+			if err != nil {
+				unlock()
+				return done, err
+			}
+
+			// Continue.
+			seg, gap = seg.NextNonEmpty()
+
+		case gap.Ok():
+			gapMR := gap.Range().Intersect(mr)
+			if fillCache {
+				// Read into the cache, then re-enter the loop to read from the
+				// cache.
+				reqMR := memmap.MappableRange{
+					Start: uint64(usermem.Addr(gapMR.Start).RoundDown()),
+					End:   fs.OffsetPageEnd(int64(gapMR.End)),
+				}
+				optMR := gap.Range()
+				err := rw.c.cache.Fill(rw.ctx, reqMR, maxFillRange(reqMR, optMR), mem, usage.PageCache, rw.c.backingFile.ReadToBlocksAt)
+				mem.MarkEvictable(rw.c, pgalloc.EvictableRange{optMR.Start, optMR.End})
+				seg, gap = rw.c.cache.Find(uint64(rw.offset))
+				if !seg.Ok() {
+					unlock()
+					return done, err
+				}
+				// err might have occurred in part of gap.Range() outside
+				// gapMR. Forget about it for now; if the error matters and
+				// persists, we'll run into it again in a later iteration of
+				// this loop.
+			} else {
+				// Read directly from the backing file.
+				dst := dsts.TakeFirst64(gapMR.Length())
+				n, err := rw.c.backingFile.ReadToBlocksAt(rw.ctx, dst, gapMR.Start)
+				done += n
+				rw.offset += int64(n)
+				dsts = dsts.DropFirst64(n)
+				// Partial reads are fine. But we must stop reading.
+				if n != dst.NumBytes() || err != nil {
+					unlock()
+					return done, err
+				}
+
+				// Continue.
+				seg, gap = gap.NextSegment(), FileRangeGapIterator{}
+			}
+
+		default:
+			break
+		}
+	}
+	unlock()
+	return done, nil
+}
+
+// maybeGrowFile grows the file's size if data has been written past the old
+// size.
+//
+// Preconditions: rw.c.attrMu and rw.c.dataMu must be locked.
+func (rw *inodeReadWriter) maybeGrowFile() {
+	// If the write ends beyond the file's previous size, it causes the
+	// file to grow.
+ if rw.offset > rw.c.attr.Size { + rw.c.attr.Size = rw.offset + rw.c.dirtyAttr.Size = true + } + if rw.offset > rw.c.attr.Usage { + // This is incorrect if CachingInodeOperations is caching a sparse + // file. (In Linux, keeping inode::i_blocks up to date is the + // filesystem's responsibility.) + rw.c.attr.Usage = rw.offset + rw.c.dirtyAttr.Usage = true + } +} + +// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. +// +// Preconditions: rw.c.attrMu must be locked. +func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) { + // Hot path. Avoid defers. + rw.c.dataMu.Lock() + + // Compute the range to write. + end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes())) + if end == rw.offset { // srcs.NumBytes() == 0? + rw.c.dataMu.Unlock() + return 0, nil + } + + mf := rw.c.mfp.MemoryFile() + var done uint64 + seg, gap := rw.c.cache.Find(uint64(rw.offset)) + for rw.offset < end { + mr := memmap.MappableRange{uint64(rw.offset), uint64(end)} + switch { + case seg.Ok() && seg.Start() < mr.End: + // Get internal mappings from the cache. + segMR := seg.Range().Intersect(mr) + ims, err := mf.MapInternal(seg.FileRangeOf(segMR), usermem.Write) + if err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() + return done, err + } + + // Copy to internal mappings. + n, err := safemem.CopySeq(ims, srcs) + done += n + rw.offset += int64(n) + srcs = srcs.DropFirst64(n) + rw.c.dirty.MarkDirty(segMR) + if err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() + return done, err + } + + // Continue. + seg, gap = seg.NextNonEmpty() + + case gap.Ok() && gap.Start() < mr.End: + // Write directly to the backing file. At present, we never fill + // the cache when writing, since doing so can convert small writes + // into inefficient read-modify-write cycles, and we have no + // mechanism for detecting or avoiding this. 
+ gapmr := gap.Range().Intersect(mr) + src := srcs.TakeFirst64(gapmr.Length()) + n, err := rw.c.backingFile.WriteFromBlocksAt(rw.ctx, src, gapmr.Start) + done += n + rw.offset += int64(n) + srcs = srcs.DropFirst64(n) + // Partial writes are fine. But we must stop writing. + if n != src.NumBytes() || err != nil { + rw.maybeGrowFile() + rw.c.dataMu.Unlock() + return done, err + } + + // Continue. + seg, gap = gap.NextSegment(), FileRangeGapIterator{} + + default: + break + } + } + rw.maybeGrowFile() + rw.c.dataMu.Unlock() + return done, nil +} + +// useHostPageCache returns true if c uses c.backingFile.FD() for all file I/O +// and memory mappings, and false if c.cache may contain data cached from +// c.backingFile. +func (c *CachingInodeOperations) useHostPageCache() bool { + return !c.opts.ForcePageCache && c.backingFile.FD() >= 0 +} + +// AddMapping implements memmap.Mappable.AddMapping. +func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { + // Hot path. Avoid defers. + c.mapsMu.Lock() + mapped := c.mappings.AddMapping(ms, ar, offset, writable) + // Do this unconditionally since whether we have c.backingFile.FD() >= 0 + // can change across save/restore. + for _, r := range mapped { + c.hostFileMapper.IncRefOn(r) + } + if !c.useHostPageCache() { + // c.Evict() will refuse to evict memory-mapped pages, so tell the + // MemoryFile to not bother trying. + mf := c.mfp.MemoryFile() + for _, r := range mapped { + mf.MarkUnevictable(c, pgalloc.EvictableRange{r.Start, r.End}) + } + } + c.mapsMu.Unlock() + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { + // Hot path. Avoid defers. 
+ c.mapsMu.Lock() + unmapped := c.mappings.RemoveMapping(ms, ar, offset, writable) + for _, r := range unmapped { + c.hostFileMapper.DecRefOn(r) + } + if c.useHostPageCache() { + c.mapsMu.Unlock() + return + } + + // Pages that are no longer referenced by any application memory mappings + // are now considered unused; allow MemoryFile to evict them when + // necessary. + mf := c.mfp.MemoryFile() + c.dataMu.Lock() + for _, r := range unmapped { + // Since these pages are no longer mapped, they are no longer + // concurrently dirtyable by a writable memory mapping. + c.dirty.AllowClean(r) + mf.MarkEvictable(c, pgalloc.EvictableRange{r.Start, r.End}) + } + c.dataMu.Unlock() + c.mapsMu.Unlock() +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { + return c.AddMapping(ctx, ms, dstAR, offset, writable) +} + +// Translate implements memmap.Mappable.Translate. +func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + // Hot path. Avoid defer. + if c.useHostPageCache() { + mr := optional + if c.opts.LimitHostFDTranslation { + mr = maxFillRange(required, optional) + } + return []memmap.Translation{ + { + Source: mr, + File: c, + Offset: mr.Start, + Perms: usermem.AnyAccess, + }, + }, nil + } + + c.dataMu.Lock() + + // Constrain translations to c.attr.Size (rounded up) to prevent + // translation to pages that may be concurrently truncated. 
+ pgend := fs.OffsetPageEnd(c.attr.Size) + var beyondEOF bool + if required.End > pgend { + if required.Start >= pgend { + c.dataMu.Unlock() + return nil, &memmap.BusError{io.EOF} + } + beyondEOF = true + required.End = pgend + } + if optional.End > pgend { + optional.End = pgend + } + + mf := c.mfp.MemoryFile() + cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mf, usage.PageCache, c.backingFile.ReadToBlocksAt) + + var ts []memmap.Translation + var translatedEnd uint64 + for seg := c.cache.FindSegment(required.Start); seg.Ok() && seg.Start() < required.End; seg, _ = seg.NextNonEmpty() { + segMR := seg.Range().Intersect(optional) + // TODO(jamieliu): Make Translations writable even if writability is + // not required if already kept-dirty by another writable translation. + perms := usermem.AccessType{ + Read: true, + Execute: true, + } + if at.Write { + // From this point forward, this memory can be dirtied through the + // mapping at any time. + c.dirty.KeepDirty(segMR) + perms.Write = true + } + ts = append(ts, memmap.Translation{ + Source: segMR, + File: mf, + Offset: seg.FileRangeOf(segMR).Start, + Perms: perms, + }) + translatedEnd = segMR.End + } + + c.dataMu.Unlock() + + // Don't return the error returned by c.cache.Fill if it occurred outside + // of required. + if translatedEnd < required.End && cerr != nil { + return ts, &memmap.BusError{cerr} + } + if beyondEOF { + return ts, &memmap.BusError{io.EOF} + } + return ts, nil +} + +func maxFillRange(required, optional memmap.MappableRange) memmap.MappableRange { + const maxReadahead = 64 << 10 // 64 KB, chosen arbitrarily + if required.Length() >= maxReadahead { + return required + } + if optional.Length() <= maxReadahead { + return optional + } + optional.Start = required.Start + if optional.Length() <= maxReadahead { + return optional + } + optional.End = optional.Start + maxReadahead + return optional +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. 
+func (c *CachingInodeOperations) InvalidateUnsavable(ctx context.Context) error { + // Whether we have a host fd (and consequently what platform.File is + // mapped) can change across save/restore, so invalidate all translations + // unconditionally. + c.mapsMu.Lock() + defer c.mapsMu.Unlock() + c.mappings.InvalidateAll(memmap.InvalidateOpts{}) + + // Sync the cache's contents so that if we have a host fd after restore, + // the remote file's contents are coherent. + mf := c.mfp.MemoryFile() + c.dataMu.Lock() + defer c.dataMu.Unlock() + if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil { + return err + } + + // Discard the cache so that it's not stored in saved state. This is safe + // because per InvalidateUnsavable invariants, no new translations can have + // been returned after we invalidated all existing translations above. + c.cache.DropAll(mf) + c.dirty.RemoveAll() + + return nil +} + +// NotifyChangeFD must be called after the file description represented by +// CachedFileObject.FD() changes. +func (c *CachingInodeOperations) NotifyChangeFD() error { + // Update existing sentry mappings to refer to the new file description. + if err := c.hostFileMapper.RegenerateMappings(c.backingFile.FD()); err != nil { + return err + } + + // Shoot down existing application mappings of the old file description; + // they will be remapped with the new file description on demand. + c.mapsMu.Lock() + defer c.mapsMu.Unlock() + + c.mappings.InvalidateAll(memmap.InvalidateOpts{}) + return nil +} + +// Evict implements pgalloc.EvictableMemoryUser.Evict. +func (c *CachingInodeOperations) Evict(ctx context.Context, er pgalloc.EvictableRange) { + c.mapsMu.Lock() + defer c.mapsMu.Unlock() + c.dataMu.Lock() + defer c.dataMu.Unlock() + + mr := memmap.MappableRange{er.Start, er.End} + mf := c.mfp.MemoryFile() + // Only allow pages that are no longer memory-mapped to be evicted. 
+ for mgap := c.mappings.LowerBoundGap(mr.Start); mgap.Ok() && mgap.Start() < mr.End; mgap = mgap.NextGap() { + mgapMR := mgap.Range().Intersect(mr) + if mgapMR.Length() == 0 { + continue + } + if err := SyncDirty(ctx, mgapMR, &c.cache, &c.dirty, uint64(c.attr.Size), mf, c.backingFile.WriteFromBlocksAt); err != nil { + log.Warningf("Failed to writeback cached data %v: %v", mgapMR, err) + } + c.cache.Drop(mgapMR, mf) + c.dirty.KeepClean(mgapMR) + } +} + +// IncRef implements platform.File.IncRef. This is used when we directly map an +// underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. +func (c *CachingInodeOperations) IncRef(fr platform.FileRange) { + // Hot path. Avoid defers. + c.dataMu.Lock() + seg, gap := c.refs.Find(fr.Start) + for { + switch { + case seg.Ok() && seg.Start() < fr.End: + seg = c.refs.Isolate(seg, fr) + seg.SetValue(seg.Value() + 1) + seg, gap = seg.NextNonEmpty() + case gap.Ok() && gap.Start() < fr.End: + newRange := gap.Range().Intersect(fr) + usage.MemoryAccounting.Inc(newRange.Length(), usage.Mapped) + seg, gap = c.refs.InsertWithoutMerging(gap, newRange, 1).NextNonEmpty() + default: + c.refs.MergeAdjacent(fr) + c.dataMu.Unlock() + return + } + } +} + +// DecRef implements platform.File.DecRef. This is used when we directly map an +// underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. +func (c *CachingInodeOperations) DecRef(fr platform.FileRange) { + // Hot path. Avoid defers. + c.dataMu.Lock() + seg := c.refs.FindSegment(fr.Start) + + for seg.Ok() && seg.Start() < fr.End { + seg = c.refs.Isolate(seg, fr) + if old := seg.Value(); old == 1 { + usage.MemoryAccounting.Dec(seg.Range().Length(), usage.Mapped) + seg = c.refs.Remove(seg).NextSegment() + } else { + seg.SetValue(old - 1) + seg = seg.NextSegment() + } + } + c.refs.MergeAdjacent(fr) + c.dataMu.Unlock() +} + +// MapInternal implements platform.File.MapInternal. 
This is used when we +// directly map an underlying host fd and CachingInodeOperations is used as the +// platform.File during translation. +func (c *CachingInodeOperations) MapInternal(fr platform.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { + return c.hostFileMapper.MapInternal(fr, c.backingFile.FD(), at.Write) +} + +// FD implements platform.File.FD. This is used when we directly map an +// underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. +func (c *CachingInodeOperations) FD() int { + return c.backingFile.FD() +} diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go new file mode 100644 index 000000000..1547584c5 --- /dev/null +++ b/pkg/sentry/fs/fsutil/inode_cached_test.go @@ -0,0 +1,389 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fsutil + +import ( + "bytes" + "io" + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +type noopBackingFile struct{} + +func (noopBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + return dsts.NumBytes(), nil +} + +func (noopBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + return srcs.NumBytes(), nil +} + +func (noopBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr, bool) error { + return nil +} + +func (noopBackingFile) Sync(context.Context) error { + return nil +} + +func (noopBackingFile) FD() int { + return -1 +} + +func (noopBackingFile) Allocate(ctx context.Context, offset int64, length int64) error { + return nil +} + +func TestSetPermissions(t *testing.T) { + ctx := contexttest.Context(t) + + uattr := fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Perms: fs.FilePermsFromMode(0444), + }) + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{}) + defer iops.Release() + + perms := fs.FilePermsFromMode(0777) + if !iops.SetPermissions(ctx, nil, perms) { + t.Fatalf("SetPermissions failed, want success") + } + + // Did permissions change? + if iops.attr.Perms != perms { + t.Fatalf("got perms +%v, want +%v", iops.attr.Perms, perms) + } + + // Did status change time change? 
+ if !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("got status change time not dirty, want dirty") + } + if iops.attr.StatusChangeTime.Equal(uattr.StatusChangeTime) { + t.Fatalf("got status change time unchanged") + } +} + +func TestSetTimestamps(t *testing.T) { + ctx := contexttest.Context(t) + for _, test := range []struct { + desc string + ts fs.TimeSpec + wantChanged fs.AttrMask + }{ + { + desc: "noop", + ts: fs.TimeSpec{ + ATimeOmit: true, + MTimeOmit: true, + }, + wantChanged: fs.AttrMask{}, + }, + { + desc: "access time only", + ts: fs.TimeSpec{ + ATime: ktime.NowFromContext(ctx), + MTimeOmit: true, + }, + wantChanged: fs.AttrMask{ + AccessTime: true, + }, + }, + { + desc: "modification time only", + ts: fs.TimeSpec{ + ATimeOmit: true, + MTime: ktime.NowFromContext(ctx), + }, + wantChanged: fs.AttrMask{ + ModificationTime: true, + }, + }, + { + desc: "access and modification time", + ts: fs.TimeSpec{ + ATime: ktime.NowFromContext(ctx), + MTime: ktime.NowFromContext(ctx), + }, + wantChanged: fs.AttrMask{ + AccessTime: true, + ModificationTime: true, + }, + }, + { + desc: "system time access and modification time", + ts: fs.TimeSpec{ + ATimeSetSystemTime: true, + MTimeSetSystemTime: true, + }, + wantChanged: fs.AttrMask{ + AccessTime: true, + ModificationTime: true, + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + ctx := contexttest.Context(t) + + epoch := ktime.ZeroTime + uattr := fs.UnstableAttr{ + AccessTime: epoch, + ModificationTime: epoch, + StatusChangeTime: epoch, + } + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{}) + defer iops.Release() + + if err := iops.SetTimestamps(ctx, nil, test.ts); err != nil { + t.Fatalf("SetTimestamps got error %v, want nil", err) + } + if test.wantChanged.AccessTime { + if !iops.attr.AccessTime.After(uattr.AccessTime) { + t.Fatalf("diritied access time did not advance, want %v > %v", iops.attr.AccessTime, uattr.AccessTime) + } + if !iops.dirtyAttr.StatusChangeTime 
{ + t.Fatalf("dirty access time requires dirty status change time") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not advance") + } + } + if test.wantChanged.ModificationTime { + if !iops.attr.ModificationTime.After(uattr.ModificationTime) { + t.Fatalf("diritied modification time did not advance") + } + if !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("dirty modification time requires dirty status change time") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not advance") + } + } + }) + } +} + +func TestTruncate(t *testing.T) { + ctx := contexttest.Context(t) + + uattr := fs.UnstableAttr{ + Size: 0, + } + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, CachingInodeOperationsOptions{}) + defer iops.Release() + + if err := iops.Truncate(ctx, nil, uattr.Size); err != nil { + t.Fatalf("Truncate got error %v, want nil", err) + } + var size int64 = 4096 + if err := iops.Truncate(ctx, nil, size); err != nil { + t.Fatalf("Truncate got error %v, want nil", err) + } + if iops.attr.Size != size { + t.Fatalf("Truncate got %d, want %d", iops.attr.Size, size) + } + if !iops.dirtyAttr.ModificationTime || !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("Truncate did not dirty modification and status change time") + } + if !iops.attr.ModificationTime.After(uattr.ModificationTime) { + t.Fatalf("dirtied modification time did not change") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not change") + } +} + +type sliceBackingFile struct { + data []byte +} + +func newSliceBackingFile(data []byte) *sliceBackingFile { + return &sliceBackingFile{data} +} + +func (f *sliceBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + r := safemem.BlockSeqReader{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)} + 
return r.ReadToBlocks(dsts) +} + +func (f *sliceBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + w := safemem.BlockSeqWriter{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)} + return w.WriteFromBlocks(srcs) +} + +func (*sliceBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr, bool) error { + return nil +} + +func (*sliceBackingFile) Sync(context.Context) error { + return nil +} + +func (*sliceBackingFile) FD() int { + return -1 +} + +func (f *sliceBackingFile) Allocate(ctx context.Context, offset int64, length int64) error { + return syserror.EOPNOTSUPP +} + +type noopMappingSpace struct{} + +// Invalidate implements memmap.MappingSpace.Invalidate. +func (noopMappingSpace) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) { +} + +func anonInode(ctx context.Context) *fs.Inode { + return fs.NewInode(ctx, &SimpleFileInode{ + InodeSimpleAttributes: NewInodeSimpleAttributes(ctx, fs.FileOwnerFromContext(ctx), fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, 0), + }, fs.NewPseudoMountSource(ctx), fs.StableAttr{ + Type: fs.Anonymous, + BlockSize: usermem.PageSize, + }) +} + +func pagesOf(bs ...byte) []byte { + buf := make([]byte, 0, len(bs)*usermem.PageSize) + for _, b := range bs { + buf = append(buf, bytes.Repeat([]byte{b}, usermem.PageSize)...) + } + return buf +} + +func TestRead(t *testing.T) { + ctx := contexttest.Context(t) + + // Construct a 3-page file. + buf := pagesOf('a', 'b', 'c') + file := fs.NewFile(ctx, fs.NewDirent(ctx, anonInode(ctx), "anon"), fs.FileFlags{}, nil) + uattr := fs.UnstableAttr{ + Size: int64(len(buf)), + } + iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, CachingInodeOperationsOptions{}) + defer iops.Release() + + // Expect the cache to be initially empty. 
+ if cached := iops.cache.Span(); cached != 0 { + t.Errorf("Span got %d, want 0", cached) + } + + // Create a memory mapping of the second page (as CachingInodeOperations + // expects to only cache mapped pages), then call Translate to force it to + // be cached. + var ms noopMappingSpace + ar := usermem.AddrRange{usermem.PageSize, 2 * usermem.PageSize} + if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil { + t.Fatalf("AddMapping got %v, want nil", err) + } + mr := memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize} + if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + t.Fatalf("Translate got %v, want nil", err) + } + if cached := iops.cache.Span(); cached != usermem.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, usermem.PageSize) + } + + // Try to read 4 pages. The first and third pages should be read directly + // from the "file", the second page should be read from the cache, and only + // 3 pages (the size of the file) should be readable. + rbuf := make([]byte, 4*usermem.PageSize) + dst := usermem.BytesIOSequence(rbuf) + n, err := iops.Read(ctx, file, dst, 0) + if n != 3*usermem.PageSize || (err != nil && err != io.EOF) { + t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*usermem.PageSize) + } + rbuf = rbuf[:3*usermem.PageSize] + + // Did we get the bytes we expect? + if !bytes.Equal(rbuf, buf) { + t.Errorf("Read back bytes %v, want %v", rbuf, buf) + } + + // Delete the memory mapping before iops.Release(). The cached page will + // either be evicted by ctx's pgalloc.MemoryFile, or dropped by + // iops.Release(). + iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true) +} + +func TestWrite(t *testing.T) { + ctx := contexttest.Context(t) + + // Construct a 4-page file. + buf := pagesOf('a', 'b', 'c', 'd') + orig := append([]byte(nil), buf...) 
+ inode := anonInode(ctx) + uattr := fs.UnstableAttr{ + Size: int64(len(buf)), + } + iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, CachingInodeOperationsOptions{}) + defer iops.Release() + + // Expect the cache to be initially empty. + if cached := iops.cache.Span(); cached != 0 { + t.Errorf("Span got %d, want 0", cached) + } + + // Create a memory mapping of the second and third pages (as + // CachingInodeOperations expects to only cache mapped pages), then call + // Translate to force them to be cached. + var ms noopMappingSpace + ar := usermem.AddrRange{usermem.PageSize, 3 * usermem.PageSize} + if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize, true); err != nil { + t.Fatalf("AddMapping got %v, want nil", err) + } + defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize, true) + mr := memmap.MappableRange{usermem.PageSize, 3 * usermem.PageSize} + if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + t.Fatalf("Translate got %v, want nil", err) + } + if cached := iops.cache.Span(); cached != 2*usermem.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, 2*usermem.PageSize) + } + + // Write to the first 2 pages. + wbuf := pagesOf('e', 'f') + src := usermem.BytesIOSequence(wbuf) + n, err := iops.Write(ctx, src, 0) + if n != 2*usermem.PageSize || err != nil { + t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*usermem.PageSize) + } + + // The first page should have been written directly, since it was not cached. + want := append([]byte(nil), orig...) + copy(want, pagesOf('e')) + if !bytes.Equal(buf, want) { + t.Errorf("File contents are %v, want %v", buf, want) + } + + // Sync back to the "backing file". + if err := iops.WriteOut(ctx, inode); err != nil { + t.Errorf("Sync got %v, want nil", err) + } + + // Now the second page should have been written as well. 
+ copy(want[usermem.PageSize:], pagesOf('f')) + if !bytes.Equal(buf, want) { + t.Errorf("File contents are %v, want %v", buf, want) + } +} diff --git a/pkg/sentry/fs/g3doc/.gitignore b/pkg/sentry/fs/g3doc/.gitignore new file mode 100644 index 000000000..2d19fc766 --- /dev/null +++ b/pkg/sentry/fs/g3doc/.gitignore @@ -0,0 +1 @@ +*.html diff --git a/pkg/sentry/fs/g3doc/fuse.md b/pkg/sentry/fs/g3doc/fuse.md new file mode 100644 index 000000000..2ca84dd74 --- /dev/null +++ b/pkg/sentry/fs/g3doc/fuse.md @@ -0,0 +1,263 @@ +# Foreword + +This document describes an on-going project to support FUSE filesystems within +the sentry. This is intended to become the final documentation for this +subsystem, and is therefore written in the past tense. However FUSE support is +currently incomplete and the document will be updated as things progress. + +# FUSE: Filesystem in Userspace + +The sentry supports dispatching filesystem operations to a FUSE server, allowing +FUSE filesystem to be used with a sandbox. + +## Overview + +FUSE has two main components: + +1. A client kernel driver (canonically `fuse.ko` in Linux), which forwards + filesystem operations (usually initiated by syscalls) to the server. + +2. A server, which is a userspace daemon that implements the actual filesystem. + +The sentry implements the client component, which allows a server daemon running +within the sandbox to implement a filesystem within the sandbox. + +A FUSE filesystem is initialized with `mount(2)`, typically with the help of a +utility like `fusermount(1)`. Various mount options exist for establishing +ownership and access permissions on the filesystem, but the most important mount +option is a file descriptor used to establish communication between the client +and server. + +The FUSE device FD is obtained by opening `/dev/fuse`. During regular operation, +the client and server use the FUSE protocol described in `fuse(4)` to service +filesystem operations. 
See the "Protocol" section below for more information +about this protocol. The core of the sentry support for FUSE is the client-side +implementation of this protocol. + +## FUSE in the Sentry + +The sentry's FUSE client targets VFS2 and has the following components: + +- An implementation of `/dev/fuse`. + +- A VFS2 filesystem for mapping syscalls to FUSE ops. Since we're targeting + VFS2, one point of contention may be the lack of inodes in VFS2. We can + tentatively implement a kernfs-based filesystem to bridge the gap in APIs. + The kernfs base functionality can serve the role of the Linux inode cache + and the filesystem can map VFS2 syscalls to kernfs inode operations; see + the `kernfs.Inode` interface. + +The FUSE protocol lends itself well to marshaling with `go_marshal`. The various +request and response packets can be defined in the ABI package and converted to +and from the wire format using `go_marshal`. + +### Design Goals + +- While filesystem performance is always important, the sentry's FUSE support + is primarily concerned with compatibility, with performance as a secondary + concern. + +- Avoiding deadlocks from a hung server daemon. + +- Consider the potential for denial of service from a malicious server daemon. + Protecting itself from userspace is already a design goal for the sentry, + but needs additional consideration for FUSE. Normally, an operating system + doesn't rely on userspace to make progress with filesystem operations. Since + this changes with FUSE, it opens up the possibility of creating a chain of + dependencies controlled by userspace, which could affect an entire sandbox. + For example: a FUSE op can block a syscall, which could be holding a + subsystem lock, which can then block another task goroutine. + +### Milestones + +Below are some broad goals to aim for while implementing FUSE in the sentry. +Many FUSE ops can be grouped into broad categories of functionality, and most +ops can be implemented in parallel. 
+ +#### Minimal client that can mount a trivial FUSE filesystem. + +- Implement `/dev/fuse` - a character device used to establish an FD for + communication between the sentry and the server daemon. + +- Implement basic FUSE ops like `FUSE_INIT`, `FUSE_DESTROY`. + +#### Read-only mount with basic file operations + +- Implement the majority of file, directory and file descriptor FUSE ops. For + this milestone, we can skip uncommon or complex operations like mmap, mknod, + file locking, poll, and extended attributes. We can stub these out along + with any ops that modify the filesystem. The exact list of required ops are + to be determined, but the goal is to mount a real filesystem as read-only, + and be able to read contents from the filesystem in the sentry. + +#### Full read-write support + +- Implement the remaining FUSE ops and decide if we can omit rarely used + operations like ioctl. + +# Appendix + +## FUSE Protocol + +The FUSE protocol is a request-response protocol. All requests are initiated by +the client. The wire-format for the protocol is raw C structs serialized to +memory. + +All FUSE requests begin with the following request header: + +```c +struct fuse_in_header { + uint32_t len; // Length of the request, including this header. + uint32_t opcode; // Requested operation. + uint64_t unique; // A unique identifier for this request. + uint64_t nodeid; // ID of the filesystem object being operated on. + uint32_t uid; // UID of the requesting process. + uint32_t gid; // GID of the requesting process. + uint32_t pid; // PID of the requesting process. + uint32_t padding; +}; +``` + +The request is then followed by a payload specific to the `opcode`. + +All responses begin with this response header: + +```c +struct fuse_out_header { + uint32_t len; // Length of the response, including this header. + int32_t error; // Status of the request, 0 if success. + uint64_t unique; // The unique identifier from the corresponding request. 
+}; +``` + +The response payload also depends on the request `opcode`. If `error != 0`, the +response payload must be empty. + +### Operations + +The following is a list of all FUSE operations used in `fuse_in_header.opcode` +as of Linux v4.4, and a brief description of their purpose. These are defined in +`uapi/linux/fuse.h`. Many of these have a corresponding request and response +payload struct; `fuse(4)` has details for some of these. We also note how these +operations map to the sentry virtual filesystem. + +#### FUSE meta-operations + +These operations are specific to FUSE and don't have a corresponding action in a +generic filesystem. + +- `FUSE_INIT`: This operation initializes a new FUSE filesystem, and is the + first message sent by the client after mount. This is used for version and + feature negotiation. This is related to `mount(2)`. +- `FUSE_DESTROY`: Teardown a FUSE filesystem, related to `unmount(2)`. +- `FUSE_INTERRUPT`: Interrupts an in-flight operation, specified by the + `fuse_in_header.unique` value provided in the corresponding request header. + The client can send at most one of these per request, and will enter an + uninterruptible wait for a reply. The server is expected to reply promptly. +- `FUSE_FORGET`: A hint to the server that the server should evict the indicated + node from any caches. This is wired up to `(struct + super_operations).evict_inode` in Linux, which is in turn hooked as the + inode cache shrinker, which is typically triggered by system memory pressure. +- `FUSE_BATCH_FORGET`: Batch version of `FUSE_FORGET`. + +#### Filesystem Syscalls + +These FUSE ops map directly to an equivalent filesystem syscall, or family of +syscalls. The relevant syscalls have a similar name to the operation, unless +otherwise noted. + +Node creation: + +- `FUSE_MKNOD` +- `FUSE_MKDIR` +- `FUSE_CREATE`: This is equivalent to `open(2)` and `creat(2)`, which + atomically creates and opens a node. 
+ +Node attributes and extended attributes: + +- `FUSE_GETATTR` +- `FUSE_SETATTR` +- `FUSE_SETXATTR` +- `FUSE_GETXATTR` +- `FUSE_LISTXATTR` +- `FUSE_REMOVEXATTR` + +Node link manipulation: + +- `FUSE_READLINK` +- `FUSE_LINK` +- `FUSE_SYMLINK` +- `FUSE_UNLINK` + +Directory operations: + +- `FUSE_RMDIR` +- `FUSE_RENAME` +- `FUSE_RENAME2` +- `FUSE_OPENDIR`: `open(2)` for directories. +- `FUSE_RELEASEDIR`: `close(2)` for directories. +- `FUSE_READDIR` +- `FUSE_READDIRPLUS` +- `FUSE_FSYNCDIR`: `fsync(2)` for directories. +- `FUSE_LOOKUP`: Establishes a unique identifier for a FS node. This is + reminiscent of `VirtualFilesystem.GetDentryAt` in that it resolves a path + component to a node. However the returned identifier is opaque to the + client. The server must remember this mapping, as this is how the client + will reference the node in the future. + +File operations: + +- `FUSE_OPEN`: `open(2)` for files. +- `FUSE_RELEASE`: `close(2)` for files. +- `FUSE_FSYNC` +- `FUSE_FALLOCATE` +- `FUSE_SETUPMAPPING`: Creates a memory map on a file for `mmap(2)`. +- `FUSE_REMOVEMAPPING`: Removes a memory map for `munmap(2)`. + +File locking: + +- `FUSE_GETLK` +- `FUSE_SETLK` +- `FUSE_SETLKW` +- `FUSE_COPY_FILE_RANGE` + +File descriptor operations: + +- `FUSE_IOCTL` +- `FUSE_POLL` +- `FUSE_LSEEK` + +Filesystem operations: + +- `FUSE_STATFS` + +#### Permissions + +- `FUSE_ACCESS` is used to check if a node is accessible, as part of many + syscall implementations. Maps to `vfs.FilesystemImpl.AccessAt` in the + sentry. + +#### I/O Operations + +These ops are used to read and write file pages. They're used to implement both +I/O syscalls like `read(2)`, `write(2)` and `mmap(2)`. + +- `FUSE_READ` +- `FUSE_WRITE` + +#### Miscellaneous + +- `FUSE_FLUSH`: Used by the client to indicate when a file descriptor is + closed. Distinct from `FUSE_FSYNC`, which corresponds to an `fsync(2)` + syscall from the user. Maps to `vfs.FileDescriptorImpl.Release` in the + sentry. 
+- `FUSE_BMAP`: Old address space API for block defrag. Probably not needed. +- `FUSE_NOTIFY_REPLY`: [TODO: what does this do?] + +# References + +- [fuse(4) Linux manual page](https://www.man7.org/linux/man-pages/man4/fuse.4.html) +- [Linux kernel FUSE documentation](https://www.kernel.org/doc/html/latest/filesystems/fuse.html) +- [The reference implementation of the Linux FUSE (Filesystem in Userspace) + interface](https://github.com/libfuse/libfuse) +- [The kernel interface of FUSE](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fuse.h) diff --git a/pkg/sentry/fs/g3doc/inotify.md b/pkg/sentry/fs/g3doc/inotify.md new file mode 100644 index 000000000..85063d4e6 --- /dev/null +++ b/pkg/sentry/fs/g3doc/inotify.md @@ -0,0 +1,122 @@ +# Inotify + +Inotify implements the like-named filesystem event notification system for the +sentry, see `inotify(7)`. + +## Architecture + +For the most part, the sentry implementation of inotify mirrors the Linux +architecture. Inotify instances (i.e. the fd returned by inotify_init(2)) are +backed by a pseudo-filesystem. Events are generated from various places in the +sentry, including the [syscall layer][syscall_dir], the [vfs layer][dirent] and +the [process fd table][fd_table]. Watches are stored in inodes and generated +events are queued to the inotify instance owning the watches for delivery to the +user. + +## Objects + +Here is a brief description of the existing and new objects involved in the +sentry inotify mechanism, and how they interact: + +### [`fs.Inotify`][inotify] + +- An inotify instances, created by inotify_init(2)/inotify_init1(2). +- The inotify fd has a `fs.Dirent`, supports filesystem syscalls to read + events. +- Has multiple `fs.Watch`es, with at most one watch per target inode, per + inotify instance. +- Has an instance `id` which is globally unique. This is *not* the fd number + for this instance, since the fd can be duped. This `id` is not externally + visible. 
+ +### [`fs.Watch`][watch] + +- An inotify watch, created/deleted by + inotify_add_watch(2)/inotify_rm_watch(2). +- Owned by an `fs.Inotify` instance, each watch keeps a pointer to the + `owner`. +- Associated with a single `fs.Inode`, which is the watch `target`. While the + watch is active, it indirectly pins `target` to memory. See the "Reference + Model" section for a detailed explanation. +- Filesystem operations on `target` generate `fs.Event`s. + +### [`fs.Event`][event] + +- A simple struct encapsulating all the fields for an inotify event. +- Generated by `fs.Watch`es and forwarded to the watches' `owner`s. +- Serialized to the user during read(2) syscalls on the associated + `fs.Inotify`'s fd. + +### [`fs.Dirent`][dirent] + +- Many inotify events are generated inside dirent methods. Events are + generated in the dirent methods rather than `fs.Inode` methods because some + events carry the name of the subject node, and node names are generally + unavailable in an `fs.Inode`. +- Dirents do not directly contain state for any watches. Instead, they forward + notifications to the underlying `fs.Inode`. + +### [`fs.Inode`][inode] + +- Interacts with inotify through `fs.Watch`es. +- Inodes contain a map of all active `fs.Watch`es on them. +- An `fs.Inotify` instance can have at most one `fs.Watch` per inode. + `fs.Watch`es on an inode are indexed by their `owner`'s `id`. +- All inotify logic is encapsulated in the [`Watches`][inode_watches] struct + in an inode. Logically, `Watches` is the set of inotify watches on the + inode. + +## Reference Model + +The sentry inotify implementation has a complex reference model. An inotify +watch observes a single inode. For efficient lookup, the state for a watch is +stored directly on the target inode. This state needs to be persistent for the +lifetime of watch. 
Unlike usual filesystem metadata, the watch state has no +"on-disk" representation, so it cannot be reconstructed by the filesystem if +the inode is flushed from memory. This effectively means we need to keep any +inodes with active watches pinned to memory. + +We can't just hold an extra ref on the inode to pin it to memory because some +filesystems (such as gofer-based filesystems) don't have persistent inodes. In +such a filesystem, if we just pin the inode, nothing prevents the enclosing +dirent from being GCed. Once the dirent is GCed, the pinned inode is +unreachable -- these filesystems generate a new inode by re-reading the node +state on the next walk. Incidentally, hardlinks also don't work on these +filesystems for this reason. + +To prevent the above scenario, when a new watch is added on an inode, we *pin* +the dirent we used to reach the inode. Note that due to hardlinks, this dirent +may not be the only dirent pointing to the inode. Attempting to set an inotify +watch via multiple hardlinks to the same file results in the same watch being +returned for both links. However, for each new dirent we use to reach the same +inode, we add a new pin. We need a new pin for each new dirent used to reach the +inode because we have no guarantees about the deletion order of the different +links to the inode. + +## Lock Ordering + +There are 4 locks related to the inotify implementation: + +- `Inotify.mu`: the inotify instance lock. +- `Inotify.evMu`: the inotify event queue lock. +- `Watch.mu`: the watch lock, used to protect pins. +- `fs.Watches.mu`: the inode watch set mutex, used to protect the collection of + watches on the inode. + +The correct lock ordering for inotify code is: + +`Inotify.mu` -> `fs.Watches.mu` -> `Watch.mu` -> `Inotify.evMu`. + +We need a distinct lock for the event queue because by the time a goroutine +attempts to queue a new event, it is already holding `fs.Watches.mu`. 
If we used +`Inotify.mu` to also protect the event queue, this would violate the above lock +ordering. + +[dirent]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/dirent.go +[event]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify_event.go +[fd_table]: https://github.com/google/gvisor/blob/master/pkg/sentry/kernel/fd_table.go +[inode]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inode.go +[inode_watches]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inode_inotify.go +[inotify]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify.go +[syscall_dir]: https://github.com/google/gvisor/blob/master/pkg/sentry/syscalls/linux/ +[watch]: https://github.com/google/gvisor/blob/master/pkg/sentry/fs/inotify_watch.go diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD new file mode 100644 index 000000000..fea135eea --- /dev/null +++ b/pkg/sentry/fs/gofer/BUILD @@ -0,0 +1,67 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "gofer", + srcs = [ + "attr.go", + "cache_policy.go", + "context_file.go", + "device.go", + "fifo.go", + "file.go", + "file_state.go", + "fs.go", + "handles.go", + "inode.go", + "inode_state.go", + "path.go", + "session.go", + "session_state.go", + "socket.go", + "util.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/fd", + "//pkg/log", + "//pkg/metric", + "//pkg/p9", + "//pkg/refs", + "//pkg/safemem", + "//pkg/secio", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fdpipe", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/host", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/pipe", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/socket/unix/transport", + "//pkg/sync", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/unet", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "gofer_test", + size 
= "small", + srcs = ["gofer_test.go"], + library = ":gofer", + deps = [ + "//pkg/context", + "//pkg/p9", + "//pkg/p9/p9test", + "//pkg/sentry/contexttest", + "//pkg/sentry/fs", + ], +) diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go new file mode 100644 index 000000000..d481baf77 --- /dev/null +++ b/pkg/sentry/fs/gofer/attr.go @@ -0,0 +1,172 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/usermem" +) + +// getattr returns the 9p attributes of the p9.File. On success, Mode, Size, and RDev +// are guaranteed to be masked as valid. +func getattr(ctx context.Context, file contextFile) (p9.QID, p9.AttrMask, p9.Attr, error) { + // Retrieve attributes over the wire. + qid, valid, attr, err := file.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + return qid, valid, attr, err + } + + // Require mode, size, and raw device id. 
+ if !valid.Mode || !valid.Size || !valid.RDev { + return qid, valid, attr, syscall.EIO + } + + return qid, valid, attr, nil +} + +func unstable(ctx context.Context, valid p9.AttrMask, pattr p9.Attr, mounter fs.FileOwner, client *p9.Client) fs.UnstableAttr { + return fs.UnstableAttr{ + Size: int64(pattr.Size), + Usage: int64(pattr.Size), + Perms: perms(valid, pattr, client), + Owner: owner(mounter, valid, pattr), + AccessTime: atime(ctx, valid, pattr), + ModificationTime: mtime(ctx, valid, pattr), + StatusChangeTime: ctime(ctx, valid, pattr), + Links: links(valid, pattr), + } +} + +func perms(valid p9.AttrMask, pattr p9.Attr, client *p9.Client) fs.FilePermissions { + if pattr.Mode.IsDir() && !p9.VersionSupportsMultiUser(client.Version()) { + // If user and group permissions bits are not supplied, use + // "other" bits to supplement them. + // + // Older Gofer's fake directories only have "other" permission, + // but will often be accessed via user or group permissions. + if pattr.Mode&0770 == 0 { + other := pattr.Mode & 07 + pattr.Mode = pattr.Mode | other<<3 | other<<6 + } + } + return fs.FilePermsFromP9(pattr.Mode) +} + +func owner(mounter fs.FileOwner, valid p9.AttrMask, pattr p9.Attr) fs.FileOwner { + // Unless the file returned its UID and GID, it belongs to the mounting + // task's EUID/EGID. + owner := mounter + if valid.UID { + if pattr.UID.Ok() { + owner.UID = auth.KUID(pattr.UID) + } else { + owner.UID = auth.KUID(auth.OverflowUID) + } + } + if valid.GID { + if pattr.GID.Ok() { + owner.GID = auth.KGID(pattr.GID) + } else { + owner.GID = auth.KGID(auth.OverflowGID) + } + } + return owner +} + +// bsize returns a block size from 9p attributes. +func bsize(pattr p9.Attr) int64 { + if pattr.BlockSize > 0 { + return int64(pattr.BlockSize) + } + // Some files, particularly those that are not on a local file system, + // may have no clue of their block size. Better not to report something + // misleading or buggy and have a safe default. 
+ return usermem.PageSize +} + +// ntype returns an fs.InodeType from 9p attributes. +func ntype(pattr p9.Attr) fs.InodeType { + switch { + case pattr.Mode.IsNamedPipe(): + return fs.Pipe + case pattr.Mode.IsDir(): + return fs.Directory + case pattr.Mode.IsSymlink(): + return fs.Symlink + case pattr.Mode.IsCharacterDevice(): + return fs.CharacterDevice + case pattr.Mode.IsBlockDevice(): + return fs.BlockDevice + case pattr.Mode.IsSocket(): + return fs.Socket + case pattr.Mode.IsRegular(): + fallthrough + default: + return fs.RegularFile + } +} + +// ctime returns a change time from 9p attributes. +func ctime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.CTime { + return ktime.FromUnix(int64(pattr.CTimeSeconds), int64(pattr.CTimeNanoSeconds)) + } + // Approximate ctime with mtime if ctime isn't available. + return mtime(ctx, valid, pattr) +} + +// atime returns an access time from 9p attributes. +func atime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.ATime { + return ktime.FromUnix(int64(pattr.ATimeSeconds), int64(pattr.ATimeNanoSeconds)) + } + return ktime.NowFromContext(ctx) +} + +// mtime returns a modification time from 9p attributes. +func mtime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.MTime { + return ktime.FromUnix(int64(pattr.MTimeSeconds), int64(pattr.MTimeNanoSeconds)) + } + return ktime.NowFromContext(ctx) +} + +// links returns a hard link count from 9p attributes. +func links(valid p9.AttrMask, pattr p9.Attr) uint64 { + // For gofer file systems that support link count (such as a local file gofer), + // we return the link count reported by the underlying file system. + if valid.NLink { + return pattr.NLink + } + + // This node is likely backed by a file system that doesn't support links. + // + // We could readdir() and count children directories to provide an accurate + // link count. 
However this may be expensive since the gofer may be backed by remote + // storage. Instead, simply return 2 links for directories and 1 for everything else + // since no one relies on an accurate link count for gofer-based file systems. + switch ntype(pattr) { + case fs.Directory: + return 2 + default: + return 1 + } +} diff --git a/pkg/sentry/fs/gofer/cache_policy.go b/pkg/sentry/fs/gofer/cache_policy.go new file mode 100644 index 000000000..07a564e92 --- /dev/null +++ b/pkg/sentry/fs/gofer/cache_policy.go @@ -0,0 +1,186 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// cachePolicy is a 9p cache policy. It has methods that determine what to +// cache (if anything) for a given inode. +type cachePolicy int + +const ( + // Cache nothing. + cacheNone cachePolicy = iota + + // Use virtual file system cache for everything. + cacheAll + + // Use virtual file system cache for everything, but send writes to the + // fs agent immediately. + cacheAllWritethrough + + // Use the (host) page cache for reads/writes, but don't cache anything + // else. This allows the sandbox filesystem to stay in sync with any + // changes to the remote filesystem. 
+	//
+	// This policy should *only* be used with remote filesystems that
+	// donate their host FDs to the sandbox and thus use the host page
+	// cache, otherwise the dirent state will be inconsistent.
+	cacheRemoteRevalidating
+)
+
+// String returns the string name of the cache policy.
+func (cp cachePolicy) String() string {
+	switch cp {
+	case cacheNone:
+		return "cacheNone"
+	case cacheAll:
+		return "cacheAll"
+	case cacheAllWritethrough:
+		return "cacheAllWritethrough"
+	case cacheRemoteRevalidating:
+		return "cacheRemoteRevalidating"
+	default:
+		return "unknown"
+	}
+}
+
+func parseCachePolicy(policy string) (cachePolicy, error) {
+	switch policy {
+	case "fscache":
+		return cacheAll, nil
+	case "none":
+		return cacheNone, nil
+	case "fscache_writethrough":
+		return cacheAllWritethrough, nil
+	case "remote_revalidating":
+		return cacheRemoteRevalidating, nil
+	}
+	return cacheNone, fmt.Errorf("unsupported cache mode: %s", policy)
+}
+
+// cacheUAttrs determines whether unstable attributes should be cached for the
+// given inode.
+func (cp cachePolicy) cacheUAttrs(inode *fs.Inode) bool {
+	if !fs.IsFile(inode.StableAttr) && !fs.IsDir(inode.StableAttr) {
+		return false
+	}
+	return cp == cacheAll || cp == cacheAllWritethrough
+}
+
+// cacheReaddir determines whether readdir results should be cached.
+func (cp cachePolicy) cacheReaddir() bool {
+	return cp == cacheAll || cp == cacheAllWritethrough
+}
+
+// useCachingInodeOps determines whether the page cache should be used for the
+// given inode. If the remote filesystem donates host FDs to the sentry, then
+// the host kernel's page cache will be used, otherwise we will use a
+// sentry-internal page cache.
+func (cp cachePolicy) useCachingInodeOps(inode *fs.Inode) bool {
+	// Do cached IO for regular files only. Some "character devices" expect
+	// no caching. 
+	if !fs.IsFile(inode.StableAttr) {
+		return false
+	}
+	return cp == cacheAll || cp == cacheAllWritethrough
+}
+
+// writeThrough indicates whether writes to the file should be synced to the
+// gofer immediately.
+func (cp cachePolicy) writeThrough(inode *fs.Inode) bool {
+	return cp == cacheNone || cp == cacheAllWritethrough
+}
+
+// revalidate revalidates the child Inode if the cache policy allows it.
+//
+// Depending on the cache policy, revalidate will walk from the parent to the
+// child inode, and if any unstable attributes have changed, will update the
+// cached attributes on the child inode. If the walk fails, or the returned
+// inode id is different from the one being revalidated, then the entire Dirent
+// must be reloaded.
+func (cp cachePolicy) revalidate(ctx context.Context, name string, parent, child *fs.Inode) bool {
+	if cp == cacheAll || cp == cacheAllWritethrough {
+		return false
+	}
+
+	if cp == cacheNone {
+		return true
+	}
+
+	childIops, ok := child.InodeOperations.(*inodeOperations)
+	if !ok {
+		if _, ok := child.InodeOperations.(*fifo); ok {
+			return false
+		}
+		panic(fmt.Sprintf("revalidating inode operations of unknown type %T", child.InodeOperations))
+	}
+	parentIops, ok := parent.InodeOperations.(*inodeOperations)
+	if !ok {
+		panic(fmt.Sprintf("revalidating inode operations with parent of unknown type %T", parent.InodeOperations))
+	}
+
+	// Walk from parent to child again.
+	//
+	// TODO(b/112031682): If we have a directory FD in the parent
+	// inodeOperations, then we can use fstatat(2) to get the inode
+	// attributes instead of making this RPC.
+	qids, f, mask, attr, err := parentIops.fileState.file.walkGetAttr(ctx, []string{name})
+	if err != nil {
+		// Can't look up the name. Trigger reload.
+		return true
+	}
+	f.close(ctx)
+
+	// If the Path has changed, then we are not looking at the same file.
+	// We must reload. 
+ if qids[0].Path != childIops.fileState.key.Inode { + return true + } + + // If we are not caching unstable attrs, then there is nothing to + // update on this inode. + if !cp.cacheUAttrs(child) { + return false + } + + // Update the inode's cached unstable attrs. + s := childIops.session() + childIops.cachingInodeOps.UpdateUnstable(unstable(ctx, mask, attr, s.mounter, s.client)) + + return false +} + +// keep indicates that dirents should be kept pinned in the dirent tree even if +// there are no application references on the file. +func (cp cachePolicy) keep(d *fs.Dirent) bool { + if cp == cacheNone { + return false + } + sattr := d.Inode.StableAttr + // NOTE(b/31979197): Only cache files, directories, and symlinks. + return fs.IsFile(sattr) || fs.IsDir(sattr) || fs.IsSymlink(sattr) +} + +// cacheNegativeDirents indicates that negative dirents should be held in the +// dirent tree. +func (cp cachePolicy) cacheNegativeDirents() bool { + return cp == cacheAll || cp == cacheAllWritethrough +} diff --git a/pkg/sentry/fs/gofer/context_file.go b/pkg/sentry/fs/gofer/context_file.go new file mode 100644 index 000000000..125907d70 --- /dev/null +++ b/pkg/sentry/fs/gofer/context_file.go @@ -0,0 +1,218 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gofer + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/p9" +) + +// contextFile is a wrapper around p9.File that notifies the context that +// it's about to sleep before calling the Gofer over P9. +type contextFile struct { + file p9.File +} + +func (c *contextFile) walk(ctx context.Context, names []string) ([]p9.QID, contextFile, error) { + ctx.UninterruptibleSleepStart(false) + + q, f, err := c.file.Walk(names) + if err != nil { + ctx.UninterruptibleSleepFinish(false) + return nil, contextFile{}, err + } + ctx.UninterruptibleSleepFinish(false) + return q, contextFile{file: f}, nil +} + +func (c *contextFile) statFS(ctx context.Context) (p9.FSStat, error) { + ctx.UninterruptibleSleepStart(false) + s, err := c.file.StatFS() + ctx.UninterruptibleSleepFinish(false) + return s, err +} + +func (c *contextFile) getAttr(ctx context.Context, req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + ctx.UninterruptibleSleepStart(false) + q, m, a, err := c.file.GetAttr(req) + ctx.UninterruptibleSleepFinish(false) + return q, m, a, err +} + +func (c *contextFile) setAttr(ctx context.Context, valid p9.SetAttrMask, attr p9.SetAttr) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.SetAttr(valid, attr) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) getXattr(ctx context.Context, name string, size uint64) (string, error) { + ctx.UninterruptibleSleepStart(false) + val, err := c.file.GetXattr(name, size) + ctx.UninterruptibleSleepFinish(false) + return val, err +} + +func (c *contextFile) setXattr(ctx context.Context, name, value string, flags uint32) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.SetXattr(name, value, flags) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) listXattr(ctx context.Context, size uint64) (map[string]struct{}, error) { + ctx.UninterruptibleSleepStart(false) + xattrs, err := c.file.ListXattr(size) + 
ctx.UninterruptibleSleepFinish(false) + return xattrs, err +} + +func (c *contextFile) removeXattr(ctx context.Context, name string) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.RemoveXattr(name) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) allocate(ctx context.Context, mode p9.AllocateMode, offset, length uint64) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.Allocate(mode, offset, length) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) rename(ctx context.Context, directory contextFile, name string) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.Rename(directory.file, name) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) close(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.Close() + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) open(ctx context.Context, mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) { + ctx.UninterruptibleSleepStart(false) + f, q, u, err := c.file.Open(mode) + ctx.UninterruptibleSleepFinish(false) + return f, q, u, err +} + +func (c *contextFile) readAt(ctx context.Context, p []byte, offset uint64) (int, error) { + ctx.UninterruptibleSleepStart(false) + n, err := c.file.ReadAt(p, offset) + ctx.UninterruptibleSleepFinish(false) + return n, err +} + +func (c *contextFile) writeAt(ctx context.Context, p []byte, offset uint64) (int, error) { + ctx.UninterruptibleSleepStart(false) + n, err := c.file.WriteAt(p, offset) + ctx.UninterruptibleSleepFinish(false) + return n, err +} + +func (c *contextFile) fsync(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.FSync() + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) create(ctx context.Context, name string, flags p9.OpenFlags, permissions p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, error) { + 
ctx.UninterruptibleSleepStart(false) + fd, _, _, _, err := c.file.Create(name, flags, permissions, uid, gid) + ctx.UninterruptibleSleepFinish(false) + return fd, err +} + +func (c *contextFile) mkdir(ctx context.Context, name string, permissions p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + q, err := c.file.Mkdir(name, permissions, uid, gid) + ctx.UninterruptibleSleepFinish(false) + return q, err +} + +func (c *contextFile) symlink(ctx context.Context, oldName string, newName string, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + q, err := c.file.Symlink(oldName, newName, uid, gid) + ctx.UninterruptibleSleepFinish(false) + return q, err +} + +func (c *contextFile) link(ctx context.Context, target *contextFile, newName string) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.Link(target.file, newName) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) mknod(ctx context.Context, name string, permissions p9.FileMode, major uint32, minor uint32, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + q, err := c.file.Mknod(name, permissions, major, minor, uid, gid) + ctx.UninterruptibleSleepFinish(false) + return q, err +} + +func (c *contextFile) unlinkAt(ctx context.Context, name string, flags uint32) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.UnlinkAt(name, flags) + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) readdir(ctx context.Context, offset uint64, count uint32) ([]p9.Dirent, error) { + ctx.UninterruptibleSleepStart(false) + d, err := c.file.Readdir(offset, count) + ctx.UninterruptibleSleepFinish(false) + return d, err +} + +func (c *contextFile) readlink(ctx context.Context) (string, error) { + ctx.UninterruptibleSleepStart(false) + s, err := c.file.Readlink() + ctx.UninterruptibleSleepFinish(false) + return s, err +} + +func (c *contextFile) flush(ctx 
context.Context) error { + ctx.UninterruptibleSleepStart(false) + err := c.file.Flush() + ctx.UninterruptibleSleepFinish(false) + return err +} + +func (c *contextFile) walkGetAttr(ctx context.Context, names []string) ([]p9.QID, contextFile, p9.AttrMask, p9.Attr, error) { + ctx.UninterruptibleSleepStart(false) + q, f, m, a, err := c.file.WalkGetAttr(names) + if err != nil { + ctx.UninterruptibleSleepFinish(false) + return nil, contextFile{}, p9.AttrMask{}, p9.Attr{}, err + } + ctx.UninterruptibleSleepFinish(false) + return q, contextFile{file: f}, m, a, nil +} + +func (c *contextFile) connect(ctx context.Context, flags p9.ConnectFlags) (*fd.FD, error) { + ctx.UninterruptibleSleepStart(false) + f, err := c.file.Connect(flags) + ctx.UninterruptibleSleepFinish(false) + return f, err +} diff --git a/pkg/sentry/fs/gofer/device.go b/pkg/sentry/fs/gofer/device.go new file mode 100644 index 000000000..cbd3c5da2 --- /dev/null +++ b/pkg/sentry/fs/gofer/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import "gvisor.dev/gvisor/pkg/sentry/device" + +// goferDevice is the gofer virtual device. +var goferDevice = device.NewAnonMultiDevice() diff --git a/pkg/sentry/fs/gofer/fifo.go b/pkg/sentry/fs/gofer/fifo.go new file mode 100644 index 000000000..456557058 --- /dev/null +++ b/pkg/sentry/fs/gofer/fifo.go @@ -0,0 +1,40 @@ +// Copyright 2020 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// +stateify savable +type fifo struct { + fs.InodeOperations + fileIops *inodeOperations +} + +var _ fs.InodeOperations = (*fifo)(nil) + +// Rename implements fs.InodeOperations. It forwards the call to the underlying +// file inode to handle the file rename. Note that file key remains the same +// after the rename to keep the endpoint mapping. +func (i *fifo) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return i.fileIops.Rename(ctx, inode, oldParent, oldName, newParent, newName, replacement) +} + +// StatFS implements fs.InodeOperations. +func (i *fifo) StatFS(ctx context.Context) (fs.Info, error) { + return i.fileIops.StatFS(ctx) +} diff --git a/pkg/sentry/fs/gofer/file.go b/pkg/sentry/fs/gofer/file.go new file mode 100644 index 000000000..b2fcab127 --- /dev/null +++ b/pkg/sentry/fs/gofer/file.go @@ -0,0 +1,369 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + "syscall" + "time" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/metric" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +var ( + opensWX = metric.MustCreateNewUint64Metric("/gofer/opened_write_execute_file", true /* sync */, "Number of times a writable+executable file was opened from a gofer.") + opens9P = metric.MustCreateNewUint64Metric("/gofer/opens_9p", false /* sync */, "Number of times a 9P file was opened from a gofer.") + opensHost = metric.MustCreateNewUint64Metric("/gofer/opens_host", false /* sync */, "Number of times a host file was opened from a gofer.") + reads9P = metric.MustCreateNewUint64Metric("/gofer/reads_9p", false /* sync */, "Number of 9P file reads from a gofer.") + readWait9P = metric.MustCreateNewUint64NanosecondsMetric("/gofer/read_wait_9p", false /* sync */, "Time waiting on 9P file reads from a gofer, in nanoseconds.") + readsHost = metric.MustCreateNewUint64Metric("/gofer/reads_host", false /* sync */, "Number of host file reads from a gofer.") + readWaitHost = metric.MustCreateNewUint64NanosecondsMetric("/gofer/read_wait_host", false /* sync */, "Time waiting on host file reads from a gofer, in nanoseconds.") +) + +// fileOperations implements fs.FileOperations for a remote file system. 
+// +// +stateify savable +type fileOperations struct { + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosplice"` + waiter.AlwaysReady `state:"nosave"` + + // inodeOperations is the inodeOperations backing the file. It is protected + // by a reference held by File.Dirent.Inode which is stable until + // FileOperations.Release is called. + inodeOperations *inodeOperations `state:"wait"` + + // dirCursor is the directory cursor. + dirCursor string + + // handles are the opened remote file system handles, which may + // be shared with other files. + handles *handles `state:"nosave"` + + // flags are the flags used to open handles. + flags fs.FileFlags `state:"wait"` +} + +// fileOperations implements fs.FileOperations. +var _ fs.FileOperations = (*fileOperations)(nil) + +// NewFile returns a file. NewFile is not appropriate with host pipes and sockets. +// +// The `name` argument is only used to log a warning if we are returning a +// writeable+executable file. (A metric counter is incremented in this case as +// well.) Note that we cannot call d.BaseName() directly in this function, +// because that would lead to a lock order violation, since this is called in +// d.Create which holds d.mu, while d.BaseName() takes d.parent.mu, and the two +// locks must be taken in the opposite order. +func NewFile(ctx context.Context, dirent *fs.Dirent, name string, flags fs.FileFlags, i *inodeOperations, handles *handles) *fs.File { + // Remote file systems enforce readability/writability at an offset, + // see fs/9p/vfs_inode.c:v9fs_vfs_atomic_open -> fs/open.c:finish_open. + flags.Pread = true + flags.Pwrite = true + + if fs.IsFile(dirent.Inode.StableAttr) { + // If cache policy is "remote revalidating", then we must + // ensure that we have a host FD. Otherwise, the + // sentry-internal page cache will be used, and we can end up + // in an inconsistent state if the remote file changes. 
+		cp := dirent.Inode.InodeOperations.(*inodeOperations).session().cachePolicy
+		if cp == cacheRemoteRevalidating && handles.Host == nil {
+			panic(fmt.Sprintf("remote-revalidating cache policy requires gofer to donate host FD, but file %q did not have host FD", name))
+		}
+	}
+
+	f := &fileOperations{
+		inodeOperations: i,
+		handles:         handles,
+		flags:           flags,
+	}
+	if flags.Write {
+		if err := dirent.Inode.CheckPermission(ctx, fs.PermMask{Execute: true}); err == nil {
+			opensWX.Increment()
+			log.Warningf("Opened a writable executable: %q", name)
+		}
+	}
+	if handles.Host != nil {
+		opensHost.Increment()
+	} else {
+		opens9P.Increment()
+	}
+	return fs.NewFile(ctx, dirent, flags, f)
+}
+
+// Release implements fs.FileOperations.Release.
+func (f *fileOperations) Release() {
+	f.handles.DecRef()
+}
+
+// Readdir implements fs.FileOperations.Readdir.
+func (f *fileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) {
+	root := fs.RootFromContext(ctx)
+	if root != nil {
+		defer root.DecRef()
+	}
+
+	dirCtx := &fs.DirCtx{
+		Serializer: serializer,
+		DirCursor:  &f.dirCursor,
+	}
+	n, err := fs.DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset())
+	if f.inodeOperations.session().cachePolicy.cacheUAttrs(file.Dirent.Inode) {
+		f.inodeOperations.cachingInodeOps.TouchAccessTime(ctx, file.Dirent.Inode)
+	}
+	return n, err
+}
+
+// IterateDir implements fs.DirIterator.IterateDir.
+func (f *fileOperations) IterateDir(ctx context.Context, d *fs.Dirent, dirCtx *fs.DirCtx, offset int) (int, error) {
+	f.inodeOperations.readdirMu.Lock()
+	defer f.inodeOperations.readdirMu.Unlock()
+
+	// Fetch directory entries if needed.
+	if !f.inodeOperations.session().cachePolicy.cacheReaddir() || f.inodeOperations.readdirCache == nil {
+		entries, err := f.readdirAll(ctx)
+		if err != nil {
+			return offset, err
+		}
+
+		// Cache the readdir result. 
+ f.inodeOperations.readdirCache = fs.NewSortedDentryMap(entries) + } + + // Serialize the entries. + n, err := fs.GenericReaddir(dirCtx, f.inodeOperations.readdirCache) + return offset + n, err +} + +// readdirAll fetches fs.DentAttrs for f, using the attributes of g. +func (f *fileOperations) readdirAll(ctx context.Context) (map[string]fs.DentAttr, error) { + entries := make(map[string]fs.DentAttr) + var readOffset uint64 + for { + // We choose some arbitrary high number of directory entries (64k) and call + // Readdir until we've exhausted them all. + dirents, err := f.handles.File.readdir(ctx, readOffset, 64*1024) + if err != nil { + return nil, err + } + if len(dirents) == 0 { + // We're done, we reached EOF. + break + } + + // The last dirent contains the offset into the next set of dirents. The gofer + // returns the offset as an index into directories, not as a byte offset, because + // converting a byte offset to an index into directories entries is a huge pain. + // But everything is fine if we're consistent. + readOffset = dirents[len(dirents)-1].Offset + + for _, dirent := range dirents { + if dirent.Name == "." || dirent.Name == ".." { + // These must not be included in Readdir results. + continue + } + + // Find a best approximation of the type. + var nt fs.InodeType + switch dirent.Type { + case p9.TypeDir: + nt = fs.Directory + case p9.TypeSymlink: + nt = fs.Symlink + default: + nt = fs.RegularFile + } + + // Install the DentAttr. + entries[dirent.Name] = fs.DentAttr{ + Type: nt, + // Construct the key to find the virtual inode. + // Directory entries reside on the same Device + // and SecondaryDevice as their parent. 
+ InodeID: goferDevice.Map(device.MultiDeviceKey{ + Device: f.inodeOperations.fileState.key.Device, + SecondaryDevice: f.inodeOperations.fileState.key.SecondaryDevice, + Inode: dirent.QID.Path, + }), + } + } + } + + return entries, nil +} + +// maybeSync will call FSync on the file if either the cache policy or file +// flags require it. +func (f *fileOperations) maybeSync(ctx context.Context, file *fs.File, offset, n int64) error { + if n == 0 { + // Nothing to sync. + return nil + } + + if f.inodeOperations.session().cachePolicy.writeThrough(file.Dirent.Inode) { + // Call WriteOut directly, as some "writethrough" filesystems + // do not support sync. + return f.inodeOperations.cachingInodeOps.WriteOut(ctx, file.Dirent.Inode) + } + + flags := file.Flags() + var syncType fs.SyncType + switch { + case flags.Direct || flags.Sync: + syncType = fs.SyncAll + case flags.DSync: + syncType = fs.SyncData + default: + // No need to sync. + return nil + } + + return f.Fsync(ctx, file, offset, offset+n, syncType) +} + +// Write implements fs.FileOperations.Write. +func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + if fs.IsDir(file.Dirent.Inode.StableAttr) { + // Not all remote file systems enforce this so this client does. + return 0, syserror.EISDIR + } + + var ( + n int64 + err error + ) + // The write is handled in different ways depending on the cache policy + // and availability of a host-mappable FD. + if f.inodeOperations.session().cachePolicy.useCachingInodeOps(file.Dirent.Inode) { + n, err = f.inodeOperations.cachingInodeOps.Write(ctx, src, offset) + } else if f.inodeOperations.fileState.hostMappable != nil { + n, err = f.inodeOperations.fileState.hostMappable.Write(ctx, src, offset) + } else { + n, err = src.CopyInTo(ctx, f.handles.readWriterAt(ctx, offset)) + } + + // We may need to sync the written bytes. + if syncErr := f.maybeSync(ctx, file, offset, n); syncErr != nil { + // Sync failed. 
Report 0 bytes written, since none of them are + // guaranteed to have been synced. + return 0, syncErr + } + + return n, err +} + +// incrementReadCounters increments the read counters for the read starting at the given time. We +// use this function rather than using a defer in Read() to avoid the performance hit of defer. +func (f *fileOperations) incrementReadCounters(start time.Time) { + if f.handles.Host != nil { + readsHost.Increment() + fs.IncrementWait(readWaitHost, start) + } else { + reads9P.Increment() + fs.IncrementWait(readWait9P, start) + } +} + +// Read implements fs.FileOperations.Read. +func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + var start time.Time + if fs.RecordWaitTime { + start = time.Now() + } + if fs.IsDir(file.Dirent.Inode.StableAttr) { + // Not all remote file systems enforce this so this client does. + f.incrementReadCounters(start) + return 0, syserror.EISDIR + } + + if f.inodeOperations.session().cachePolicy.useCachingInodeOps(file.Dirent.Inode) { + n, err := f.inodeOperations.cachingInodeOps.Read(ctx, file, dst, offset) + f.incrementReadCounters(start) + return n, err + } + n, err := dst.CopyOutFrom(ctx, f.handles.readWriterAt(ctx, offset)) + f.incrementReadCounters(start) + return n, err +} + +// Fsync implements fs.FileOperations.Fsync. +func (f *fileOperations) Fsync(ctx context.Context, file *fs.File, start, end int64, syncType fs.SyncType) error { + switch syncType { + case fs.SyncAll, fs.SyncData: + if err := file.Dirent.Inode.WriteOut(ctx); err != nil { + return err + } + fallthrough + case fs.SyncBackingStorage: + // Sync remote caches. + if f.handles.Host != nil { + // Sync the host fd directly. + return syscall.Fsync(f.handles.Host.FD()) + } + // Otherwise sync on the p9.File handle. + return f.handles.File.fsync(ctx) + } + panic("invalid sync type") +} + +// Flush implements fs.FileOperations.Flush. 
+func (f *fileOperations) Flush(ctx context.Context, file *fs.File) error { + // If this file is not opened writable then there is nothing to flush. + // We do this because some p9 server implementations of Flush are + // over-zealous. + // + // FIXME(edahlgren): weaken these implementations and remove this check. + if !file.Flags().Write { + return nil + } + // Execute the flush. + return f.handles.File.flush(ctx) +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (f *fileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + return f.inodeOperations.configureMMap(file, opts) +} + +// UnstableAttr implements fs.FileOperations.UnstableAttr. +func (f *fileOperations) UnstableAttr(ctx context.Context, file *fs.File) (fs.UnstableAttr, error) { + s := f.inodeOperations.session() + if s.cachePolicy.cacheUAttrs(file.Dirent.Inode) { + return f.inodeOperations.cachingInodeOps.UnstableAttr(ctx, file.Dirent.Inode) + } + // Use f.handles.File, which represents 9P fids that have been opened, + // instead of inodeFileState.file, which represents 9P fids that have not. + // This may be significantly more efficient in some implementations. + _, valid, pattr, err := getattr(ctx, f.handles.File) + if err != nil { + return fs.UnstableAttr{}, err + } + return unstable(ctx, valid, pattr, s.mounter, s.client), nil +} + +// Seek implements fs.FileOperations.Seek. +func (f *fileOperations) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return fsutil.SeekWithDirCursor(ctx, file, whence, offset, &f.dirCursor) +} diff --git a/pkg/sentry/fs/gofer/file_state.go b/pkg/sentry/fs/gofer/file_state.go new file mode 100644 index 000000000..edd6576aa --- /dev/null +++ b/pkg/sentry/fs/gofer/file_state.go @@ -0,0 +1,44 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// afterLoad is invoked by stateify. +func (f *fileOperations) afterLoad() { + load := func() error { + f.inodeOperations.fileState.waitForLoad() + + // Manually load the open handles. + var err error + + // The file may have been opened with Truncate, but we don't + // want to re-open it with Truncate or we will lose data. + flags := f.flags + flags.Truncate = false + + f.handles, err = f.inodeOperations.fileState.getHandles(context.Background(), flags, f.inodeOperations.cachingInodeOps) + if err != nil { + return fmt.Errorf("failed to re-open handle: %v", err) + } + return nil + } + fs.Async(fs.CatchError(load)) +} diff --git a/pkg/sentry/fs/gofer/fs.go b/pkg/sentry/fs/gofer/fs.go new file mode 100644 index 000000000..8ae2d78d7 --- /dev/null +++ b/pkg/sentry/fs/gofer/fs.go @@ -0,0 +1,267 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gofer implements a remote 9p filesystem. +package gofer + +import ( + "errors" + "fmt" + "strconv" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// The following are options defined by the Linux 9p client that we support, +// see Documentation/filesystems/9p.txt. +const ( + // The transport method. + transportKey = "trans" + + // The file tree to access when the file server + // is exporting several file systems. Stands for "attach name". + anameKey = "aname" + + // The caching policy. + cacheKey = "cache" + + // The file descriptor for reading with trans=fd. + readFDKey = "rfdno" + + // The file descriptor for writing with trans=fd. + writeFDKey = "wfdno" + + // The number of bytes to use for a 9p packet payload. + msizeKey = "msize" + + // The 9p protocol version. + versionKey = "version" + + // If set to true allows the creation of unix domain sockets inside the + // sandbox using files backed by the gofer. If set to false, unix sockets + // cannot be bound to gofer files without an overlay on top. + privateUnixSocketKey = "privateunixsocket" + + // If present, sets CachingInodeOperationsOptions.LimitHostFDTranslation to + // true. + limitHostFDTranslationKey = "limit_host_fd_translation" + + // overlayfsStaleRead if present closes cached readonly file after the first + // write. This is done to workaround a limitation of Linux overlayfs. + overlayfsStaleRead = "overlayfs_stale_read" +) + +// defaultAname is the default attach name. +const defaultAname = "/" + +// defaultMSize is the message size used for chunking large read and write requests. +// This has been tested to give good enough performance up to 64M. +const defaultMSize = 1024 * 1024 // 1M + +// defaultVersion is the default 9p protocol version. Will negotiate downwards with +// file server if needed. 
+var defaultVersion = p9.HighestVersionString() + +// Number of names of non-children to cache, preventing unneeded walks. 64 is +// plenty for nodejs, which seems to stat about 4 children on every require(). +const nonChildrenCacheSize = 64 + +var ( + // ErrNoTransport is returned when there is no 'trans' option. + ErrNoTransport = errors.New("missing required option: 'trans='") + + // ErrFileNoReadFD is returned when there is no 'rfdno' option. + ErrFileNoReadFD = errors.New("missing required option: 'rfdno='") + + // ErrFileNoWriteFD is returned when there is no 'wfdno' option. + ErrFileNoWriteFD = errors.New("missing required option: 'wfdno='") +) + +// filesystem is a 9p client. +// +// +stateify savable +type filesystem struct{} + +var _ fs.Filesystem = (*filesystem)(nil) + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// The name matches fs/9p/vfs_super.c:v9fs_fs_type.name. +const FilesystemName = "9p" + +// Name is the name of the filesystem. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount prohibits users from using mount(2) with this file system. +func (*filesystem) AllowUserMount() bool { + return false +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. +func (*filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// The 9p Linux client returns FS_RENAME_DOES_D_MOVE, see fs/9p/vfs_super.c. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns an attached 9p client that can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, _ interface{}) (*fs.Inode, error) { + // Parse and validate the mount options. + o, err := options(data) + if err != nil { + return nil, err + } + + // Construct the 9p root to mount. 
We intentionally diverge from Linux in that + // the first Tversion and Tattach requests are done lazily. + return Root(ctx, device, f, flags, o) +} + +// opts are parsed 9p mount options. +type opts struct { + fd int + aname string + policy cachePolicy + msize uint32 + version string + privateunixsocket bool + limitHostFDTranslation bool + overlayfsStaleRead bool +} + +// options parses mount(2) data into structured options. +func options(data string) (opts, error) { + var o opts + + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Check for the required 'trans=fd' option. + trans, ok := options[transportKey] + if !ok { + return o, ErrNoTransport + } + if trans != "fd" { + return o, fmt.Errorf("unsupported transport: 'trans=%s'", trans) + } + delete(options, transportKey) + + // Check for the required 'rfdno=' option. + srfd, ok := options[readFDKey] + if !ok { + return o, ErrFileNoReadFD + } + delete(options, readFDKey) + + // Check for the required 'wfdno=' option. + swfd, ok := options[writeFDKey] + if !ok { + return o, ErrFileNoWriteFD + } + delete(options, writeFDKey) + + // Parse the read fd. + rfd, err := strconv.Atoi(srfd) + if err != nil { + return o, fmt.Errorf("invalid fd for 'rfdno=%s': %v", srfd, err) + } + + // Parse the write fd. + wfd, err := strconv.Atoi(swfd) + if err != nil { + return o, fmt.Errorf("invalid fd for 'wfdno=%s': %v", swfd, err) + } + + // Require that the read and write fd are the same. + if rfd != wfd { + return o, fmt.Errorf("fd in 'rfdno=%d' and 'wfdno=%d' must match", rfd, wfd) + } + o.fd = rfd + + // Parse the attach name. + o.aname = defaultAname + if an, ok := options[anameKey]; ok { + o.aname = an + delete(options, anameKey) + } + + // Parse the cache policy. Reject unsupported policies. 
+ o.policy = cacheAll + if policy, ok := options[cacheKey]; ok { + cp, err := parseCachePolicy(policy) + if err != nil { + return o, err + } + o.policy = cp + delete(options, cacheKey) + } + + // Parse the message size. Reject malformed options. + o.msize = uint32(defaultMSize) + if m, ok := options[msizeKey]; ok { + i, err := strconv.ParseUint(m, 10, 32) + if err != nil { + return o, fmt.Errorf("invalid message size for 'msize=%s': %v", m, err) + } + o.msize = uint32(i) + delete(options, msizeKey) + } + + // Parse the protocol version. + o.version = defaultVersion + if v, ok := options[versionKey]; ok { + o.version = v + delete(options, versionKey) + } + + // Parse the unix socket policy. Reject non-booleans. + if v, ok := options[privateUnixSocketKey]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return o, fmt.Errorf("invalid boolean value for '%s=%s': %v", privateUnixSocketKey, v, err) + } + o.privateunixsocket = b + delete(options, privateUnixSocketKey) + } + + if _, ok := options[limitHostFDTranslationKey]; ok { + o.limitHostFDTranslation = true + delete(options, limitHostFDTranslationKey) + } + + if _, ok := options[overlayfsStaleRead]; ok { + o.overlayfsStaleRead = true + delete(options, overlayfsStaleRead) + } + + // Fail to attach if the caller wanted us to do something that we + // don't support. + if len(options) > 0 { + return o, fmt.Errorf("unsupported mount options: %v", options) + } + + return o, nil +} diff --git a/pkg/sentry/fs/gofer/gofer_test.go b/pkg/sentry/fs/gofer/gofer_test.go new file mode 100644 index 000000000..2df2fe889 --- /dev/null +++ b/pkg/sentry/fs/gofer/gofer_test.go @@ -0,0 +1,310 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + "syscall" + "testing" + "time" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/p9/p9test" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// rootTest runs a test with a p9 mock and an fs.InodeOperations created from +// the attached root directory. The root file will be closed and client +// disconnected, but additional files must be closed manually. +func rootTest(t *testing.T, name string, cp cachePolicy, fn func(context.Context, *p9test.Harness, *p9test.Mock, *fs.Inode)) { + t.Run(name, func(t *testing.T) { + h, c := p9test.NewHarness(t) + defer h.Finish() + + // Create a new root. Note that we pass an empty, but non-nil + // map here. This allows tests to extend the root children + // dynamically. + root := h.NewDirectory(map[string]p9test.Generator{})(nil) + + // Return this as the root. + h.Attacher.EXPECT().Attach().Return(root, nil).Times(1) + + // ... and open via the client. + rootFile, err := c.Attach("/") + if err != nil { + t.Fatalf("unable to attach: %v", err) + } + defer rootFile.Close() + + // Wrap an a session. + s := &session{ + mounter: fs.RootOwner, + cachePolicy: cp, + client: c, + } + + // ... and an INode, with only the mode being explicitly valid for now. 
+ ctx := contexttest.Context(t) + sattr, rootInodeOperations := newInodeOperations(ctx, s, contextFile{ + file: rootFile, + }, root.QID, p9.AttrMaskAll(), root.Attr) + m := fs.NewMountSource(ctx, s, &filesystem{}, fs.MountSourceFlags{}) + rootInode := fs.NewInode(ctx, rootInodeOperations, m, sattr) + + // Ensure that the cache is fully invalidated, so that any + // close actions actually take place before the full harness is + // torn down. + defer func() { + m.FlushDirentRefs() + + // Wait for all resources to be released, otherwise the + // operations may fail after we close the rootFile. + fs.AsyncBarrier() + }() + + // Execute the test. + fn(ctx, h, root, rootInode) + }) +} + +func TestLookup(t *testing.T) { + type lookupTest struct { + // Name of the test. + name string + + // Expected return value. + want error + } + + tests := []lookupTest{ + { + name: "mock Walk passes (function succeeds)", + want: nil, + }, + { + name: "mock Walk fails (function fails)", + want: syscall.ENOENT, + }, + } + + const file = "file" // The walked target file. + + for _, test := range tests { + rootTest(t, test.name, cacheNone, func(ctx context.Context, h *p9test.Harness, rootFile *p9test.Mock, rootInode *fs.Inode) { + // Setup the appropriate result. + rootFile.WalkCallback = func() error { + return test.want + } + if test.want == nil { + // Set the contents of the root. We expect a + // normal file generator for the file added above. This is + // overridden by setting WalkCallback in the mock. + rootFile.AddChild(file, h.NewFile()) + } + + // Call function. + dirent, err := rootInode.Lookup(ctx, file) + + // Unwrap the InodeOperations. + var newInodeOperations fs.InodeOperations + if dirent != nil { + if dirent.IsNegative() { + err = syscall.ENOENT + } else { + newInodeOperations = dirent.Inode.InodeOperations + } + } + + // Check return values. 
+ if err != test.want { + t.Errorf("Lookup got err %v, want %v", err, test.want) + } + if err == nil && newInodeOperations == nil { + t.Errorf("Lookup got non-nil err and non-nil node, wanted at least one non-nil") + } + }) + } +} + +func TestRevalidation(t *testing.T) { + type revalidationTest struct { + cachePolicy cachePolicy + + // Whether dirent should be reloaded before any modifications. + preModificationWantReload bool + + // Whether dirent should be reloaded after updating an unstable + // attribute on the remote fs. + postModificationWantReload bool + + // Whether dirent unstable attributes should be updated after + // updating an attribute on the remote fs. + postModificationWantUpdatedAttrs bool + + // Whether dirent should be reloaded after the remote has + // removed the file. + postRemovalWantReload bool + } + + tests := []revalidationTest{ + { + // Policy cacheNone causes Revalidate to always return + // true. + cachePolicy: cacheNone, + preModificationWantReload: true, + postModificationWantReload: true, + postModificationWantUpdatedAttrs: true, + postRemovalWantReload: true, + }, + { + // Policy cacheAll causes Revalidate to always return + // false. + cachePolicy: cacheAll, + preModificationWantReload: false, + postModificationWantReload: false, + postModificationWantUpdatedAttrs: false, + postRemovalWantReload: false, + }, + { + // Policy cacheAllWritethrough causes Revalidate to + // always return false. + cachePolicy: cacheAllWritethrough, + preModificationWantReload: false, + postModificationWantReload: false, + postModificationWantUpdatedAttrs: false, + postRemovalWantReload: false, + }, + { + // Policy cacheRemoteRevalidating causes Revalidate to + // return update cached unstable attrs, and returns + // true only when the remote inode itself has been + // removed or replaced. 
+ cachePolicy: cacheRemoteRevalidating, + preModificationWantReload: false, + postModificationWantReload: false, + postModificationWantUpdatedAttrs: true, + postRemovalWantReload: true, + }, + } + + const file = "file" // The file walked below. + + for _, test := range tests { + name := fmt.Sprintf("cachepolicy=%s", test.cachePolicy) + rootTest(t, name, test.cachePolicy, func(ctx context.Context, h *p9test.Harness, rootFile *p9test.Mock, rootInode *fs.Inode) { + // Wrap in a dirent object. + rootDir := fs.NewDirent(ctx, rootInode, "root") + + // Create a mock file as a child of the root. We save when + // this is generated, so that when the time changes, we + // can update the original entry. + var origMocks []*p9test.Mock + rootFile.AddChild(file, func(parent *p9test.Mock) *p9test.Mock { + // Generate a regular file that has a consistent + // path number. This might be used by + // validation so we don't change it. + m := h.NewMock(parent, 0, p9.Attr{ + Mode: p9.ModeRegular, + }) + origMocks = append(origMocks, m) + return m + }) + + // Do the walk. + dirent, err := rootDir.Walk(ctx, rootDir, file) + if err != nil { + t.Fatalf("Lookup failed: %v", err) + } + + // We must release the dirent, or the test will fail + // with a reference leak. This is tracked by p9test. + defer dirent.DecRef() + + // Walk again. Depending on the cache policy, we may + // get a new dirent. + newDirent, err := rootDir.Walk(ctx, rootDir, file) + if err != nil { + t.Fatalf("Lookup failed: %v", err) + } + if test.preModificationWantReload && dirent == newDirent { + t.Errorf("Lookup with cachePolicy=%s got old dirent %+v, wanted a new dirent", test.cachePolicy, dirent) + } + if !test.preModificationWantReload && dirent != newDirent { + t.Errorf("Lookup with cachePolicy=%s got new dirent %+v, wanted old dirent %+v", test.cachePolicy, newDirent, dirent) + } + newDirent.DecRef() // See above. + + // Modify the underlying mocked file's modification + // time for the next walk that occurs. 
+ nowSeconds := time.Now().Unix() + rootFile.AddChild(file, func(parent *p9test.Mock) *p9test.Mock { + // Ensure that the path is the same as above, + // but we change only the modification time of + // the file. + return h.NewMock(parent, 0, p9.Attr{ + Mode: p9.ModeRegular, + MTimeSeconds: uint64(nowSeconds), + }) + }) + + // We also modify the original time, so that GetAttr + // behaves as expected for the caching case. + for _, m := range origMocks { + m.Attr.MTimeSeconds = uint64(nowSeconds) + } + + // Walk again. Depending on the cache policy, we may + // get a new dirent. + newDirent, err = rootDir.Walk(ctx, rootDir, file) + if err != nil { + t.Fatalf("Lookup failed: %v", err) + } + if test.postModificationWantReload && dirent == newDirent { + t.Errorf("Lookup with cachePolicy=%s got old dirent, wanted a new dirent", test.cachePolicy) + } + if !test.postModificationWantReload && dirent != newDirent { + t.Errorf("Lookup with cachePolicy=%s got new dirent, wanted old dirent", test.cachePolicy) + } + uattrs, err := newDirent.Inode.UnstableAttr(ctx) + if err != nil { + t.Fatalf("Error getting unstable attrs: %v", err) + } + gotModTimeSeconds := uattrs.ModificationTime.Seconds() + if test.postModificationWantUpdatedAttrs && gotModTimeSeconds != nowSeconds { + t.Fatalf("Lookup with cachePolicy=%s got new modification time %v, wanted %v", test.cachePolicy, gotModTimeSeconds, nowSeconds) + } + newDirent.DecRef() // See above. + + // Remove the file from the remote fs, subsequent walks + // should now fail to find anything. + rootFile.RemoveChild(file) + + // Walk again. Depending on the cache policy, we may + // get ENOENT. 
+ newDirent, err = rootDir.Walk(ctx, rootDir, file) + if test.postRemovalWantReload && err == nil { + t.Errorf("Lookup with cachePolicy=%s got nil error, wanted ENOENT", test.cachePolicy) + } + if !test.postRemovalWantReload && (err != nil || dirent != newDirent) { + t.Errorf("Lookup with cachePolicy=%s got new dirent and error %v, wanted old dirent and nil error", test.cachePolicy, err) + } + if err == nil { + newDirent.DecRef() // See above. + } + }) + } +} diff --git a/pkg/sentry/fs/gofer/handles.go b/pkg/sentry/fs/gofer/handles.go new file mode 100644 index 000000000..fc14249be --- /dev/null +++ b/pkg/sentry/fs/gofer/handles.go @@ -0,0 +1,140 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "io" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/secio" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// handles are the open handles of a gofer file. They are reference counted to +// support open handle sharing between files for read only filesystems. +// +// If Host != nil then it will be used exclusively over File. +type handles struct { + refs.AtomicRefCount + + // File is a p9.File handle. Must not be nil. + File contextFile + + // Host is an *fd.FD handle. May be nil. 
+ Host *fd.FD + + // isHostBorrowed tells whether 'Host' is owned or borrowed. If owned, it's + // closed on destruction, otherwise it's released. + isHostBorrowed bool +} + +// DecRef drops a reference on handles. +func (h *handles) DecRef() { + h.DecRefWithDestructor(func() { + if h.Host != nil { + if h.isHostBorrowed { + h.Host.Release() + } else { + if err := h.Host.Close(); err != nil { + log.Warningf("error closing host file: %v", err) + } + } + } + if err := h.File.close(context.Background()); err != nil { + log.Warningf("error closing p9 file: %v", err) + } + }) +} + +func newHandles(ctx context.Context, client *p9.Client, file contextFile, flags fs.FileFlags) (*handles, error) { + _, newFile, err := file.walk(ctx, nil) + if err != nil { + return nil, err + } + + var p9flags p9.OpenFlags + switch { + case flags.Read && flags.Write: + p9flags = p9.ReadWrite + case flags.Read && !flags.Write: + p9flags = p9.ReadOnly + case !flags.Read && flags.Write: + p9flags = p9.WriteOnly + default: + panic("impossible fs.FileFlags") + } + if flags.Truncate && p9.VersionSupportsOpenTruncateFlag(client.Version()) { + p9flags |= p9.OpenTruncate + } + + hostFile, _, _, err := newFile.open(ctx, p9flags) + if err != nil { + newFile.close(ctx) + return nil, err + } + h := handles{ + File: newFile, + Host: hostFile, + } + h.EnableLeakCheck("gofer.handles") + return &h, nil +} + +type handleReadWriter struct { + ctx context.Context + h *handles + off int64 +} + +func (h *handles) readWriterAt(ctx context.Context, offset int64) *handleReadWriter { + return &handleReadWriter{ctx, h, offset} +} + +// ReadToBlocks implements safemem.Reader.ReadToBlocks. 
+func (rw *handleReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { + var r io.Reader + if rw.h.Host != nil { + r = secio.NewOffsetReader(rw.h.Host, rw.off) + } else { + r = &p9.ReadWriterFile{File: rw.h.File.file, Offset: uint64(rw.off)} + } + + rw.ctx.UninterruptibleSleepStart(false) + defer rw.ctx.UninterruptibleSleepFinish(false) + n, err := safemem.FromIOReader{r}.ReadToBlocks(dsts) + rw.off += int64(n) + return n, err +} + +// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. +func (rw *handleReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) { + var w io.Writer + if rw.h.Host != nil { + w = secio.NewOffsetWriter(rw.h.Host, rw.off) + } else { + w = &p9.ReadWriterFile{File: rw.h.File.file, Offset: uint64(rw.off)} + } + + rw.ctx.UninterruptibleSleepStart(false) + defer rw.ctx.UninterruptibleSleepFinish(false) + n, err := safemem.FromIOWriter{w}.WriteFromBlocks(srcs) + rw.off += int64(n) + return n, err +} diff --git a/pkg/sentry/fs/gofer/inode.go b/pkg/sentry/fs/gofer/inode.go new file mode 100644 index 000000000..51d7368a1 --- /dev/null +++ b/pkg/sentry/fs/gofer/inode.go @@ -0,0 +1,719 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gofer + +import ( + "errors" + "syscall" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fdpipe" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/host" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" +) + +// inodeOperations implements fs.InodeOperations. +// +// +stateify savable +type inodeOperations struct { + fsutil.InodeNotVirtual `state:"nosave"` + + // fileState implements fs.CachedFileObject. It exists + // to break a circular load dependency between inodeOperations + // and cachingInodeOps (below). + fileState *inodeFileState `state:"wait"` + + // cachingInodeOps implement memmap.Mappable for inodeOperations. + cachingInodeOps *fsutil.CachingInodeOperations + + // readdirMu protects readdirCache and concurrent Readdirs. + readdirMu sync.Mutex `state:"nosave"` + + // readdirCache is a cache of readdir results in the form of + // a fs.SortedDentryMap. + // + // Starts out as nil, and is initialized under readdirMu lazily; + // invalidating the cache means setting it to nil. + readdirCache *fs.SortedDentryMap `state:"nosave"` +} + +// inodeFileState implements fs.CachedFileObject and otherwise fully +// encapsulates state that needs to be manually loaded on restore for +// this file object. +// +// This unfortunate structure exists because fs.CachingInodeOperations +// defines afterLoad and therefore cannot be lazily loaded (to break a +// circular load dependency between it and inodeOperations). Even with +// lazy loading, this approach defines the dependencies between objects +// and the expected load behavior more concretely. 
+// +// +stateify savable +type inodeFileState struct { + // s is common file system state for Gofers. + s *session `state:"wait"` + + // MultiDeviceKey consists of: + // + // * Device: file system device from a specific gofer. + // * SecondaryDevice: unique identifier of the attach point. + // * Inode: the inode of this resource, unique per Device. + // + // These fields combined enable consistent hashing of virtual inodes + // on goferDevice. + key device.MultiDeviceKey `state:"nosave"` + + // file is the p9 file that contains a single unopened fid. + file contextFile `state:"nosave"` + + // sattr caches the stable attributes. + sattr fs.StableAttr `state:"wait"` + + // handlesMu protects the below fields. + handlesMu sync.RWMutex `state:"nosave"` + + // If readHandles is non-nil, it holds handles that are either read-only or + // read/write. If writeHandles is non-nil, it holds write-only handles if + // writeHandlesRW is false, and read/write handles if writeHandlesRW is + // true. + // + // Once readHandles becomes non-nil, it can't be changed until + // inodeFileState.Release()*, because of a defect in the + // fsutil.CachedFileObject interface: there's no way for the caller of + // fsutil.CachedFileObject.FD() to keep the returned FD open, so if we + // racily replace readHandles after inodeFileState.FD() has returned + // readHandles.Host.FD(), fsutil.CachingInodeOperations may use a closed + // FD. writeHandles can be changed if writeHandlesRW is false, since + // inodeFileState.FD() can't return a write-only FD, but can't be changed + // if writeHandlesRW is true for the same reason. + // + // * There is one notable exception in recreateReadHandles(), where it dup's + // the FD and invalidates the page cache. + readHandles *handles `state:"nosave"` + writeHandles *handles `state:"nosave"` + writeHandlesRW bool `state:"nosave"` + + // loading is acquired when the inodeFileState begins an asynchronous + // load. It releases when the load is complete. 
Callers that require all + // state to be available should call waitForLoad() to ensure that. + loading sync.Mutex `state:".(struct{})"` + + // savedUAttr is only allocated during S/R. It points to the save-time + // unstable attributes and is used to validate restore-time ones. + // + // Note that these unstable attributes are only used to detect cross-S/R + // external file system metadata changes. They may differ from the + // cached unstable attributes in cachingInodeOps, as that might differ + // from the external file system attributes if there had been WriteOut + // failures. S/R is transparent to Sentry and the latter will continue + // using its cached values after restore. + savedUAttr *fs.UnstableAttr + + // hostMappable is created when using 'cacheRemoteRevalidating' to map pages + // directly from host. + hostMappable *fsutil.HostMappable +} + +// Release releases file handles. +func (i *inodeFileState) Release(ctx context.Context) { + i.file.close(ctx) + if i.readHandles != nil { + i.readHandles.DecRef() + } + if i.writeHandles != nil { + i.writeHandles.DecRef() + } +} + +func (i *inodeFileState) canShareHandles() bool { + // Only share handles for regular files, since for other file types, + // distinct handles may have special semantics even if they represent the + // same file. Disable handle sharing for cache policy cacheNone, since this + // is legacy behavior. + return fs.IsFile(i.sattr) && i.s.cachePolicy != cacheNone +} + +// Preconditions: i.handlesMu must be locked for writing. +func (i *inodeFileState) setSharedHandlesLocked(flags fs.FileFlags, h *handles) { + if flags.Read && i.readHandles == nil { + h.IncRef() + i.readHandles = h + } + if flags.Write { + if i.writeHandles == nil { + h.IncRef() + i.writeHandles = h + i.writeHandlesRW = flags.Read + } else if !i.writeHandlesRW && flags.Read { + // Upgrade i.writeHandles. 
+ i.writeHandles.DecRef() + h.IncRef() + i.writeHandles = h + i.writeHandlesRW = flags.Read + } + } +} + +// getHandles returns a set of handles for a new file using i opened with the +// given flags. +func (i *inodeFileState) getHandles(ctx context.Context, flags fs.FileFlags, cache *fsutil.CachingInodeOperations) (*handles, error) { + if !i.canShareHandles() { + return newHandles(ctx, i.s.client, i.file, flags) + } + + i.handlesMu.Lock() + h, invalidate, err := i.getHandlesLocked(ctx, flags) + i.handlesMu.Unlock() + + if invalidate { + cache.NotifyChangeFD() + if i.hostMappable != nil { + i.hostMappable.NotifyChangeFD() + } + } + + return h, err +} + +// getHandlesLocked returns a pointer to cached handles and a boolean indicating +// whether previously open read handle was recreated. Host mappings must be +// invalidated if so. +func (i *inodeFileState) getHandlesLocked(ctx context.Context, flags fs.FileFlags) (*handles, bool, error) { + // Check if we are able to use cached handles. + if flags.Truncate && p9.VersionSupportsOpenTruncateFlag(i.s.client.Version()) { + // If we are truncating (and the gofer supports it), then we + // always need a new handle. Don't return one from the cache. + } else if flags.Write { + if i.writeHandles != nil && (i.writeHandlesRW || !flags.Read) { + // File is opened for writing, and we have cached write + // handles that we can use. + i.writeHandles.IncRef() + return i.writeHandles, false, nil + } + } else if i.readHandles != nil { + // File is opened for reading and we have cached handles. + i.readHandles.IncRef() + return i.readHandles, false, nil + } + + // Get new handles and cache them for future sharing. 
+ h, err := newHandles(ctx, i.s.client, i.file, flags) + if err != nil { + return nil, false, err + } + + // Read handles invalidation is needed if: + // - Mount option 'overlayfs_stale_read' is set + // - Read handle is open: nothing to invalidate otherwise + // - Write handle is not open: file was not open for write and is being open + // for write now (will trigger copy up in overlayfs). + invalidate := false + if i.s.overlayfsStaleRead && i.readHandles != nil && i.writeHandles == nil && flags.Write { + if err := i.recreateReadHandles(ctx, h, flags); err != nil { + return nil, false, err + } + invalidate = true + } + i.setSharedHandlesLocked(flags, h) + return h, invalidate, nil +} + +func (i *inodeFileState) recreateReadHandles(ctx context.Context, writer *handles, flags fs.FileFlags) error { + h := writer + if !flags.Read { + // Writer can't be used for read, must create a new handle. + var err error + h, err = newHandles(ctx, i.s.client, i.file, fs.FileFlags{Read: true}) + if err != nil { + return err + } + defer h.DecRef() + } + + if i.readHandles.Host == nil { + // If current readHandles doesn't have a host FD, it can simply be replaced. + i.readHandles.DecRef() + + h.IncRef() + i.readHandles = h + return nil + } + + if h.Host == nil { + // Current read handle has a host FD and can't be replaced with one that + // doesn't, because it breaks fsutil.CachedFileObject.FD() contract. + log.Warningf("Read handle can't be invalidated, reads may return stale data") + return nil + } + + // Due to a defect in the fsutil.CachedFileObject interface, + // readHandles.Host.FD() may be used outside locks, making it impossible to + // reliably close it. To workaround it, we dup the new FD into the old one, so + // operations on the old will see the new data. Then, make the new handle take + // ownereship of the old FD and mark the old readHandle to not close the FD + // when done. 
+ if err := syscall.Dup3(h.Host.FD(), i.readHandles.Host.FD(), syscall.O_CLOEXEC); err != nil { + return err + } + + h.Host.Close() + h.Host = fd.New(i.readHandles.Host.FD()) + i.readHandles.isHostBorrowed = true + i.readHandles.DecRef() + + h.IncRef() + i.readHandles = h + return nil +} + +// ReadToBlocksAt implements fsutil.CachedFileObject.ReadToBlocksAt. +func (i *inodeFileState) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + i.handlesMu.RLock() + n, err := i.readHandles.readWriterAt(ctx, int64(offset)).ReadToBlocks(dsts) + i.handlesMu.RUnlock() + return n, err +} + +// WriteFromBlocksAt implements fsutil.CachedFileObject.WriteFromBlocksAt. +func (i *inodeFileState) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + i.handlesMu.RLock() + n, err := i.writeHandles.readWriterAt(ctx, int64(offset)).WriteFromBlocks(srcs) + i.handlesMu.RUnlock() + return n, err +} + +// SetMaskedAttributes implements fsutil.CachedFileObject.SetMaskedAttributes. +func (i *inodeFileState) SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr, forceSetTimestamps bool) error { + if i.skipSetAttr(mask, forceSetTimestamps) { + return nil + } + as, ans := attr.AccessTime.Unix() + ms, mns := attr.ModificationTime.Unix() + // An update of status change time is implied by mask.AccessTime + // or mask.ModificationTime. Updating status change time to a + // time earlier than the system time is not possible. 
+ return i.file.setAttr( + ctx, + p9.SetAttrMask{ + Permissions: mask.Perms, + Size: mask.Size, + UID: mask.UID, + GID: mask.GID, + ATime: mask.AccessTime, + ATimeNotSystemTime: true, + MTime: mask.ModificationTime, + MTimeNotSystemTime: true, + }, p9.SetAttr{ + Permissions: p9.FileMode(attr.Perms.LinuxMode()), + UID: p9.UID(attr.Owner.UID), + GID: p9.GID(attr.Owner.GID), + Size: uint64(attr.Size), + ATimeSeconds: uint64(as), + ATimeNanoSeconds: uint64(ans), + MTimeSeconds: uint64(ms), + MTimeNanoSeconds: uint64(mns), + }) +} + +// skipSetAttr checks if attribute change can be skipped. It can be skipped +// when: +// - Mask is empty +// - Mask contains only attributes that cannot be set in the gofer +// - forceSetTimestamps is false and mask contains only atime and/or mtime +// and host FD exists +// +// Updates to atime and mtime can be skipped because cached value will be +// "close enough" to host value, given that operation went directly to host FD. +// Skipping atime updates is particularly important to reduce the number of +// operations sent to the Gofer for readonly files. +func (i *inodeFileState) skipSetAttr(mask fs.AttrMask, forceSetTimestamps bool) bool { + // First remove attributes that cannot be updated. + cpy := mask + cpy.Type = false + cpy.DeviceID = false + cpy.InodeID = false + cpy.BlockSize = false + cpy.Usage = false + cpy.Links = false + if cpy.Empty() { + return true + } + + // Then check if more than just atime and mtime is being set. + cpy.AccessTime = false + cpy.ModificationTime = false + if !cpy.Empty() { + return false + } + + // If forceSetTimestamps was passed, then we cannot skip. + if forceSetTimestamps { + return false + } + + // Skip if we have a host FD. + i.handlesMu.RLock() + defer i.handlesMu.RUnlock() + return (i.readHandles != nil && i.readHandles.Host != nil) || + (i.writeHandles != nil && i.writeHandles.Host != nil) +} + +// Sync implements fsutil.CachedFileObject.Sync. 
+func (i *inodeFileState) Sync(ctx context.Context) error { + i.handlesMu.RLock() + defer i.handlesMu.RUnlock() + if i.writeHandles == nil { + return nil + } + return i.writeHandles.File.fsync(ctx) +} + +// FD implements fsutil.CachedFileObject.FD. +func (i *inodeFileState) FD() int { + i.handlesMu.RLock() + defer i.handlesMu.RUnlock() + if i.writeHandlesRW && i.writeHandles != nil && i.writeHandles.Host != nil { + return int(i.writeHandles.Host.FD()) + } + if i.readHandles != nil && i.readHandles.Host != nil { + return int(i.readHandles.Host.FD()) + } + return -1 +} + +// waitForLoad makes sure any restore-issued loading is done. +func (i *inodeFileState) waitForLoad() { + // This is not a no-op. The loading mutex is hold upon restore until + // all loading actions are done. + i.loading.Lock() + i.loading.Unlock() +} + +func (i *inodeFileState) unstableAttr(ctx context.Context) (fs.UnstableAttr, error) { + _, valid, pattr, err := getattr(ctx, i.file) + if err != nil { + return fs.UnstableAttr{}, err + } + return unstable(ctx, valid, pattr, i.s.mounter, i.s.client), nil +} + +func (i *inodeFileState) Allocate(ctx context.Context, offset, length int64) error { + i.handlesMu.RLock() + defer i.handlesMu.RUnlock() + + // No options are supported for now. + mode := p9.AllocateMode{} + return i.writeHandles.File.allocate(ctx, mode, uint64(offset), uint64(length)) +} + +// session extracts the gofer's session from the MountSource. +func (i *inodeOperations) session() *session { + return i.fileState.s +} + +// Release implements fs.InodeOperations.Release. +func (i *inodeOperations) Release(ctx context.Context) { + i.cachingInodeOps.Release() + + // Releasing the fileState may make RPCs to the gofer. There is + // no need to wait for those to return, so we can do this + // asynchronously. + // + // We use AsyncWithContext to avoid needing to allocate an extra + // anonymous function on the heap. 
+ fs.AsyncWithContext(ctx, i.fileState.Release) +} + +// Mappable implements fs.InodeOperations.Mappable. +func (i *inodeOperations) Mappable(inode *fs.Inode) memmap.Mappable { + if i.session().cachePolicy.useCachingInodeOps(inode) { + return i.cachingInodeOps + } + // This check is necessary because it's returning an interface type. + if i.fileState.hostMappable != nil { + return i.fileState.hostMappable + } + return nil +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + if i.session().cachePolicy.cacheUAttrs(inode) { + return i.cachingInodeOps.UnstableAttr(ctx, inode) + } + return i.fileState.unstableAttr(ctx) +} + +// Check implements fs.InodeOperations.Check. +func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// GetFile implements fs.InodeOperations.GetFile. +func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + switch d.Inode.StableAttr.Type { + case fs.Socket: + return i.getFileSocket(ctx, d, flags) + case fs.Pipe: + return i.getFilePipe(ctx, d, flags) + default: + return i.getFileDefault(ctx, d, flags) + } +} + +func (i *inodeOperations) getFileSocket(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + f, err := i.fileState.file.connect(ctx, p9.AnonymousSocket) + if err != nil { + return nil, syscall.EIO + } + fsf, err := host.NewSocketWithDirent(ctx, d, f, flags) + if err != nil { + f.Close() + return nil, err + } + return fsf, nil +} + +func (i *inodeOperations) getFilePipe(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + // Try to open as a host pipe; if that doesn't work, handle it normally. 
+ pipeOps, err := fdpipe.Open(ctx, i, flags) + if err == errNotHostFile { + return i.getFileDefault(ctx, d, flags) + } + if err != nil { + return nil, err + } + return fs.NewFile(ctx, d, flags, pipeOps), nil +} + +// errNotHostFile indicates that the file is not a host file. +var errNotHostFile = errors.New("not a host file") + +// NonBlockingOpen implements fdpipe.NonBlockingOpener for opening host named pipes. +func (i *inodeOperations) NonBlockingOpen(ctx context.Context, p fs.PermMask) (*fd.FD, error) { + i.fileState.waitForLoad() + + // Get a cloned fid which we will open. + _, newFile, err := i.fileState.file.walk(ctx, nil) + if err != nil { + log.Warningf("Open Walk failed: %v", err) + return nil, err + } + defer newFile.close(ctx) + + flags, err := openFlagsFromPerms(p) + if err != nil { + log.Warningf("Open flags %s parsing failed: %v", p, err) + return nil, err + } + hostFile, _, _, err := newFile.open(ctx, flags) + // If the host file returned is nil and the error is nil, + // then this was never a host file to begin with, and should + // be treated like a remote file. + if hostFile == nil && err == nil { + return nil, errNotHostFile + } + return hostFile, err +} + +func (i *inodeOperations) getFileDefault(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + h, err := i.fileState.getHandles(ctx, flags, i.cachingInodeOps) + if err != nil { + return nil, err + } + return NewFile(ctx, d, d.BaseName(), flags, i, h), nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (i *inodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool { + if i.session().cachePolicy.cacheUAttrs(inode) { + return i.cachingInodeOps.SetPermissions(ctx, inode, p) + } + + mask := p9.SetAttrMask{Permissions: true} + pattr := p9.SetAttr{Permissions: p9.FileMode(p.LinuxMode())} + // Execute the chmod. 
+ return i.fileState.file.setAttr(ctx, mask, pattr) == nil +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (i *inodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + // Save the roundtrip. + if !owner.UID.Ok() && !owner.GID.Ok() { + return nil + } + + if i.session().cachePolicy.cacheUAttrs(inode) { + return i.cachingInodeOps.SetOwner(ctx, inode, owner) + } + + var mask p9.SetAttrMask + var attr p9.SetAttr + if owner.UID.Ok() { + mask.UID = true + attr.UID = p9.UID(owner.UID) + } + if owner.GID.Ok() { + mask.GID = true + attr.GID = p9.GID(owner.GID) + } + return i.fileState.file.setAttr(ctx, mask, attr) +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (i *inodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + if i.session().cachePolicy.cacheUAttrs(inode) { + return i.cachingInodeOps.SetTimestamps(ctx, inode, ts) + } + + return utimes(ctx, i.fileState.file, ts) +} + +// Truncate implements fs.InodeOperations.Truncate. +func (i *inodeOperations) Truncate(ctx context.Context, inode *fs.Inode, length int64) error { + // This can only be called for files anyway. + if i.session().cachePolicy.useCachingInodeOps(inode) { + return i.cachingInodeOps.Truncate(ctx, inode, length) + } + if i.session().cachePolicy == cacheRemoteRevalidating { + return i.fileState.hostMappable.Truncate(ctx, length) + } + + return i.fileState.file.setAttr(ctx, p9.SetAttrMask{Size: true}, p9.SetAttr{Size: uint64(length)}) +} + +// GetXattr implements fs.InodeOperations.GetXattr. +func (i *inodeOperations) GetXattr(ctx context.Context, _ *fs.Inode, name string, size uint64) (string, error) { + return i.fileState.file.getXattr(ctx, name, size) +} + +// SetXattr implements fs.InodeOperations.SetXattr. 
+func (i *inodeOperations) SetXattr(ctx context.Context, _ *fs.Inode, name string, value string, flags uint32) error { + return i.fileState.file.setXattr(ctx, name, value, flags) +} + +// ListXattr implements fs.InodeOperations.ListXattr. +func (i *inodeOperations) ListXattr(ctx context.Context, _ *fs.Inode, size uint64) (map[string]struct{}, error) { + return i.fileState.file.listXattr(ctx, size) +} + +// RemoveXattr implements fs.InodeOperations.RemoveXattr. +func (i *inodeOperations) RemoveXattr(ctx context.Context, _ *fs.Inode, name string) error { + return i.fileState.file.removeXattr(ctx, name) +} + +// Allocate implements fs.InodeOperations.Allocate. +func (i *inodeOperations) Allocate(ctx context.Context, inode *fs.Inode, offset, length int64) error { + // This can only be called for files anyway. + if i.session().cachePolicy.useCachingInodeOps(inode) { + return i.cachingInodeOps.Allocate(ctx, offset, length) + } + if i.session().cachePolicy == cacheRemoteRevalidating { + return i.fileState.hostMappable.Allocate(ctx, offset, length) + } + + // No options are supported for now. + mode := p9.AllocateMode{} + return i.fileState.file.allocate(ctx, mode, uint64(offset), uint64(length)) +} + +// WriteOut implements fs.InodeOperations.WriteOut. +func (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error { + if inode.MountSource.Flags.ReadOnly || !i.session().cachePolicy.cacheUAttrs(inode) { + return nil + } + + return i.cachingInodeOps.WriteOut(ctx, inode) +} + +// Readlink implements fs.InodeOperations.Readlink. +func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string, error) { + if !fs.IsSymlink(inode.StableAttr) { + return "", syscall.ENOLINK + } + return i.fileState.file.readlink(ctx) +} + +// Getlink implementfs fs.InodeOperations.Getlink. 
+func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + if !fs.IsSymlink(i.fileState.sattr) { + return nil, syserror.ENOLINK + } + return nil, fs.ErrResolveViaReadlink +} + +// StatFS makes a StatFS request. +func (i *inodeOperations) StatFS(ctx context.Context) (fs.Info, error) { + fsstat, err := i.fileState.file.statFS(ctx) + if err != nil { + return fs.Info{}, err + } + + info := fs.Info{ + // This is primarily for distinguishing a gofer file system in + // tests. Testing is important, so instead of defining + // something completely random, use a standard value. + Type: linux.V9FS_MAGIC, + TotalBlocks: fsstat.Blocks, + FreeBlocks: fsstat.BlocksFree, + TotalFiles: fsstat.Files, + FreeFiles: fsstat.FilesFree, + } + + // If blocks available is non-zero, prefer that. + if fsstat.BlocksAvailable != 0 { + info.FreeBlocks = fsstat.BlocksAvailable + } + + return info, nil +} + +func (i *inodeOperations) configureMMap(file *fs.File, opts *memmap.MMapOpts) error { + if i.session().cachePolicy.useCachingInodeOps(file.Dirent.Inode) { + return fsutil.GenericConfigureMMap(file, i.cachingInodeOps, opts) + } + if i.fileState.hostMappable != nil { + return fsutil.GenericConfigureMMap(file, i.fileState.hostMappable, opts) + } + return syserror.ENODEV +} + +func init() { + syserror.AddErrorUnwrapper(func(err error) (syscall.Errno, bool) { + if _, ok := err.(p9.ErrSocket); ok { + // Treat as an I/O error. + return syscall.EIO, true + } + return 0, false + }) +} + +// AddLink implements InodeOperations.AddLink, but is currently a noop. +func (*inodeOperations) AddLink() {} + +// DropLink implements InodeOperations.DropLink, but is currently a noop. +func (*inodeOperations) DropLink() {} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. 
+func (i *inodeOperations) NotifyStatusChange(ctx context.Context) {} diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go new file mode 100644 index 000000000..a3402e343 --- /dev/null +++ b/pkg/sentry/fs/gofer/inode_state.go @@ -0,0 +1,171 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "errors" + "fmt" + "path/filepath" + "strings" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/time" +) + +// Some fs implementations may not support atime, ctime, or mtime in getattr. +// The unstable() logic would try to use clock time for them. However, we do not +// want to use such time during S/R as that would cause restore timestamp +// checking failure. Hence a dummy stable-time clock is needed. +// +// Note that application-visible UnstableAttrs either come from CachingInodeOps +// (in which case they are saved), or they are requested from the gofer on each +// stat (for non-caching), so the dummy time only affects the modification +// timestamp check. +type dummyClock struct { + time.Clock +} + +// Now returns a stable dummy time. 
+func (d *dummyClock) Now() time.Time { + return time.Time{} +} + +type dummyClockContext struct { + context.Context +} + +// Value implements context.Context +func (d *dummyClockContext) Value(key interface{}) interface{} { + switch key { + case time.CtxRealtimeClock: + return &dummyClock{} + default: + return d.Context.Value(key) + } +} + +// beforeSave is invoked by stateify. +func (i *inodeFileState) beforeSave() { + if _, ok := i.s.inodeMappings[i.sattr.InodeID]; !ok { + panic(fmt.Sprintf("failed to find path for inode number %d. Device %s contains %s", i.sattr.InodeID, i.s.connID, fs.InodeMappings(i.s.inodeMappings))) + } + if i.sattr.Type == fs.RegularFile { + uattr, err := i.unstableAttr(&dummyClockContext{context.Background()}) + if err != nil { + panic(fs.ErrSaveRejection{fmt.Errorf("failed to get unstable atttribute of %s: %v", i.s.inodeMappings[i.sattr.InodeID], err)}) + } + i.savedUAttr = &uattr + } +} + +// saveLoading is invoked by stateify. +func (i *inodeFileState) saveLoading() struct{} { + return struct{}{} +} + +// splitAbsolutePath splits the path on slashes ignoring the leading slash. +func splitAbsolutePath(path string) []string { + if len(path) == 0 { + panic("There is no path!") + } + if path != filepath.Clean(path) { + panic(fmt.Sprintf("path %q is not clean", path)) + } + // This case is to return {} rather than {""} + if path == "/" { + return []string{} + } + if path[0] != '/' { + panic(fmt.Sprintf("path %q is not absolute", path)) + } + + s := strings.Split(path, "/") + + // Since p is absolute, the first component of s + // is an empty string. We must remove that. + return s[1:] +} + +// loadLoading is invoked by stateify. +func (i *inodeFileState) loadLoading(_ struct{}) { + i.loading.Lock() +} + +// afterLoad is invoked by stateify. +func (i *inodeFileState) afterLoad() { + load := func() (err error) { + // See comment on i.loading(). 
+ defer func() { + if err == nil { + i.loading.Unlock() + } + }() + + // Manually restore the p9.File. + name, ok := i.s.inodeMappings[i.sattr.InodeID] + if !ok { + // This should be impossible, see assertion in + // beforeSave. + return fmt.Errorf("failed to find path for inode number %d. Device %s contains %s", i.sattr.InodeID, i.s.connID, fs.InodeMappings(i.s.inodeMappings)) + } + ctx := &dummyClockContext{context.Background()} + + _, i.file, err = i.s.attach.walk(ctx, splitAbsolutePath(name)) + if err != nil { + return fs.ErrCorruption{fmt.Errorf("failed to walk to %q: %v", name, err)} + } + + // Remap the saved inode number into the gofer device using the + // actual device and actual inode that exists in our new + // environment. + qid, mask, attrs, err := i.file.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + return fs.ErrCorruption{fmt.Errorf("failed to get file attributes of %s: %v", name, err)} + } + if !mask.RDev { + return fs.ErrCorruption{fmt.Errorf("file %s lacks device", name)} + } + i.key = device.MultiDeviceKey{ + Device: attrs.RDev, + SecondaryDevice: i.s.connID, + Inode: qid.Path, + } + if !goferDevice.Load(i.key, i.sattr.InodeID) { + return fs.ErrCorruption{fmt.Errorf("gofer device %s -> %d conflict in gofer device mappings: %s", i.key, i.sattr.InodeID, goferDevice)} + } + + if i.sattr.Type == fs.RegularFile { + env, ok := fs.CurrentRestoreEnvironment() + if !ok { + return errors.New("missing restore environment") + } + uattr := unstable(ctx, mask, attrs, i.s.mounter, i.s.client) + if env.ValidateFileSize && uattr.Size != i.savedUAttr.Size { + return fs.ErrCorruption{fmt.Errorf("file size has changed for %s: previously %d, now %d", i.s.inodeMappings[i.sattr.InodeID], i.savedUAttr.Size, uattr.Size)} + } + if env.ValidateFileTimestamp && uattr.ModificationTime != i.savedUAttr.ModificationTime { + return fs.ErrCorruption{fmt.Errorf("file modification time has changed for %s: previously %v, now %v", i.s.inodeMappings[i.sattr.InodeID], 
i.savedUAttr.ModificationTime, uattr.ModificationTime)} + } + i.savedUAttr = nil + } + + return nil + } + + fs.Async(fs.CatchError(load)) +} diff --git a/pkg/sentry/fs/gofer/path.go b/pkg/sentry/fs/gofer/path.go new file mode 100644 index 000000000..cf9800100 --- /dev/null +++ b/pkg/sentry/fs/gofer/path.go @@ -0,0 +1,495 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/pipe" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +// maxFilenameLen is the maximum length of a filename. This is dictated by 9P's +// encoding of strings, which uses 2 bytes for the length prefix. +const maxFilenameLen = (1 << 16) - 1 + +func changeType(mode p9.FileMode, newType p9.FileMode) p9.FileMode { + if newType&^p9.FileModeMask != 0 { + panic(fmt.Sprintf("newType contained more bits than just file mode: %x", newType)) + } + clear := mode &^ p9.FileModeMask + return clear | newType +} + +// Lookup loads an Inode at name into a Dirent based on the session's cache +// policy. 
+func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + if len(name) > maxFilenameLen { + return nil, syserror.ENAMETOOLONG + } + + cp := i.session().cachePolicy + if cp.cacheReaddir() { + // Check to see if we have readdirCache that indicates the + // child does not exist. Avoid holding readdirMu longer than + // we need to. + i.readdirMu.Lock() + if i.readdirCache != nil && !i.readdirCache.Contains(name) { + // No such child. + i.readdirMu.Unlock() + if cp.cacheNegativeDirents() { + return fs.NewNegativeDirent(name), nil + } + return nil, syserror.ENOENT + } + i.readdirMu.Unlock() + } + + // Get a p9.File for name. + qids, newFile, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name}) + if err != nil { + if err == syserror.ENOENT { + if cp.cacheNegativeDirents() { + // Return a negative Dirent. It will stay cached until something + // is created over it. + return fs.NewNegativeDirent(name), nil + } + return nil, syserror.ENOENT + } + return nil, err + } + + if i.session().overrides != nil { + // Check if file belongs to a internal named pipe. Note that it doesn't need + // to check for sockets because it's done in newInodeOperations below. + deviceKey := device.MultiDeviceKey{ + Device: p9attr.RDev, + SecondaryDevice: i.session().connID, + Inode: qids[0].Path, + } + unlock := i.session().overrides.lock() + if pipeInode := i.session().overrides.getPipe(deviceKey); pipeInode != nil { + unlock() + pipeInode.IncRef() + return fs.NewDirent(ctx, pipeInode, name), nil + } + unlock() + } + + // Construct the Inode operations. + sattr, node := newInodeOperations(ctx, i.fileState.s, newFile, qids[0], mask, p9attr) + + // Construct a positive Dirent. + return fs.NewDirent(ctx, fs.NewInode(ctx, node, dir.MountSource, sattr), name), nil +} + +// Creates a new Inode at name and returns its File based on the session's cache policy. +// +// Ownership is currently ignored. 
+func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + if len(name) > maxFilenameLen { + return nil, syserror.ENAMETOOLONG + } + + // Create replaces the directory fid with the newly created/opened + // file, so clone this directory so it doesn't change out from under + // this node. + _, newFile, err := i.fileState.file.walk(ctx, nil) + if err != nil { + return nil, err + } + + // Map the FileFlags to p9 OpenFlags. + var openFlags p9.OpenFlags + switch { + case flags.Read && flags.Write: + openFlags = p9.ReadWrite + case flags.Read: + openFlags = p9.ReadOnly + case flags.Write: + openFlags = p9.WriteOnly + default: + panic(fmt.Sprintf("Create called with unknown or unset open flags: %v", flags)) + } + + owner := fs.FileOwnerFromContext(ctx) + hostFile, err := newFile.create(ctx, name, openFlags, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)) + if err != nil { + // Could not create the file. + newFile.close(ctx) + return nil, err + } + + i.touchModificationAndStatusChangeTime(ctx, dir) + + // Get an unopened p9.File for the file we created so that it can be cloned + // and re-opened multiple times after creation, while also getting its + // attributes. Both are required for inodeOperations. + qids, unopened, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name}) + if err != nil { + newFile.close(ctx) + if hostFile != nil { + hostFile.Close() + } + return nil, err + } + if len(qids) != 1 { + log.Warningf("WalkGetAttr(%s) succeeded, but returned %d QIDs (%v), wanted 1", name, len(qids), qids) + newFile.close(ctx) + if hostFile != nil { + hostFile.Close() + } + unopened.close(ctx) + return nil, syserror.EIO + } + qid := qids[0] + + // Construct the InodeOperations. + sattr, iops := newInodeOperations(ctx, i.fileState.s, unopened, qid, mask, p9attr) + + // Construct the positive Dirent. 
+ d := fs.NewDirent(ctx, fs.NewInode(ctx, iops, dir.MountSource, sattr), name) + defer d.DecRef() + + // Construct the new file, caching the handles if allowed. + h := handles{ + File: newFile, + Host: hostFile, + } + h.EnableLeakCheck("gofer.handles") + if iops.fileState.canShareHandles() { + iops.fileState.handlesMu.Lock() + iops.fileState.setSharedHandlesLocked(flags, &h) + iops.fileState.handlesMu.Unlock() + } + return NewFile(ctx, d, name, flags, iops, &h), nil +} + +// CreateLink uses Create to create a symlink between oldname and newname. +func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error { + if len(newname) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + owner := fs.FileOwnerFromContext(ctx) + if _, err := i.fileState.file.symlink(ctx, oldname, newname, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil { + return err + } + i.touchModificationAndStatusChangeTime(ctx, dir) + return nil +} + +// CreateHardLink implements InodeOperations.CreateHardLink. +func (i *inodeOperations) CreateHardLink(ctx context.Context, inode *fs.Inode, target *fs.Inode, newName string) error { + if len(newName) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + targetOpts, ok := target.InodeOperations.(*inodeOperations) + if !ok { + return syserror.EXDEV + } + + if err := i.fileState.file.link(ctx, &targetOpts.fileState.file, newName); err != nil { + return err + } + if i.session().cachePolicy.cacheUAttrs(inode) { + // Increase link count. + targetOpts.cachingInodeOps.IncLinks(ctx) + } + i.touchModificationAndStatusChangeTime(ctx, inode) + return nil +} + +// CreateDirectory uses Create to create a directory named s under inodeOperations. 
+func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s string, perm fs.FilePermissions) error { + if len(s) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + owner := fs.FileOwnerFromContext(ctx) + if _, err := i.fileState.file.mkdir(ctx, s, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil { + return err + } + if i.session().cachePolicy.cacheUAttrs(dir) { + // Increase link count. + // + // N.B. This will update the modification time. + i.cachingInodeOps.IncLinks(ctx) + } + if i.session().cachePolicy.cacheReaddir() { + // Invalidate readdir cache. + i.markDirectoryDirty() + } + return nil +} + +// Bind implements InodeOperations.Bind. +func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) { + if len(name) > maxFilenameLen { + return nil, syserror.ENAMETOOLONG + } + + if i.session().overrides == nil { + return nil, syserror.EOPNOTSUPP + } + + // Stabilize the override map while creation is in progress. + unlock := i.session().overrides.lock() + defer unlock() + + sattr, iops, err := i.createEndpointFile(ctx, dir, name, perm, p9.ModeSocket) + if err != nil { + return nil, err + } + + // Construct the positive Dirent. + childDir := fs.NewDirent(ctx, fs.NewInode(ctx, iops, dir.MountSource, sattr), name) + i.session().overrides.addBoundEndpoint(iops.fileState.key, childDir, ep) + return childDir, nil +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (i *inodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + if len(name) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + owner := fs.FileOwnerFromContext(ctx) + mode := p9.FileMode(perm.LinuxMode()) | p9.ModeNamedPipe + + // N.B. FIFOs use major/minor numbers 0. 
+	if _, err := i.fileState.file.mknod(ctx, name, mode, 0, 0, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
+		if i.session().overrides == nil || err != syserror.EPERM {
+			return err
+		}
+		// If gofer doesn't support mknod, check if we can create an internal fifo.
+		return i.createInternalFifo(ctx, dir, name, owner, perm)
+	}
+
+	i.touchModificationAndStatusChangeTime(ctx, dir)
+	return nil
+}
+
+// createInternalFifo creates a FIFO entirely within the sentry: a regular file
+// is created in the gofer (via createEndpointFile) and overridden to behave as
+// a named pipe. Used when the gofer itself does not support mknod.
+func (i *inodeOperations) createInternalFifo(ctx context.Context, dir *fs.Inode, name string, owner fs.FileOwner, perm fs.FilePermissions) error {
+	if i.session().overrides == nil {
+		return syserror.EPERM
+	}
+
+	// Stabilize the override map while creation is in progress.
+	unlock := i.session().overrides.lock()
+	defer unlock()
+
+	sattr, fileOps, err := i.createEndpointFile(ctx, dir, name, perm, p9.ModeNamedPipe)
+	if err != nil {
+		return err
+	}
+
+	// First create a pipe.
+	p := pipe.NewPipe(true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)
+
+	// Wrap the fileOps with our Fifo.
+	iops := &fifo{
+		InodeOperations: pipe.NewInodeOperations(ctx, perm, p),
+		fileIops:        fileOps,
+	}
+	inode := fs.NewInode(ctx, iops, dir.MountSource, sattr)
+
+	// Construct the positive Dirent. It must wrap the same inode that is
+	// registered in the overrides map below; constructing a second inode
+	// here would leave the dirent and the pipe override out of sync.
+	childDir := fs.NewDirent(ctx, inode, name)
+	i.session().overrides.addPipe(fileOps.fileState.key, childDir, inode)
+	return nil
+}
+
+// Caller must hold Session.endpoint lock.
+func (i *inodeOperations) createEndpointFile(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions, fileType p9.FileMode) (fs.StableAttr, *inodeOperations, error) {
+	_, dirClone, err := i.fileState.file.walk(ctx, nil)
+	if err != nil {
+		return fs.StableAttr{}, nil, err
+	}
+	// We're not going to use dirClone after return.
+	defer dirClone.close(ctx)
+
+	// Create a regular file in the gofer and then mark it as a socket by
+	// adding this inode key in the 'overrides' map.
+ owner := fs.FileOwnerFromContext(ctx) + hostFile, err := dirClone.create(ctx, name, p9.ReadWrite, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)) + if err != nil { + return fs.StableAttr{}, nil, err + } + // We're not going to use this file. + hostFile.Close() + + i.touchModificationAndStatusChangeTime(ctx, dir) + + // Get the attributes of the file to create inode key. + qid, mask, attr, err := getattr(ctx, dirClone) + if err != nil { + return fs.StableAttr{}, nil, err + } + + // Get an unopened p9.File for the file we created so that it can be + // cloned and re-opened multiple times after creation. + _, unopened, err := i.fileState.file.walk(ctx, []string{name}) + if err != nil { + return fs.StableAttr{}, nil, err + } + + // Construct new inode with file type overridden. + attr.Mode = changeType(attr.Mode, fileType) + sattr, iops := newInodeOperations(ctx, i.fileState.s, unopened, qid, mask, attr) + return sattr, iops, nil +} + +// Remove implements InodeOperations.Remove. +func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + if len(name) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + var key *device.MultiDeviceKey + if i.session().overrides != nil { + // Find out if file being deleted is a socket or pipe that needs to be + // removed from endpoint map. + if d, err := i.Lookup(ctx, dir, name); err == nil { + defer d.DecRef() + + if fs.IsSocket(d.Inode.StableAttr) || fs.IsPipe(d.Inode.StableAttr) { + switch iops := d.Inode.InodeOperations.(type) { + case *inodeOperations: + key = &iops.fileState.key + case *fifo: + key = &iops.fileIops.fileState.key + } + + // Stabilize the override map while deletion is in progress. 
+ unlock := i.session().overrides.lock() + defer unlock() + } + } + } + + if err := i.fileState.file.unlinkAt(ctx, name, 0); err != nil { + return err + } + if key != nil { + i.session().overrides.remove(*key) + } + i.touchModificationAndStatusChangeTime(ctx, dir) + + return nil +} + +// Remove implements InodeOperations.RemoveDirectory. +func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error { + if len(name) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + // 0x200 = AT_REMOVEDIR. + if err := i.fileState.file.unlinkAt(ctx, name, 0x200); err != nil { + return err + } + if i.session().cachePolicy.cacheUAttrs(dir) { + // Decrease link count and updates atime. + i.cachingInodeOps.DecLinks(ctx) + } + if i.session().cachePolicy.cacheReaddir() { + // Invalidate readdir cache. + i.markDirectoryDirty() + } + return nil +} + +// Rename renames this node. +func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + if len(newName) > maxFilenameLen { + return syserror.ENAMETOOLONG + } + + // Don't allow renames across different mounts. + if newParent.MountSource != oldParent.MountSource { + return syserror.EXDEV + } + + // Unwrap the new parent to a *inodeOperations. + newParentInodeOperations := newParent.InodeOperations.(*inodeOperations) + + // Unwrap the old parent to a *inodeOperations. + oldParentInodeOperations := oldParent.InodeOperations.(*inodeOperations) + + // Do the rename. + if err := i.fileState.file.rename(ctx, newParentInodeOperations.fileState.file, newName); err != nil { + return err + } + + // Is the renamed entity a directory? Fix link counts. + if fs.IsDir(i.fileState.sattr) { + // Update cached state. 
+ if i.session().cachePolicy.cacheUAttrs(oldParent) { + oldParentInodeOperations.cachingInodeOps.DecLinks(ctx) + } + if i.session().cachePolicy.cacheUAttrs(newParent) { + // Only IncLinks if there is a new addition to + // newParent. If this is replacement, then the total + // count remains the same. + if !replacement { + newParentInodeOperations.cachingInodeOps.IncLinks(ctx) + } + } + } + if i.session().cachePolicy.cacheReaddir() { + // Mark old directory dirty. + oldParentInodeOperations.markDirectoryDirty() + if oldParent != newParent { + // Mark new directory dirty. + newParentInodeOperations.markDirectoryDirty() + } + } + + // Rename always updates ctime. + if i.session().cachePolicy.cacheUAttrs(inode) { + i.cachingInodeOps.TouchStatusChangeTime(ctx) + } + return nil +} + +func (i *inodeOperations) touchModificationAndStatusChangeTime(ctx context.Context, inode *fs.Inode) { + if i.session().cachePolicy.cacheUAttrs(inode) { + i.cachingInodeOps.TouchModificationAndStatusChangeTime(ctx) + } + if i.session().cachePolicy.cacheReaddir() { + // Invalidate readdir cache. + i.markDirectoryDirty() + } +} + +// markDirectoryDirty marks any cached data dirty for this directory. This is necessary in order +// to ensure that this node does not retain stale state throughout its lifetime across multiple +// open directory handles. +// +// Currently this means invalidating any readdir caches. +func (i *inodeOperations) markDirectoryDirty() { + i.readdirMu.Lock() + defer i.readdirMu.Unlock() + i.readdirCache = nil +} diff --git a/pkg/sentry/fs/gofer/session.go b/pkg/sentry/fs/gofer/session.go new file mode 100644 index 000000000..b5efc86f2 --- /dev/null +++ b/pkg/sentry/fs/gofer/session.go @@ -0,0 +1,426 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gofer
+
+import (
+	"fmt"
+
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/p9"
+	"gvisor.dev/gvisor/pkg/refs"
+	"gvisor.dev/gvisor/pkg/sentry/device"
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
+	"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
+	"gvisor.dev/gvisor/pkg/sync"
+	"gvisor.dev/gvisor/pkg/unet"
+)
+
+// DefaultDirentCacheSize is the default dirent cache size for 9P mounts. It can
+// be adjusted independently from the other dirent caches.
+var DefaultDirentCacheSize uint64 = fs.DefaultDirentCacheSize
+
+// overrideInfo records what a regular gofer file is overridden to represent:
+// exactly one of endpoint (socket) or inode (pipe) is set.
+//
+// +stateify savable
+type overrideInfo struct {
+	dirent *fs.Dirent
+
+	// endpoint is set when dirent points to a socket. inode must not be set.
+	endpoint transport.BoundEndpoint
+
+	// inode is set when dirent points to a pipe. endpoint must not be set.
+	inode *fs.Inode
+}
+
+// inodeType returns fs.Socket or fs.Pipe according to which field is set.
+// It panics if neither is set, since an overrideInfo must represent one.
+func (l *overrideInfo) inodeType() fs.InodeType {
+	switch {
+	case l.endpoint != nil:
+		return fs.Socket
+	case l.inode != nil:
+		return fs.Pipe
+	}
+	panic("endpoint or node must be set")
+}
+
+// +stateify savable
+type overrideMaps struct {
+	// mu protects the keyMap, and the pathMap below.
+	mu sync.RWMutex `state:"nosave"`
+
+	// keyMap links MultiDeviceKeys (containing inode IDs) to their sockets/pipes.
+	// It is not stored during save because the inode ID may change upon restore.
+	keyMap map[device.MultiDeviceKey]*overrideInfo `state:"nosave"`
+
+	// pathMap links the sockets/pipes to their paths.
+	// It is filled before saving from the direntMap and is stored upon save.
+ // Upon restore, this map is used to re-populate the keyMap. + pathMap map[*overrideInfo]string +} + +// addBoundEndpoint adds the bound endpoint to the map. +// A reference is taken on the dirent argument. +// +// Precondition: maps must have been locked with 'lock'. +func (e *overrideMaps) addBoundEndpoint(key device.MultiDeviceKey, d *fs.Dirent, ep transport.BoundEndpoint) { + d.IncRef() + e.keyMap[key] = &overrideInfo{dirent: d, endpoint: ep} +} + +// addPipe adds the pipe inode to the map. +// A reference is taken on the dirent argument. +// +// Precondition: maps must have been locked with 'lock'. +func (e *overrideMaps) addPipe(key device.MultiDeviceKey, d *fs.Dirent, inode *fs.Inode) { + d.IncRef() + e.keyMap[key] = &overrideInfo{dirent: d, inode: inode} +} + +// remove deletes the key from the maps. +// +// Precondition: maps must have been locked with 'lock'. +func (e *overrideMaps) remove(key device.MultiDeviceKey) { + endpoint := e.keyMap[key] + delete(e.keyMap, key) + endpoint.dirent.DecRef() +} + +// lock blocks other addition and removal operations from happening while +// the backing file is being created or deleted. Returns a function that unlocks +// the endpoint map. +func (e *overrideMaps) lock() func() { + e.mu.Lock() + return func() { e.mu.Unlock() } +} + +// getBoundEndpoint returns the bound endpoint mapped to the given key. +// +// Precondition: maps must have been locked. +func (e *overrideMaps) getBoundEndpoint(key device.MultiDeviceKey) transport.BoundEndpoint { + if v := e.keyMap[key]; v != nil { + return v.endpoint + } + return nil +} + +// getPipe returns the pipe inode mapped to the given key. +// +// Precondition: maps must have been locked. +func (e *overrideMaps) getPipe(key device.MultiDeviceKey) *fs.Inode { + if v := e.keyMap[key]; v != nil { + return v.inode + } + return nil +} + +// getType returns the inode type if there is a corresponding endpoint for the +// given key. Returns false otherwise. 
+func (e *overrideMaps) getType(key device.MultiDeviceKey) (fs.InodeType, bool) { + e.mu.Lock() + v := e.keyMap[key] + e.mu.Unlock() + + if v != nil { + return v.inodeType(), true + } + return 0, false +} + +// session holds state for each 9p session established during sys_mount. +// +// +stateify savable +type session struct { + refs.AtomicRefCount + + // msize is the value of the msize mount option, see fs/gofer/fs.go. + msize uint32 `state:"wait"` + + // version is the value of the version mount option, see fs/gofer/fs.go. + version string `state:"wait"` + + // cachePolicy is the cache policy. + cachePolicy cachePolicy `state:"wait"` + + // aname is the value of the aname mount option, see fs/gofer/fs.go. + aname string `state:"wait"` + + // The client associated with this session. This will be initialized lazily. + client *p9.Client `state:"nosave"` + + // The p9.File pointing to attachName via the client. This will be initialized + // lazily. + attach contextFile `state:"nosave"` + + // Flags provided to the mount. + superBlockFlags fs.MountSourceFlags `state:"wait"` + + // limitHostFDTranslation is the value used for + // CachingInodeOperationsOptions.LimitHostFDTranslation for all + // CachingInodeOperations created by the session. + limitHostFDTranslation bool + + // overlayfsStaleRead when set causes the readonly handle to be invalidated + // after file is open for write. + overlayfsStaleRead bool + + // connID is a unique identifier for the session connection. + connID string `state:"wait"` + + // inodeMappings contains mappings of fs.Inodes associated with this session + // to paths relative to the attach point, where inodeMappings is keyed by + // Inode.StableAttr.InodeID. + inodeMappings map[uint64]string `state:"wait"` + + // mounter is the EUID/EGID that mounted this file system. + mounter fs.FileOwner `state:"wait"` + + // overrides is used to map inodes that represent socket/pipes files to their + // corresponding endpoint/iops. 
These files are created as regular files in + // the gofer and their presence in this map indicate that they should indeed + // be socket/pipe files. This allows unix domain sockets and named pipes to + // be used with paths that belong to a gofer. + // + // There are a few possible races with someone stat'ing the file and another + // deleting it concurrently, where the file will not be reported as socket + // file. + overrides *overrideMaps `state:"wait"` +} + +// Destroy tears down the session. +func (s *session) Destroy() { + s.client.Close() +} + +// Revalidate implements MountSourceOperations.Revalidate. +func (s *session) Revalidate(ctx context.Context, name string, parent, child *fs.Inode) bool { + return s.cachePolicy.revalidate(ctx, name, parent, child) +} + +// Keep implements MountSourceOperations.Keep. +func (s *session) Keep(d *fs.Dirent) bool { + return s.cachePolicy.keep(d) +} + +// CacheReaddir implements MountSourceOperations.CacheReaddir. +func (s *session) CacheReaddir() bool { + return s.cachePolicy.cacheReaddir() +} + +// ResetInodeMappings implements fs.MountSourceOperations.ResetInodeMappings. +func (s *session) ResetInodeMappings() { + s.inodeMappings = make(map[uint64]string) +} + +// SaveInodeMapping implements fs.MountSourceOperations.SaveInodeMapping. +func (s *session) SaveInodeMapping(inode *fs.Inode, path string) { + // This is very unintuitive. We *CANNOT* trust the inode's StableAttrs, + // because overlay copyUp may have changed them out from under us. + // So much for "immutable". + switch iops := inode.InodeOperations.(type) { + case *inodeOperations: + s.inodeMappings[iops.fileState.sattr.InodeID] = path + case *fifo: + s.inodeMappings[iops.fileIops.fileState.sattr.InodeID] = path + default: + panic(fmt.Sprintf("Invalid type: %T", iops)) + } +} + +// newInodeOperations creates a new 9p fs.InodeOperations backed by a p9.File +// and attributes (p9.QID, p9.AttrMask, p9.Attr). 
+// +// Endpoints lock must not be held if socket == false. +func newInodeOperations(ctx context.Context, s *session, file contextFile, qid p9.QID, valid p9.AttrMask, attr p9.Attr) (fs.StableAttr, *inodeOperations) { + deviceKey := device.MultiDeviceKey{ + Device: attr.RDev, + SecondaryDevice: s.connID, + Inode: qid.Path, + } + + sattr := fs.StableAttr{ + Type: ntype(attr), + DeviceID: goferDevice.DeviceID(), + InodeID: goferDevice.Map(deviceKey), + BlockSize: bsize(attr), + } + + if s.overrides != nil && sattr.Type == fs.RegularFile { + // If overrides are allowed on this filesystem, check if this file is + // supposed to be of a different type, e.g. socket. + if t, ok := s.overrides.getType(deviceKey); ok { + sattr.Type = t + } + } + + fileState := &inodeFileState{ + s: s, + file: file, + sattr: sattr, + key: deviceKey, + } + if s.cachePolicy == cacheRemoteRevalidating && fs.IsFile(sattr) { + fileState.hostMappable = fsutil.NewHostMappable(fileState) + } + + uattr := unstable(ctx, valid, attr, s.mounter, s.client) + return sattr, &inodeOperations{ + fileState: fileState, + cachingInodeOps: fsutil.NewCachingInodeOperations(ctx, fileState, uattr, fsutil.CachingInodeOperationsOptions{ + ForcePageCache: s.superBlockFlags.ForcePageCache, + LimitHostFDTranslation: s.limitHostFDTranslation, + }), + } +} + +// Root returns the root of a 9p mount. This mount is bound to a 9p server +// based on conn. Otherwise configuration parameters are: +// +// * dev: connection id +// * filesystem: the filesystem backing the mount +// * superBlockFlags: the mount flags describing general mount options +// * opts: parsed 9p mount options +func Root(ctx context.Context, dev string, filesystem fs.Filesystem, superBlockFlags fs.MountSourceFlags, o opts) (*fs.Inode, error) { + // The mounting EUID/EGID will be cached by this file system. This will + // be used to assign ownership to files that the Gofer owns. 
+ mounter := fs.FileOwnerFromContext(ctx) + + conn, err := unet.NewSocket(o.fd) + if err != nil { + return nil, err + } + + // Construct the session. + s := session{ + connID: dev, + msize: o.msize, + version: o.version, + cachePolicy: o.policy, + aname: o.aname, + superBlockFlags: superBlockFlags, + limitHostFDTranslation: o.limitHostFDTranslation, + overlayfsStaleRead: o.overlayfsStaleRead, + mounter: mounter, + } + s.EnableLeakCheck("gofer.session") + + if o.privateunixsocket { + s.overrides = newOverrideMaps() + } + + // Construct the MountSource with the session and superBlockFlags. + m := fs.NewMountSource(ctx, &s, filesystem, superBlockFlags) + + // Given that gofer files can consume host FDs, restrict the number + // of files that can be held by the cache. + m.SetDirentCacheMaxSize(DefaultDirentCacheSize) + m.SetDirentCacheLimiter(fs.DirentCacheLimiterFromContext(ctx)) + + // Send the Tversion request. + s.client, err = p9.NewClient(conn, s.msize, s.version) + if err != nil { + // Drop our reference on the session, it needs to be torn down. + s.DecRef() + return nil, err + } + + // Notify that we're about to call the Gofer and block. + ctx.UninterruptibleSleepStart(false) + // Send the Tattach request. + s.attach.file, err = s.client.Attach(s.aname) + ctx.UninterruptibleSleepFinish(false) + if err != nil { + // Same as above. + s.DecRef() + return nil, err + } + + qid, valid, attr, err := s.attach.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + s.attach.close(ctx) + // Same as above, but after we execute the Close request. + s.DecRef() + return nil, err + } + + sattr, iops := newInodeOperations(ctx, &s, s.attach, qid, valid, attr) + return fs.NewInode(ctx, iops, m, sattr), nil +} + +// newOverrideMaps creates a new overrideMaps. 
+func newOverrideMaps() *overrideMaps {
+	return &overrideMaps{
+		keyMap:  make(map[device.MultiDeviceKey]*overrideInfo),
+		pathMap: make(map[*overrideInfo]string),
+	}
+}
+
+// fillKeyMap populates key and dirent maps upon restore from saved pathmap.
+func (s *session) fillKeyMap(ctx context.Context) error {
+	unlock := s.overrides.lock()
+	defer unlock()
+
+	for ep, dirPath := range s.overrides.pathMap {
+		_, file, err := s.attach.walk(ctx, splitAbsolutePath(dirPath))
+		if err != nil {
+			return fmt.Errorf("error filling endpointmaps, failed to walk to %q: %v", dirPath, err)
+		}
+
+		qid, _, attr, err := file.getAttr(ctx, p9.AttrMaskAll())
+		if err != nil {
+			return fmt.Errorf("failed to get file attributes of %s: %v", dirPath, err)
+		}
+
+		key := device.MultiDeviceKey{
+			Device:          attr.RDev,
+			SecondaryDevice: s.connID,
+			Inode:           qid.Path,
+		}
+
+		s.overrides.keyMap[key] = ep
+	}
+	return nil
+}
+
+// fillPathMap populates paths for overrides from dirents in direntMap
+// before save.
+func (s *session) fillPathMap() error {
+	unlock := s.overrides.lock()
+	defer unlock()
+
+	for _, endpoint := range s.overrides.keyMap {
+		mountRoot := endpoint.dirent.MountRoot()
+		dirPath, _ := endpoint.dirent.FullName(mountRoot)
+		// Release the reference taken by MountRoot as soon as it has been
+		// used; a defer here would accumulate one pending DecRef (and one
+		// held reference) per map entry until the function returns.
+		mountRoot.DecRef()
+		if dirPath == "" {
+			return fmt.Errorf("error getting path from dirent")
+		}
+		s.overrides.pathMap[endpoint] = dirPath
+	}
+	return nil
+}
+
+// restoreEndpointMaps recreates and fills the key and dirent maps.
+func (s *session) restoreEndpointMaps(ctx context.Context) error {
+	// When restoring, only need to create the keyMap because the dirent and path
+	// maps got stored through the save.
+	s.overrides.keyMap = make(map[device.MultiDeviceKey]*overrideInfo)
+	if err := s.fillKeyMap(ctx); err != nil {
+		return fmt.Errorf("failed to insert sockets into endpoint map: %v", err)
+	}
+
+	// Re-create pathMap because it can no longer be trusted as socket paths can
+	// change while process continues to run.
Empty pathMap will be re-filled upon + // next save. + s.overrides.pathMap = make(map[*overrideInfo]string) + return nil +} diff --git a/pkg/sentry/fs/gofer/session_state.go b/pkg/sentry/fs/gofer/session_state.go new file mode 100644 index 000000000..2d398b753 --- /dev/null +++ b/pkg/sentry/fs/gofer/session_state.go @@ -0,0 +1,113 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/unet" +) + +// beforeSave is invoked by stateify. +func (s *session) beforeSave() { + if s.overrides != nil { + if err := s.fillPathMap(); err != nil { + panic("failed to save paths to override map before saving" + err.Error()) + } + } +} + +// afterLoad is invoked by stateify. +func (s *session) afterLoad() { + // The restore environment contains the 9p connection of this mount. 
+ fsys := filesystem{} + env, ok := fs.CurrentRestoreEnvironment() + if !ok { + panic("failed to find restore environment") + } + mounts, ok := env.MountSources[fsys.Name()] + if !ok { + panic("failed to find mounts for filesystem type " + fsys.Name()) + } + var args fs.MountArgs + var found bool + for _, mount := range mounts { + if mount.Dev == s.connID { + args = mount + found = true + } + } + if !found { + panic(fmt.Sprintf("no connection for connection id %q", s.connID)) + } + + // Validate the mount flags and options. + opts, err := options(args.DataString) + if err != nil { + panic("failed to parse mount options: " + err.Error()) + } + if opts.msize != s.msize { + panic(fmt.Sprintf("new message size %v, want %v", opts.msize, s.msize)) + } + if opts.version != s.version { + panic(fmt.Sprintf("new version %v, want %v", opts.version, s.version)) + } + if opts.policy != s.cachePolicy { + panic(fmt.Sprintf("new cache policy %v, want %v", opts.policy, s.cachePolicy)) + } + if opts.aname != s.aname { + panic(fmt.Sprintf("new attach name %v, want %v", opts.aname, s.aname)) + } + + // Check if overrideMaps exist when uds sockets are enabled (only pathmaps + // will actually have been saved). + if opts.privateunixsocket != (s.overrides != nil) { + panic(fmt.Sprintf("new privateunixsocket option %v, want %v", opts.privateunixsocket, s.overrides != nil)) + } + if args.Flags != s.superBlockFlags { + panic(fmt.Sprintf("new mount flags %v, want %v", args.Flags, s.superBlockFlags)) + } + + // Manually restore the connection. + conn, err := unet.NewSocket(opts.fd) + if err != nil { + panic(fmt.Sprintf("failed to create Socket for FD %d: %v", opts.fd, err)) + } + + // Manually restore the client. + s.client, err = p9.NewClient(conn, s.msize, s.version) + if err != nil { + panic(fmt.Sprintf("failed to connect client to server: %v", err)) + } + + // Manually restore the attach point. 
+ s.attach.file, err = s.client.Attach(s.aname) + if err != nil { + panic(fmt.Sprintf("failed to attach to aname: %v", err)) + } + + // If private unix sockets are enabled, create and fill the session's endpoint + // maps. + if opts.privateunixsocket { + ctx := &dummyClockContext{context.Background()} + + if err = s.restoreEndpointMaps(ctx); err != nil { + panic("failed to restore endpoint maps: " + err.Error()) + } + } +} diff --git a/pkg/sentry/fs/gofer/socket.go b/pkg/sentry/fs/gofer/socket.go new file mode 100644 index 000000000..40f2c1cad --- /dev/null +++ b/pkg/sentry/fs/gofer/socket.go @@ -0,0 +1,152 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/host" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/syserr" + "gvisor.dev/gvisor/pkg/waiter" +) + +// BoundEndpoint returns a gofer-backed transport.BoundEndpoint. 
+func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) transport.BoundEndpoint { + if !fs.IsSocket(i.fileState.sattr) { + return nil + } + + if i.session().overrides != nil { + unlock := i.session().overrides.lock() + defer unlock() + ep := i.session().overrides.getBoundEndpoint(i.fileState.key) + if ep != nil { + return ep + } + + // Not found in overrides map, it may be a gofer backed unix socket... + } + + inode.IncRef() + return &endpoint{inode, i.fileState.file.file, path} +} + +// LINT.IfChange + +// endpoint is a Gofer-backed transport.BoundEndpoint. +// +// An endpoint's lifetime is the time between when InodeOperations.BoundEndpoint() +// is called and either BoundEndpoint.BidirectionalConnect or +// BoundEndpoint.UnidirectionalConnect is called. +type endpoint struct { + // inode is the filesystem inode which produced this endpoint. + inode *fs.Inode + + // file is the p9 file that contains a single unopened fid. + file p9.File + + // path is the sentry path where this endpoint is bound. + path string +} + +func sockTypeToP9(t linux.SockType) (p9.ConnectFlags, bool) { + switch t { + case linux.SOCK_STREAM: + return p9.StreamSocket, true + case linux.SOCK_SEQPACKET: + return p9.SeqpacketSocket, true + case linux.SOCK_DGRAM: + return p9.DgramSocket, true + } + return 0, false +} + +// BidirectionalConnect implements ConnectableEndpoint.BidirectionalConnect. +func (e *endpoint) BidirectionalConnect(ctx context.Context, ce transport.ConnectingEndpoint, returnConnect func(transport.Receiver, transport.ConnectedEndpoint)) *syserr.Error { + cf, ok := sockTypeToP9(ce.Type()) + if !ok { + return syserr.ErrConnectionRefused + } + + // No lock ordering required as only the ConnectingEndpoint has a mutex. + ce.Lock() + + // Check connecting state. 
+ if ce.Connected() { + ce.Unlock() + return syserr.ErrAlreadyConnected + } + if ce.Listening() { + ce.Unlock() + return syserr.ErrInvalidEndpointState + } + + hostFile, err := e.file.Connect(cf) + if err != nil { + ce.Unlock() + return syserr.ErrConnectionRefused + } + + c, serr := host.NewConnectedEndpoint(ctx, hostFile, ce.WaiterQueue(), e.path) + if serr != nil { + ce.Unlock() + log.Warningf("Gofer returned invalid host socket for BidirectionalConnect; file %+v flags %+v: %v", e.file, cf, serr) + return serr + } + + returnConnect(c, c) + ce.Unlock() + c.Init() + + return nil +} + +// UnidirectionalConnect implements +// transport.BoundEndpoint.UnidirectionalConnect. +func (e *endpoint) UnidirectionalConnect(ctx context.Context) (transport.ConnectedEndpoint, *syserr.Error) { + hostFile, err := e.file.Connect(p9.DgramSocket) + if err != nil { + return nil, syserr.ErrConnectionRefused + } + + c, serr := host.NewConnectedEndpoint(ctx, hostFile, &waiter.Queue{}, e.path) + if serr != nil { + log.Warningf("Gofer returned invalid host socket for UnidirectionalConnect; file %+v: %v", e.file, serr) + return nil, serr + } + c.Init() + + // We don't need the receiver. + c.CloseRecv() + c.Release() + + return c, nil +} + +// Release implements transport.BoundEndpoint.Release. +func (e *endpoint) Release() { + e.inode.DecRef() +} + +// Passcred implements transport.BoundEndpoint.Passcred. +func (e *endpoint) Passcred() bool { + return false +} + +// LINT.ThenChange(../../fsimpl/gofer/socket.go) diff --git a/pkg/sentry/fs/gofer/util.go b/pkg/sentry/fs/gofer/util.go new file mode 100644 index 000000000..47a6c69bf --- /dev/null +++ b/pkg/sentry/fs/gofer/util.go @@ -0,0 +1,72 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/p9" + "gvisor.dev/gvisor/pkg/sentry/fs" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" +) + +func utimes(ctx context.Context, file contextFile, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + + // Replace requests to use the "system time" with the current time to + // ensure that timestamps remain consistent with the remote + // filesystem. + now := ktime.NowFromContext(ctx) + if ts.ATimeSetSystemTime { + ts.ATime = now + } + if ts.MTimeSetSystemTime { + ts.MTime = now + } + mask := p9.SetAttrMask{ + ATime: !ts.ATimeOmit, + ATimeNotSystemTime: true, + MTime: !ts.MTimeOmit, + MTimeNotSystemTime: true, + } + as, ans := ts.ATime.Unix() + ms, mns := ts.MTime.Unix() + attr := p9.SetAttr{ + ATimeSeconds: uint64(as), + ATimeNanoSeconds: uint64(ans), + MTimeSeconds: uint64(ms), + MTimeNanoSeconds: uint64(mns), + } + // 9p2000.L SetAttr: "If a time bit is set without the corresponding SET bit, + // the current system time on the server is used instead of the value sent + // in the request." 
+ return file.setAttr(ctx, mask, attr) +} + +func openFlagsFromPerms(p fs.PermMask) (p9.OpenFlags, error) { + switch { + case p.Read && p.Write: + return p9.ReadWrite, nil + case p.Write: + return p9.WriteOnly, nil + case p.Read: + return p9.ReadOnly, nil + default: + return 0, syscall.EINVAL + } +} diff --git a/pkg/sentry/fs/host/BUILD b/pkg/sentry/fs/host/BUILD new file mode 100644 index 000000000..aabce6cc9 --- /dev/null +++ b/pkg/sentry/fs/host/BUILD @@ -0,0 +1,82 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "host", + srcs = [ + "control.go", + "descriptor.go", + "descriptor_state.go", + "device.go", + "file.go", + "host.go", + "inode.go", + "inode_state.go", + "ioctl_unsafe.go", + "socket.go", + "socket_iovec.go", + "socket_state.go", + "socket_unsafe.go", + "tty.go", + "util.go", + "util_amd64_unsafe.go", + "util_arm64_unsafe.go", + "util_unsafe.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/fd", + "//pkg/fdnotifier", + "//pkg/log", + "//pkg/refs", + "//pkg/safemem", + "//pkg/secio", + "//pkg/sentry/arch", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/socket/control", + "//pkg/sentry/socket/unix", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/unimpl", + "//pkg/sentry/uniqueid", + "//pkg/sync", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/unet", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "host_test", + size = "small", + srcs = [ + "descriptor_test.go", + "inode_test.go", + "socket_test.go", + "wait_test.go", + ], + library = ":host", + deps = [ + "//pkg/fd", + "//pkg/fdnotifier", + "//pkg/sentry/contexttest", + "//pkg/sentry/kernel/time", + "//pkg/sentry/socket", + "//pkg/sentry/socket/unix/transport", + "//pkg/syserr", + 
"//pkg/tcpip", + "//pkg/usermem", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/fs/host/control.go b/pkg/sentry/fs/host/control.go new file mode 100644 index 000000000..39299b7e4 --- /dev/null +++ b/pkg/sentry/fs/host/control.go @@ -0,0 +1,97 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/socket/control" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" +) + +// LINT.IfChange + +type scmRights struct { + fds []int +} + +func newSCMRights(fds []int) control.SCMRights { + return &scmRights{fds} +} + +// Files implements control.SCMRights.Files. +func (c *scmRights) Files(ctx context.Context, max int) (control.RightsFiles, bool) { + n := max + var trunc bool + if l := len(c.fds); n > l { + n = l + } else if n < l { + trunc = true + } + + rf := control.RightsFiles(fdsToFiles(ctx, c.fds[:n])) + + // Only consume converted FDs (fdsToFiles may convert fewer than n FDs). + c.fds = c.fds[len(rf):] + return rf, trunc +} + +// Clone implements transport.RightsControlMessage.Clone. +func (c *scmRights) Clone() transport.RightsControlMessage { + // Host rights never need to be cloned. + return nil +} + +// Release implements transport.RightsControlMessage.Release. 
+func (c *scmRights) Release() { + for _, fd := range c.fds { + syscall.Close(fd) + } + c.fds = nil +} + +// If an error is encountered, only files created before the error will be +// returned. This is what Linux does. +func fdsToFiles(ctx context.Context, fds []int) []*fs.File { + files := make([]*fs.File, 0, len(fds)) + for _, fd := range fds { + // Get flags. We do it here because they may be modified + // by subsequent functions. + fileFlags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFL, 0) + if errno != 0 { + ctx.Warningf("Error retrieving host FD flags: %v", error(errno)) + break + } + + // Create the file backed by hostFD. + file, err := NewFile(ctx, fd) + if err != nil { + ctx.Warningf("Error creating file from host FD: %v", err) + break + } + + // Set known flags. + file.SetFlags(fs.SettableFileFlags{ + NonBlocking: fileFlags&syscall.O_NONBLOCK != 0, + }) + + files = append(files, file) + } + return files +} + +// LINT.ThenChange(../../fsimpl/host/control.go) diff --git a/pkg/sentry/fs/host/descriptor.go b/pkg/sentry/fs/host/descriptor.go new file mode 100644 index 000000000..cfdce6a74 --- /dev/null +++ b/pkg/sentry/fs/host/descriptor.go @@ -0,0 +1,99 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package host + +import ( + "fmt" + "syscall" + + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/waiter" +) + +// descriptor wraps a host fd. +// +// +stateify savable +type descriptor struct { + // If origFD >= 0, it is the host fd that this file was originally created + // from, which must be available at time of restore. The FD can be closed + // after descriptor is created. + origFD int + + // wouldBlock is true if value (below) points to a file that can + // return EWOULDBLOCK for operations that would block. + wouldBlock bool + + // value is the wrapped host fd. It is never saved or restored + // directly. + value int `state:"nosave"` +} + +// newDescriptor returns a wrapped host file descriptor. On success, +// the descriptor is registered for event notifications with queue. +func newDescriptor(fd int, saveable bool, wouldBlock bool, queue *waiter.Queue) (*descriptor, error) { + ownedFD := fd + origFD := -1 + if saveable { + var err error + ownedFD, err = syscall.Dup(fd) + if err != nil { + return nil, err + } + origFD = fd + } + if wouldBlock { + if err := syscall.SetNonblock(ownedFD, true); err != nil { + return nil, err + } + if err := fdnotifier.AddFD(int32(ownedFD), queue); err != nil { + return nil, err + } + } + return &descriptor{ + origFD: origFD, + wouldBlock: wouldBlock, + value: ownedFD, + }, nil +} + +// initAfterLoad initializes the value of the descriptor after Load. +func (d *descriptor) initAfterLoad(id uint64, queue *waiter.Queue) error { + var err error + d.value, err = syscall.Dup(d.origFD) + if err != nil { + return fmt.Errorf("failed to dup restored fd %d: %v", d.origFD, err) + } + if d.wouldBlock { + if err := syscall.SetNonblock(d.value, true); err != nil { + return err + } + if err := fdnotifier.AddFD(int32(d.value), queue); err != nil { + return err + } + } + return nil +} + +// Release releases all resources held by descriptor. 
+func (d *descriptor) Release() { + if d.wouldBlock { + fdnotifier.RemoveFD(int32(d.value)) + } + if err := syscall.Close(d.value); err != nil { + log.Warningf("error closing fd %d: %v", d.value, err) + } + d.value = -1 +} diff --git a/pkg/sentry/fs/host/descriptor_state.go b/pkg/sentry/fs/host/descriptor_state.go new file mode 100644 index 000000000..e880582ab --- /dev/null +++ b/pkg/sentry/fs/host/descriptor_state.go @@ -0,0 +1,29 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +// beforeSave is invoked by stateify. +func (d *descriptor) beforeSave() { + if d.origFD < 0 { + panic("donated file descriptor cannot be saved") + } +} + +// afterLoad is invoked by stateify. +func (d *descriptor) afterLoad() { + // value must be manually restored by the descriptor's parent using + // initAfterLoad. + d.value = -1 +} diff --git a/pkg/sentry/fs/host/descriptor_test.go b/pkg/sentry/fs/host/descriptor_test.go new file mode 100644 index 000000000..d8e4605b6 --- /dev/null +++ b/pkg/sentry/fs/host/descriptor_test.go @@ -0,0 +1,78 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "io/ioutil" + "path/filepath" + "syscall" + "testing" + + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/waiter" +) + +func TestDescriptorRelease(t *testing.T) { + for _, tc := range []struct { + name string + saveable bool + wouldBlock bool + }{ + {name: "all false"}, + {name: "saveable", saveable: true}, + {name: "wouldBlock", wouldBlock: true}, + } { + t.Run(tc.name, func(t *testing.T) { + dir, err := ioutil.TempDir("", "descriptor_test") + if err != nil { + t.Fatal("ioutil.TempDir() failed:", err) + } + + fd, err := syscall.Open(filepath.Join(dir, "file"), syscall.O_RDWR|syscall.O_CREAT, 0666) + if err != nil { + t.Fatal("failed to open temp file:", err) + } + + // FD ownership is transferred to the descritor. 
+ queue := &waiter.Queue{} + d, err := newDescriptor(fd, tc.saveable, tc.wouldBlock, queue) + if err != nil { + syscall.Close(fd) + t.Fatalf("newDescriptor(%d, %t, %t, queue) failed, err: %v", fd, tc.saveable, tc.wouldBlock, err) + } + if tc.saveable { + if d.origFD < 0 { + t.Errorf("saveable descriptor must preserve origFD, desc: %+v", d) + } + } + if tc.wouldBlock { + if !fdnotifier.HasFD(int32(d.value)) { + t.Errorf("FD not registered with notifier, desc: %+v", d) + } + } + + oldVal := d.value + d.Release() + if d.value != -1 { + t.Errorf("d.value want: -1, got: %d", d.value) + } + if tc.wouldBlock { + if fdnotifier.HasFD(int32(oldVal)) { + t.Errorf("FD not unregistered with notifier, desc: %+v", d) + } + } + }) + } +} diff --git a/pkg/sentry/fs/host/device.go b/pkg/sentry/fs/host/device.go new file mode 100644 index 000000000..484f0b58b --- /dev/null +++ b/pkg/sentry/fs/host/device.go @@ -0,0 +1,25 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "gvisor.dev/gvisor/pkg/sentry/device" +) + +// hostFileDevice is the host file virtual device. +var hostFileDevice = device.NewAnonMultiDevice() + +// hostPipeDevice is the host pipe virtual device. 
+var hostPipeDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/host/file.go b/pkg/sentry/fs/host/file.go new file mode 100644 index 000000000..3e48b8b2c --- /dev/null +++ b/pkg/sentry/fs/host/file.go @@ -0,0 +1,286 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "fmt" + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/secio" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// fileOperations implements fs.FileOperations for a host file descriptor. +// +// +stateify savable +type fileOperations struct { + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosplice"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + // iops are the Inode operations for this file. + iops *inodeOperations `state:"wait"` + + // a scratch buffer for reading directory entries. + dirinfo *dirInfo `state:"nosave"` + + // dirCursor is the directory cursor. + dirCursor string +} + +// fileOperations implements fs.FileOperations. 
+var _ fs.FileOperations = (*fileOperations)(nil) + +// NewFile creates a new File backed by the provided host file descriptor. If +// NewFile succeeds, ownership of the FD is transferred to the returned File. +// +// The returned File cannot be saved, since there is no guarantee that the same +// FD will exist or represent the same file at time of restore. If such a +// guarantee does exist, use ImportFile instead. +func NewFile(ctx context.Context, fd int) (*fs.File, error) { + return newFileFromDonatedFD(ctx, fd, false, false) +} + +// ImportFile creates a new File backed by the provided host file descriptor. +// Unlike NewFile, the file descriptor used by the File is duped from FD to +// ensure that later changes to FD are not reflected by the fs.File. +// +// If the returned file is saved, it will be restored by re-importing the FD +// originally passed to ImportFile. It is the restorer's responsibility to +// ensure that the FD represents the same file. +func ImportFile(ctx context.Context, fd int, isTTY bool) (*fs.File, error) { + return newFileFromDonatedFD(ctx, fd, true, isTTY) +} + +// newFileFromDonatedFD returns an fs.File from a donated FD. If the FD is +// saveable, then saveable is true. 
+func newFileFromDonatedFD(ctx context.Context, donated int, saveable, isTTY bool) (*fs.File, error) { + var s syscall.Stat_t + if err := syscall.Fstat(donated, &s); err != nil { + return nil, err + } + flags, err := fileFlagsFromDonatedFD(donated) + if err != nil { + return nil, err + } + switch s.Mode & syscall.S_IFMT { + case syscall.S_IFSOCK: + if isTTY { + return nil, fmt.Errorf("cannot import host socket as TTY") + } + + s, err := newSocket(ctx, donated, saveable) + if err != nil { + return nil, err + } + s.SetFlags(fs.SettableFileFlags{ + NonBlocking: flags.NonBlocking, + }) + return s, nil + default: + msrc := fs.NewNonCachingMountSource(ctx, &filesystem{}, fs.MountSourceFlags{}) + inode, err := newInode(ctx, msrc, donated, saveable) + if err != nil { + return nil, err + } + iops := inode.InodeOperations.(*inodeOperations) + + name := fmt.Sprintf("host:[%d]", inode.StableAttr.InodeID) + dirent := fs.NewDirent(ctx, inode, name) + defer dirent.DecRef() + + if isTTY { + return newTTYFile(ctx, dirent, flags, iops), nil + } + + return newFile(ctx, dirent, flags, iops), nil + } +} + +func fileFlagsFromDonatedFD(donated int) (fs.FileFlags, error) { + flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(donated), syscall.F_GETFL, 0) + if errno != 0 { + log.Warningf("Failed to get file flags for donated FD %d (errno=%d)", donated, errno) + return fs.FileFlags{}, syscall.EIO + } + accmode := flags & syscall.O_ACCMODE + return fs.FileFlags{ + Direct: flags&syscall.O_DIRECT != 0, + NonBlocking: flags&syscall.O_NONBLOCK != 0, + Sync: flags&syscall.O_SYNC != 0, + Append: flags&syscall.O_APPEND != 0, + Read: accmode == syscall.O_RDONLY || accmode == syscall.O_RDWR, + Write: accmode == syscall.O_WRONLY || accmode == syscall.O_RDWR, + }, nil +} + +// newFile returns a new fs.File. 
+func newFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags, iops *inodeOperations) *fs.File { + if !iops.ReturnsWouldBlock() { + // Allow reading/writing at an arbitrary offset for files + // that support it. + flags.Pread = true + flags.Pwrite = true + } + return fs.NewFile(ctx, dirent, flags, &fileOperations{iops: iops}) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (f *fileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + f.iops.fileState.queue.EventRegister(e, mask) + fdnotifier.UpdateFD(int32(f.iops.fileState.FD())) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (f *fileOperations) EventUnregister(e *waiter.Entry) { + f.iops.fileState.queue.EventUnregister(e) + fdnotifier.UpdateFD(int32(f.iops.fileState.FD())) +} + +// Readiness uses the poll() syscall to check the status of the underlying FD. +func (f *fileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return fdnotifier.NonBlockingPoll(int32(f.iops.fileState.FD()), mask) +} + +// Readdir implements fs.FileOperations.Readdir. +func (f *fileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &f.dirCursor, + } + return fs.DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset()) +} + +// IterateDir implements fs.DirIterator.IterateDir. +func (f *fileOperations) IterateDir(ctx context.Context, d *fs.Dirent, dirCtx *fs.DirCtx, offset int) (int, error) { + if f.dirinfo == nil { + f.dirinfo = new(dirInfo) + f.dirinfo.buf = make([]byte, usermem.PageSize) + } + entries, err := f.iops.readdirAll(f.dirinfo) + if err != nil { + return offset, err + } + count, err := fs.GenericReaddir(dirCtx, fs.NewSortedDentryMap(entries)) + return offset + count, err +} + +// Write implements fs.FileOperations.Write. 
+func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + // Would this file block? + if f.iops.ReturnsWouldBlock() { + // These files can't be memory mapped, assert this. This also + // means that writes do not need to synchronize with memory + // mappings nor metadata cached by this file's fs.Inode. + if canMap(file.Dirent.Inode) { + panic("files that can return EWOULDBLOCK cannot be memory mapped") + } + // Ignore the offset, these files don't support writing at + // an arbitrary offset. + writer := fd.NewReadWriter(f.iops.fileState.FD()) + n, err := src.CopyInTo(ctx, safemem.FromIOWriter{writer}) + if isBlockError(err) { + err = syserror.ErrWouldBlock + } + return n, err + } + if !file.Dirent.Inode.MountSource.Flags.ForcePageCache { + writer := secio.NewOffsetWriter(fd.NewReadWriter(f.iops.fileState.FD()), offset) + return src.CopyInTo(ctx, safemem.FromIOWriter{writer}) + } + return f.iops.cachingInodeOps.Write(ctx, src, offset) +} + +// Read implements fs.FileOperations.Read. +func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + // Would this file block? + if f.iops.ReturnsWouldBlock() { + // These files can't be memory mapped, assert this. This also + // means that reads do not need to synchronize with memory + // mappings nor metadata cached by this file's fs.Inode. + if canMap(file.Dirent.Inode) { + panic("files that can return EWOULDBLOCK cannot be memory mapped") + } + // Ignore the offset, these files don't support reading at + // an arbitrary offset. + reader := fd.NewReadWriter(f.iops.fileState.FD()) + n, err := dst.CopyOutFrom(ctx, safemem.FromIOReader{reader}) + if isBlockError(err) { + // If we got any data at all, return it as a "completed" partial read + // rather than retrying until complete. 
+ if n != 0 { + err = nil + } else { + err = syserror.ErrWouldBlock + } + } + return n, err + } + if !file.Dirent.Inode.MountSource.Flags.ForcePageCache { + reader := secio.NewOffsetReader(fd.NewReadWriter(f.iops.fileState.FD()), offset) + return dst.CopyOutFrom(ctx, safemem.FromIOReader{reader}) + } + return f.iops.cachingInodeOps.Read(ctx, file, dst, offset) +} + +// Fsync implements fs.FileOperations.Fsync. +func (f *fileOperations) Fsync(ctx context.Context, file *fs.File, start int64, end int64, syncType fs.SyncType) error { + switch syncType { + case fs.SyncAll, fs.SyncData: + if err := file.Dirent.Inode.WriteOut(ctx); err != nil { + return err + } + fallthrough + case fs.SyncBackingStorage: + return syscall.Fsync(f.iops.fileState.FD()) + } + panic("invalid sync type") +} + +// Flush implements fs.FileOperations.Flush. +func (f *fileOperations) Flush(context.Context, *fs.File) error { + // This is a no-op because flushing the resource backing this + // file would mean closing it. We can't do that because other + // open files may depend on the backing host FD. + return nil +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (f *fileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + if !canMap(file.Dirent.Inode) { + return syserror.ENODEV + } + return fsutil.GenericConfigureMMap(file, f.iops.cachingInodeOps, opts) +} + +// Seek implements fs.FileOperations.Seek. +func (f *fileOperations) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return fsutil.SeekWithDirCursor(ctx, file, whence, offset, &f.dirCursor) +} diff --git a/pkg/sentry/fs/host/host.go b/pkg/sentry/fs/host/host.go new file mode 100644 index 000000000..081ba1dd8 --- /dev/null +++ b/pkg/sentry/fs/host/host.go @@ -0,0 +1,59 @@ +// Copyright 2020 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package host supports file descriptors imported directly. +package host + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/syserror" +) + +// filesystem is a host filesystem. +// +// +stateify savable +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +const FilesystemName = "host" + +// Name is the name of the filesystem. +func (*filesystem) Name() string { + return FilesystemName +} + +// Mount returns an error. Mounting hostfs is not allowed. +func (*filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, dataObj interface{}) (*fs.Inode, error) { + return nil, syserror.EPERM +} + +// AllowUserMount prohibits users from using mount(2) with this file system. +func (*filesystem) AllowUserMount() bool { + return false +} + +// AllowUserList prohibits this filesystem to be listed in /proc/filesystems. +func (*filesystem) AllowUserList() bool { + return false +} + +// Flags returns that there is nothing special about this file system. 
+func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} diff --git a/pkg/sentry/fs/host/inode.go b/pkg/sentry/fs/host/inode.go new file mode 100644 index 000000000..fbfba1b58 --- /dev/null +++ b/pkg/sentry/fs/host/inode.go @@ -0,0 +1,416 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/secio" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/waiter" +) + +// inodeOperations implements fs.InodeOperations for an fs.Inodes backed +// by a host file descriptor. +// +// +stateify savable +type inodeOperations struct { + fsutil.InodeNotVirtual `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + + // fileState implements fs.CachedFileObject. It exists + // to break a circular load dependency between inodeOperations + // and cachingInodeOps (below). + fileState *inodeFileState `state:"wait"` + + // cachedInodeOps implements memmap.Mappable. + cachingInodeOps *fsutil.CachingInodeOperations + + // readdirMu protects the file offset on the host FD. 
This is needed + // for readdir because getdents must use the kernel offset, so + // concurrent readdirs must be exclusive. + // + // All read/write functions pass the offset directly to the kernel and + // thus don't need a lock. + readdirMu sync.Mutex `state:"nosave"` +} + +// inodeFileState implements fs.CachedFileObject and otherwise fully +// encapsulates state that needs to be manually loaded on restore for +// this file object. +// +// This unfortunate structure exists because fs.CachingInodeOperations +// defines afterLoad and therefore cannot be lazily loaded (to break a +// circular load dependency between it and inodeOperations). Even with +// lazy loading, this approach defines the dependencies between objects +// and the expected load behavior more concretely. +// +// +stateify savable +type inodeFileState struct { + // descriptor is the backing host FD. + descriptor *descriptor `state:"wait"` + + // Event queue for blocking operations. + queue waiter.Queue `state:"zerovalue"` + + // sattr is used to restore the inodeOperations. + sattr fs.StableAttr `state:"wait"` + + // savedUAttr is only allocated during S/R. It points to the save-time + // unstable attributes and is used to validate restore-time ones. + // + // Note that these unstable attributes are only used to detect cross-S/R + // external file system metadata changes. They may differ from the + // cached unstable attributes in cachingInodeOps, as that might differ + // from the external file system attributes if there had been WriteOut + // failures. S/R is transparent to Sentry and the latter will continue + // using its cached values after restore. + savedUAttr *fs.UnstableAttr +} + +// ReadToBlocksAt implements fsutil.CachedFileObject.ReadToBlocksAt. 
+func (i *inodeFileState) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + // TODO(jamieliu): Using safemem.FromIOReader here is wasteful for two + // reasons: + // + // - Using preadv instead of iterated preads saves on host system calls. + // + // - Host system calls can handle destination memory that would fault in + // gr3 (i.e. they can accept safemem.Blocks with NeedSafecopy() == true), + // so the buffering performed by FromIOReader is unnecessary. + // + // This also applies to the write path below. + return safemem.FromIOReader{secio.NewOffsetReader(fd.NewReadWriter(i.FD()), int64(offset))}.ReadToBlocks(dsts) +} + +// WriteFromBlocksAt implements fsutil.CachedFileObject.WriteFromBlocksAt. +func (i *inodeFileState) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + return safemem.FromIOWriter{secio.NewOffsetWriter(fd.NewReadWriter(i.FD()), int64(offset))}.WriteFromBlocks(srcs) +} + +// SetMaskedAttributes implements fsutil.CachedFileObject.SetMaskedAttributes. +func (i *inodeFileState) SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr, _ bool) error { + if mask.Empty() { + return nil + } + if mask.UID || mask.GID { + return syserror.EPERM + } + if mask.Perms { + if err := syscall.Fchmod(i.FD(), uint32(attr.Perms.LinuxMode())); err != nil { + return err + } + } + if mask.Size { + if err := syscall.Ftruncate(i.FD(), attr.Size); err != nil { + return err + } + } + if mask.AccessTime || mask.ModificationTime { + ts := fs.TimeSpec{ + ATime: attr.AccessTime, + ATimeOmit: !mask.AccessTime, + MTime: attr.ModificationTime, + MTimeOmit: !mask.ModificationTime, + } + if err := setTimestamps(i.FD(), ts); err != nil { + return err + } + } + return nil +} + +// Sync implements fsutil.CachedFileObject.Sync. +func (i *inodeFileState) Sync(ctx context.Context) error { + return syscall.Fsync(i.FD()) +} + +// FD implements fsutil.CachedFileObject.FD. 
+func (i *inodeFileState) FD() int { + return i.descriptor.value +} + +func (i *inodeFileState) unstableAttr(ctx context.Context) (fs.UnstableAttr, error) { + var s syscall.Stat_t + if err := syscall.Fstat(i.FD(), &s); err != nil { + return fs.UnstableAttr{}, err + } + return unstableAttr(&s), nil +} + +// Allocate implements fsutil.CachedFileObject.Allocate. +func (i *inodeFileState) Allocate(_ context.Context, offset, length int64) error { + return syscall.Fallocate(i.FD(), 0, offset, length) +} + +// inodeOperations implements fs.InodeOperations. +var _ fs.InodeOperations = (*inodeOperations)(nil) + +// newInode returns a new fs.Inode backed by the host FD. +func newInode(ctx context.Context, msrc *fs.MountSource, fd int, saveable bool) (*fs.Inode, error) { + // Retrieve metadata. + var s syscall.Stat_t + err := syscall.Fstat(fd, &s) + if err != nil { + return nil, err + } + + fileState := &inodeFileState{ + sattr: stableAttr(&s), + } + + // Initialize the wrapped host file descriptor. + fileState.descriptor, err = newDescriptor(fd, saveable, wouldBlock(&s), &fileState.queue) + if err != nil { + return nil, err + } + + // Build the fs.InodeOperations. + uattr := unstableAttr(&s) + iops := &inodeOperations{ + fileState: fileState, + cachingInodeOps: fsutil.NewCachingInodeOperations(ctx, fileState, uattr, fsutil.CachingInodeOperationsOptions{ + ForcePageCache: msrc.Flags.ForcePageCache, + }), + } + + // Return the fs.Inode. + return fs.NewInode(ctx, iops, msrc, fileState.sattr), nil +} + +// Mappable implements fs.InodeOperations.Mappable. +func (i *inodeOperations) Mappable(inode *fs.Inode) memmap.Mappable { + if !canMap(inode) { + return nil + } + return i.cachingInodeOps +} + +// ReturnsWouldBlock returns true if this host FD can return EWOULDBLOCK for +// operations that would block. +func (i *inodeOperations) ReturnsWouldBlock() bool { + return i.fileState.descriptor.wouldBlock +} + +// Release implements fs.InodeOperations.Release. 
+func (i *inodeOperations) Release(context.Context) { + i.fileState.descriptor.Release() + i.cachingInodeOps.Release() +} + +// Lookup implements fs.InodeOperations.Lookup. +func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + return nil, syserror.ENOENT +} + +// Create implements fs.InodeOperations.Create. +func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + return nil, syserror.EPERM + +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syserror.EPERM +} + +// CreateLink implements fs.InodeOperations.CreateLink. +func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error { + return syserror.EPERM +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +func (*inodeOperations) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error { + return syserror.EPERM +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return syserror.EPERM +} + +// Remove implements fs.InodeOperations.Remove. +func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// Rename implements fs.InodeOperations.Rename. 
// Renaming imported host FDs is not permitted.
func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {
	return syserror.EPERM
}

// Bind implements fs.InodeOperations.Bind. Binding Unix sockets under an
// imported host FD is not supported.
func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) {
	return nil, syserror.EOPNOTSUPP
}

// BoundEndpoint implements fs.InodeOperations.BoundEndpoint. Imported host
// FDs never expose a bound socket endpoint.
func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) transport.BoundEndpoint {
	return nil
}

// GetFile implements fs.InodeOperations.GetFile.
func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return newFile(ctx, d, flags, i), nil
}

// canMap returns true if this fs.Inode can be memory mapped.
func canMap(inode *fs.Inode) bool {
	// FIXME(b/38213152): Some obscure character devices can be mapped.
	return fs.IsFile(inode.StableAttr)
}

// UnstableAttr implements fs.InodeOperations.UnstableAttr.
func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
	// When the kernel supports mapping host FDs, we do so to take
	// advantage of the host page cache. We forego updating fs.Inodes
	// because the host manages consistency of its own inode structures.
	//
	// For fs.Inodes that can never be mapped we take advantage of
	// synchronizing metadata updates through host caches.
	//
	// So can we use host kernel metadata caches?
	if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) {
		// Then just obtain the attributes from the host via fstat(2).
		return i.fileState.unstableAttr(ctx)
	}
	// No, we're maintaining consistency of metadata ourselves.
	return i.cachingInodeOps.UnstableAttr(ctx, inode)
}

// Check implements fs.InodeOperations.Check.
func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
	return fs.ContextCanAccessFile(ctx, inode, p)
}

// SetOwner implements fs.InodeOperations.SetOwner. Ownership changes are not
// permitted for imported host FDs.
func (i *inodeOperations) SetOwner(context.Context, *fs.Inode, fs.FileOwner) error {
	return syserror.EPERM
}

// SetPermissions implements fs.InodeOperations.SetPermissions.
func (i *inodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, f fs.FilePermissions) bool {
	// Can we use host kernel metadata caches?
	if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) {
		// Then just change the permissions on the FD, the host
		// will synchronize the metadata update with any host
		// inode and page cache.
		return syscall.Fchmod(i.fileState.FD(), uint32(f.LinuxMode())) == nil
	}
	// Otherwise update our cached metadata.
	return i.cachingInodeOps.SetPermissions(ctx, inode, f)
}

// SetTimestamps implements fs.InodeOperations.SetTimestamps.
func (i *inodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error {
	// Can we use host kernel metadata caches?
	if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) {
		// Then just change the timestamps on the FD, the host
		// will synchronize the metadata update with any host
		// inode and page cache.
		return setTimestamps(i.fileState.FD(), ts)
	}
	// Otherwise update our cached metadata.
	return i.cachingInodeOps.SetTimestamps(ctx, inode, ts)
}

// Truncate implements fs.InodeOperations.Truncate.
func (i *inodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error {
	// Is the file not memory-mappable?
	if !canMap(inode) {
		// Then just change the file size on the FD, the host
		// will synchronize the metadata update with any host
		// inode and page cache.
		return syscall.Ftruncate(i.fileState.FD(), size)
	}
	// Otherwise we need to go through cachingInodeOps, even if the host page
	// cache is in use, to invalidate private copies of truncated pages.
	return i.cachingInodeOps.Truncate(ctx, inode, size)
}

// Allocate implements fs.InodeOperations.Allocate.
func (i *inodeOperations) Allocate(ctx context.Context, inode *fs.Inode, offset, length int64) error {
	// Is the file not memory-mappable?
	if !canMap(inode) {
		// Then just send the call to the FD, the host will synchronize the metadata
		// update with any host inode and page cache.
		return i.fileState.Allocate(ctx, offset, length)
	}
	// Otherwise we need to go through cachingInodeOps, even if the host page
	// cache is in use, to invalidate private copies of truncated pages.
	return i.cachingInodeOps.Allocate(ctx, offset, length)
}

// WriteOut implements fs.InodeOperations.WriteOut.
func (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error {
	// Nothing to flush for read-only mounts.
	if inode.MountSource.Flags.ReadOnly {
		return nil
	}
	// Have we been using host kernel metadata caches?
	if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) {
		// Then the metadata is already up to date on the host.
		return nil
	}
	// Otherwise we need to write out cached pages and attributes
	// that are dirty.
	return i.cachingInodeOps.WriteOut(ctx, inode)
}

// Readlink implements fs.InodeOperations.Readlink.
func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
	return readLink(i.fileState.FD())
}

// Getlink implements fs.InodeOperations.Getlink. Host symlinks are resolved
// by reading the link target rather than via a cached Dirent.
func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
	if !fs.IsSymlink(i.fileState.sattr) {
		return nil, syserror.ENOLINK
	}
	return nil, fs.ErrResolveViaReadlink
}

// StatFS implements fs.InodeOperations.StatFS.
// StatFS is not supported for imported host FDs.
func (i *inodeOperations) StatFS(context.Context) (fs.Info, error) {
	return fs.Info{}, syserror.ENOSYS
}

// AddLink implements fs.InodeOperations.AddLink.
// Link counts are managed by the host, so this is a no-op.
func (i *inodeOperations) AddLink() {}

// DropLink implements fs.InodeOperations.DropLink.
// Link counts are managed by the host, so this is a no-op.
func (i *inodeOperations) DropLink() {}

// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange.
// Status change times are managed by the host, so this is a no-op.
func (i *inodeOperations) NotifyStatusChange(ctx context.Context) {}

// readdirAll returns all of the directory entries in i.
func (i *inodeOperations) readdirAll(d *dirInfo) (map[string]fs.DentAttr, error) {
	// We only support non-directory file descriptors that have been
	// imported, so just claim that this isn't a directory, even if it is.
	return nil, syscall.ENOTDIR
}
diff --git a/pkg/sentry/fs/host/inode_state.go b/pkg/sentry/fs/host/inode_state.go
new file mode 100644
index 000000000..1adbd4562
--- /dev/null
+++ b/pkg/sentry/fs/host/inode_state.go
@@ -0,0 +1,49 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"fmt"
	"syscall"

	"gvisor.dev/gvisor/pkg/sentry/device"
	"gvisor.dev/gvisor/pkg/sentry/fs"
)

// afterLoad is invoked by stateify. It reopens the descriptor and re-maps the
// host (device, inode) pair back to the saved sentry inode ID.
func (i *inodeFileState) afterLoad() {
	// Initialize the descriptor value.
	if err := i.descriptor.initAfterLoad(i.sattr.InodeID, &i.queue); err != nil {
		panic(fmt.Sprintf("failed to load value of descriptor: %v", err))
	}

	// Remap the inode number.
	var s syscall.Stat_t
	if err := syscall.Fstat(i.FD(), &s); err != nil {
		panic(fs.ErrCorruption{fmt.Errorf("failed to get metadata for fd %d: %v", i.FD(), err)})
	}
	key := device.MultiDeviceKey{
		Device: s.Dev,
		Inode:  s.Ino,
	}
	if !hostFileDevice.Load(key, i.sattr.InodeID) {
		// This means there was a conflict at s.Dev and s.Ino with
		// another inode mapping: two files that were unique on the
		// saved filesystem are no longer unique on this filesystem.
		// Since this violates the contract that filesystems cannot
		// change across save and restore, error out.
		panic(fs.ErrCorruption{fmt.Errorf("host %s conflict in host device mappings: %s", key, hostFileDevice)})
	}
}
diff --git a/pkg/sentry/fs/host/inode_test.go b/pkg/sentry/fs/host/inode_test.go
new file mode 100644
index 000000000..c507f57eb
--- /dev/null
+++ b/pkg/sentry/fs/host/inode_test.go
@@ -0,0 +1,45 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"syscall"
	"testing"

	"gvisor.dev/gvisor/pkg/sentry/contexttest"
)

// TestCloseFD verifies fds will be closed.
func TestCloseFD(t *testing.T) {
	var p [2]int
	if err := syscall.Pipe(p[0:]); err != nil {
		t.Fatalf("Failed to create pipe %v", err)
	}
	defer syscall.Close(p[0])
	defer syscall.Close(p[1])

	// Use the write-end because we will detect if it's closed on the read end.
	ctx := contexttest.Context(t)
	file, err := NewFile(ctx, p[1])
	if err != nil {
		t.Fatalf("Failed to create File: %v", err)
	}
	file.DecRef()

	// Dropping the last reference should have closed the write end, which
	// the read end observes as EOF (read of 0 bytes, nil error).
	s := make([]byte, 10)
	if c, err := syscall.Read(p[0], s); c != 0 || err != nil {
		t.Errorf("want 0, nil (EOF) from read end, got %v, %v", c, err)
	}
}
diff --git a/pkg/sentry/fs/host/ioctl_unsafe.go b/pkg/sentry/fs/host/ioctl_unsafe.go
new file mode 100644
index 000000000..150ac8e19
--- /dev/null
+++ b/pkg/sentry/fs/host/ioctl_unsafe.go
@@ -0,0 +1,60 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"syscall"
	"unsafe"

	"gvisor.dev/gvisor/pkg/abi/linux"
)

// LINT.IfChange

// ioctlGetTermios retrieves the terminal settings of the host FD via the
// TCGETS ioctl.
func ioctlGetTermios(fd int) (*linux.Termios, error) {
	var t linux.Termios
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), linux.TCGETS, uintptr(unsafe.Pointer(&t)))
	if errno != 0 {
		return nil, errno
	}
	return &t, nil
}

// ioctlSetTermios applies terminal settings to the host FD. req selects the
// exact TCSETS* variant to use.
func ioctlSetTermios(fd int, req uint64, t *linux.Termios) error {
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(unsafe.Pointer(t)))
	if errno != 0 {
		return errno
	}
	return nil
}

// ioctlGetWinsize retrieves the terminal window size of the host FD via the
// TIOCGWINSZ ioctl.
func ioctlGetWinsize(fd int) (*linux.Winsize, error) {
	var w linux.Winsize
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), linux.TIOCGWINSZ, uintptr(unsafe.Pointer(&w)))
	if errno != 0 {
		return nil, errno
	}
	return &w, nil
}

// ioctlSetWinsize sets the terminal window size of the host FD via the
// TIOCSWINSZ ioctl.
func ioctlSetWinsize(fd int, w *linux.Winsize) error {
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), linux.TIOCSWINSZ, uintptr(unsafe.Pointer(w)))
	if errno != 0 {
		return errno
	}
	return nil
}

// LINT.ThenChange(../../fsimpl/host/ioctl_unsafe.go)
diff --git a/pkg/sentry/fs/host/socket.go b/pkg/sentry/fs/host/socket.go
new file mode 100644
index 000000000..cfb089e43
--- /dev/null
+++ b/pkg/sentry/fs/host/socket.go
@@ -0,0 +1,384 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"fmt"
	"syscall"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/fd"
	"gvisor.dev/gvisor/pkg/fdnotifier"
	"gvisor.dev/gvisor/pkg/refs"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/socket/control"
	unixsocket "gvisor.dev/gvisor/pkg/sentry/socket/unix"
	"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
	"gvisor.dev/gvisor/pkg/sentry/uniqueid"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/syserr"
	"gvisor.dev/gvisor/pkg/syserror"
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/unet"
	"gvisor.dev/gvisor/pkg/waiter"
)

// LINT.IfChange

// ConnectedEndpoint is a host FD backed implementation of
// transport.ConnectedEndpoint and transport.Receiver.
//
// +stateify savable
type ConnectedEndpoint struct {
	// ref keeps track of references to a connectedEndpoint.
	ref refs.AtomicRefCount

	// queue is registered with fdnotifier (see Init) so that host FD
	// readiness events are delivered to waiters.
	queue *waiter.Queue

	// path is the sentry path this endpoint pretends to be bound at; it is
	// reported as the local/remote address in Recv and GetLocalAddress.
	path string

	// If srfd >= 0, it is the host FD that file was imported from.
	srfd int `state:"wait"`

	// stype is the type of Unix socket.
	stype linux.SockType

	// sndbuf is the size of the send buffer.
	//
	// N.B. When this is smaller than the host size, we present it via
	// GetSockOpt and message splitting/rejection in SendMsg, but do not
	// prevent lots of small messages from filling the real send buffer
	// size on the host.
	sndbuf int64 `state:"nosave"`

	// mu protects the fields below.
	mu sync.RWMutex `state:"nosave"`

	// file is an *fd.FD containing the FD backing this endpoint. It must be
	// set to nil if it has been closed.
	file *fd.FD `state:"nosave"`
}

// init performs initialization required for creating new ConnectedEndpoints and
// for restoring them.
func (c *ConnectedEndpoint) init() *syserr.Error {
	// Validate the socket family; only AF_UNIX host sockets may back an
	// endpoint.
	family, err := syscall.GetsockoptInt(c.file.FD(), syscall.SOL_SOCKET, syscall.SO_DOMAIN)
	if err != nil {
		return syserr.FromError(err)
	}

	if family != syscall.AF_UNIX {
		// We only allow Unix sockets.
		return syserr.ErrInvalidEndpointState
	}

	stype, err := syscall.GetsockoptInt(c.file.FD(), syscall.SOL_SOCKET, syscall.SO_TYPE)
	if err != nil {
		return syserr.FromError(err)
	}

	// The sentry blocks in its own task layer, so the host FD must never
	// block the calling thread.
	if err := syscall.SetNonblock(c.file.FD(), true); err != nil {
		return syserr.FromError(err)
	}

	sndbuf, err := syscall.GetsockoptInt(c.file.FD(), syscall.SOL_SOCKET, syscall.SO_SNDBUF)
	if err != nil {
		return syserr.FromError(err)
	}

	c.stype = linux.SockType(stype)
	c.sndbuf = int64(sndbuf)

	return nil
}

// NewConnectedEndpoint creates a new ConnectedEndpoint backed by a host FD
// that will pretend to be bound at a given sentry path.
//
// The caller is responsible for calling Init(). Additionally, Release needs to
// be called twice because ConnectedEndpoint is both a transport.Receiver and
// transport.ConnectedEndpoint.
func NewConnectedEndpoint(ctx context.Context, file *fd.FD, queue *waiter.Queue, path string) (*ConnectedEndpoint, *syserr.Error) {
	e := ConnectedEndpoint{
		path:  path,
		queue: queue,
		file:  file,
		srfd:  -1,
	}

	if err := e.init(); err != nil {
		return nil, err
	}

	// AtomicRefCounters start off with a single reference. We need two.
	e.ref.IncRef()

	e.ref.EnableLeakCheck("host.ConnectedEndpoint")

	return &e, nil
}

// Init will do initialization required without holding other locks.
func (c *ConnectedEndpoint) Init() {
	if err := fdnotifier.AddFD(int32(c.file.FD()), c.queue); err != nil {
		panic(err)
	}
}

// NewSocketWithDirent allocates a new unix socket with host endpoint.
//
// This is currently only used by unsaveable Gofer nodes.
//
// NewSocketWithDirent takes ownership of f on success.
func NewSocketWithDirent(ctx context.Context, d *fs.Dirent, f *fd.FD, flags fs.FileFlags) (*fs.File, error) {
	f2 := fd.New(f.FD())
	var q waiter.Queue
	e, err := NewConnectedEndpoint(ctx, f2, &q, "" /* path */)
	if err != nil {
		f2.Release()
		return nil, err.ToError()
	}

	// Take ownership of the FD.
	f.Release()

	e.Init()

	ep := transport.NewExternal(ctx, e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)

	return unixsocket.NewWithDirent(ctx, d, ep, e.stype, flags), nil
}

// newSocket allocates a new unix socket with host endpoint.
//
// If saveable, the original FD is duplicated so that it can be reopened
// after a save/restore cycle (srfd records the original).
func newSocket(ctx context.Context, orgfd int, saveable bool) (*fs.File, error) {
	ownedfd := orgfd
	srfd := -1
	if saveable {
		var err error
		ownedfd, err = syscall.Dup(orgfd)
		if err != nil {
			return nil, err
		}
		srfd = orgfd
	}
	f := fd.New(ownedfd)
	var q waiter.Queue
	e, err := NewConnectedEndpoint(ctx, f, &q, "" /* path */)
	if err != nil {
		// Close the dup we created; if not saveable, only release
		// ownership so the caller's original FD stays open.
		if saveable {
			f.Close()
		} else {
			f.Release()
		}
		return nil, err.ToError()
	}

	e.srfd = srfd
	e.Init()

	ep := transport.NewExternal(ctx, e.stype, uniqueid.GlobalProviderFromContext(ctx), &q, e, e)

	return unixsocket.New(ctx, ep, e.stype), nil
}

// Send implements transport.ConnectedEndpoint.Send.
func (c *ConnectedEndpoint) Send(data [][]byte, controlMessages transport.ControlMessages, from tcpip.FullAddress) (int64, bool, *syserr.Error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// Sending control messages to a host socket is not supported.
	if !controlMessages.Empty() {
		return 0, false, syserr.ErrInvalidEndpointState
	}

	// Since stream sockets don't preserve message boundaries, we can write
	// only as much of the message as fits in the send buffer.
	truncate := c.stype == linux.SOCK_STREAM

	n, totalLen, err := fdWriteVec(c.file.FD(), data, c.sndbuf, truncate)
	if n < totalLen && err == nil {
		// The host only returns a short write if it would otherwise
		// block (and only for stream sockets).
		err = syserror.EAGAIN
	}
	if n > 0 && err != syserror.EAGAIN {
		// The caller may need to block to send more data, but
		// otherwise there isn't anything that can be done about an
		// error with a partial write.
		err = nil
	}

	// There is no need for the callee to call SendNotify because fdWriteVec
	// uses the host's sendmsg(2) and the host kernel's queue.
	return n, false, syserr.FromError(err)
}

// SendNotify implements transport.ConnectedEndpoint.SendNotify.
func (c *ConnectedEndpoint) SendNotify() {}

// CloseSend implements transport.ConnectedEndpoint.CloseSend.
func (c *ConnectedEndpoint) CloseSend() {
	c.mu.Lock()
	defer c.mu.Unlock()

	if err := syscall.Shutdown(c.file.FD(), syscall.SHUT_WR); err != nil {
		// A well-formed UDS shutdown can't fail. See
		// net/unix/af_unix.c:unix_shutdown.
		panic(fmt.Sprintf("failed write shutdown on host socket %+v: %v", c, err))
	}
}

// CloseNotify implements transport.ConnectedEndpoint.CloseNotify.
func (c *ConnectedEndpoint) CloseNotify() {}

// Writable implements transport.ConnectedEndpoint.Writable.
func (c *ConnectedEndpoint) Writable() bool {
	c.mu.RLock()
	defer c.mu.RUnlock()

	return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventOut)&waiter.EventOut != 0
}

// Passcred implements transport.ConnectedEndpoint.Passcred.
func (c *ConnectedEndpoint) Passcred() bool {
	// We don't support credential passing for host sockets.
	return false
}

// GetLocalAddress implements transport.ConnectedEndpoint.GetLocalAddress.
func (c *ConnectedEndpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) {
	return tcpip.FullAddress{Addr: tcpip.Address(c.path)}, nil
}

// EventUpdate implements transport.ConnectedEndpoint.EventUpdate.
func (c *ConnectedEndpoint) EventUpdate() {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.file.FD() != -1 {
		fdnotifier.UpdateFD(int32(c.file.FD()))
	}
}

// Recv implements transport.Receiver.Recv.
func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights int, peek bool) (int64, int64, transport.ControlMessages, bool, tcpip.FullAddress, bool, *syserr.Error) {
	c.mu.RLock()
	defer c.mu.RUnlock()

	// Reserve space for SCM_RIGHTS control data if the caller asked for
	// file descriptors.
	var cm unet.ControlMessage
	if numRights > 0 {
		cm.EnableFDs(int(numRights))
	}

	// N.B. Unix sockets don't have a receive buffer, the send buffer
	// serves both purposes.
	rl, ml, cl, cTrunc, err := fdReadVec(c.file.FD(), data, []byte(cm), peek, c.sndbuf)
	if rl > 0 && err != nil {
		// We got some data, so all we need to do on error is return
		// the data that we got. Short reads are fine, no need to
		// block.
		err = nil
	}
	if err != nil {
		return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.FromError(err)
	}

	// There is no need for the callee to call RecvNotify because fdReadVec uses
	// the host's recvmsg(2) and the host kernel's queue.

	// Trim the control data if we received less than the full amount.
	if cl < uint64(len(cm)) {
		cm = cm[:cl]
	}

	// Avoid extra allocations in the case where there isn't any control data.
	if len(cm) == 0 {
		return rl, ml, transport.ControlMessages{}, cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil
	}

	fds, err := cm.ExtractFDs()
	if err != nil {
		return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.FromError(err)
	}

	if len(fds) == 0 {
		return rl, ml, transport.ControlMessages{}, cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil
	}
	// Wrap any received host FDs as SCM rights for the sentry.
	return rl, ml, control.New(nil, nil, newSCMRights(fds)), cTrunc, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, nil
}

// close releases all resources related to the endpoint: it deregisters the
// FD from the notifier, closes it, and marks it closed by nil-ing file.
func (c *ConnectedEndpoint) close() {
	fdnotifier.RemoveFD(int32(c.file.FD()))
	c.file.Close()
	c.file = nil
}

// RecvNotify implements transport.Receiver.RecvNotify.
+func (c *ConnectedEndpoint) RecvNotify() {} + +// CloseRecv implements transport.Receiver.CloseRecv. +func (c *ConnectedEndpoint) CloseRecv() { + c.mu.Lock() + defer c.mu.Unlock() + + if err := syscall.Shutdown(c.file.FD(), syscall.SHUT_RD); err != nil { + // A well-formed UDS shutdown can't fail. See + // net/unix/af_unix.c:unix_shutdown. + panic(fmt.Sprintf("failed read shutdown on host socket %+v: %v", c, err)) + } +} + +// Readable implements transport.Receiver.Readable. +func (c *ConnectedEndpoint) Readable() bool { + c.mu.RLock() + defer c.mu.RUnlock() + + return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventIn)&waiter.EventIn != 0 +} + +// SendQueuedSize implements transport.Receiver.SendQueuedSize. +func (c *ConnectedEndpoint) SendQueuedSize() int64 { + // TODO(gvisor.dev/issue/273): SendQueuedSize isn't supported for host + // sockets because we don't allow the sentry to call ioctl(2). + return -1 +} + +// RecvQueuedSize implements transport.Receiver.RecvQueuedSize. +func (c *ConnectedEndpoint) RecvQueuedSize() int64 { + // TODO(gvisor.dev/issue/273): RecvQueuedSize isn't supported for host + // sockets because we don't allow the sentry to call ioctl(2). + return -1 +} + +// SendMaxQueueSize implements transport.Receiver.SendMaxQueueSize. +func (c *ConnectedEndpoint) SendMaxQueueSize() int64 { + return int64(c.sndbuf) +} + +// RecvMaxQueueSize implements transport.Receiver.RecvMaxQueueSize. +func (c *ConnectedEndpoint) RecvMaxQueueSize() int64 { + // N.B. Unix sockets don't use the receive buffer. We'll claim it is + // the same size as the send buffer. + return int64(c.sndbuf) +} + +// Release implements transport.ConnectedEndpoint.Release and transport.Receiver.Release. +func (c *ConnectedEndpoint) Release() { + c.ref.DecRefWithDestructor(c.close) +} + +// CloseUnread implements transport.ConnectedEndpoint.CloseUnread. 
func (c *ConnectedEndpoint) CloseUnread() {}

// LINT.ThenChange(../../fsimpl/host/socket.go)
diff --git a/pkg/sentry/fs/host/socket_iovec.go b/pkg/sentry/fs/host/socket_iovec.go
new file mode 100644
index 000000000..5c18dbd5e
--- /dev/null
+++ b/pkg/sentry/fs/host/socket_iovec.go
@@ -0,0 +1,117 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"syscall"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/syserror"
)

// LINT.IfChange

// maxIovs is the maximum number of iovecs to pass to the host.
var maxIovs = linux.UIO_MAXIOV

// copyToMulti copies as many bytes from src to dst as possible,
// filling the destination buffers in order.
func copyToMulti(dst [][]byte, src []byte) {
	for _, d := range dst {
		done := copy(d, src)
		src = src[done:]
		if len(src) == 0 {
			break
		}
	}
}

// copyFromMulti copies as many bytes from src to dst as possible,
// draining the source buffers in order.
func copyFromMulti(dst []byte, src [][]byte) {
	for _, s := range src {
		done := copy(dst, s)
		dst = dst[done:]
		if len(dst) == 0 {
			break
		}
	}
}

// buildIovec builds an iovec slice from the given []byte slice.
//
// If truncate, truncate bufs > maxlen. Otherwise, immediately return an error.
//
// If length < the total length of bufs, err indicates why, even when returning
// a truncated iovec.
//
// If intermediate != nil, iovecs references intermediate rather than bufs and
// the caller must copy to/from bufs as necessary.
func buildIovec(bufs [][]byte, maxlen int64, truncate bool) (length int64, iovecs []syscall.Iovec, intermediate []byte, err error) {
	// Count the total length and the number of non-empty buffers; empty
	// buffers contribute no iovec.
	var iovsRequired int
	for _, b := range bufs {
		length += int64(len(b))
		if len(b) > 0 {
			iovsRequired++
		}
	}

	stopLen := length
	if length > maxlen {
		if truncate {
			stopLen = maxlen
			err = syserror.EAGAIN
		} else {
			return 0, nil, nil, syserror.EMSGSIZE
		}
	}

	if iovsRequired > maxIovs {
		// The kernel will reject our call if we pass this many iovs.
		// Use a single intermediate buffer instead.
		b := make([]byte, stopLen)

		return stopLen, []syscall.Iovec{{
			Base: &b[0],
			Len:  uint64(stopLen),
		}}, b, err
	}

	var total int64
	iovecs = make([]syscall.Iovec, 0, iovsRequired)
	for i := range bufs {
		l := len(bufs[i])
		if l == 0 {
			continue
		}

		// Clamp the last iovec so the total never exceeds stopLen.
		stop := int64(l)
		if total+stop > stopLen {
			stop = stopLen - total
		}

		iovecs = append(iovecs, syscall.Iovec{
			Base: &bufs[i][0],
			Len:  uint64(stop),
		})

		total += stop
		if total >= stopLen {
			break
		}
	}

	return total, iovecs, nil, err
}

// LINT.ThenChange(../../fsimpl/host/socket_iovec.go)
diff --git a/pkg/sentry/fs/host/socket_state.go b/pkg/sentry/fs/host/socket_state.go
new file mode 100644
index 000000000..498018f0a
--- /dev/null
+++ b/pkg/sentry/fs/host/socket_state.go
@@ -0,0 +1,42 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package host + +import ( + "fmt" + "syscall" + + "gvisor.dev/gvisor/pkg/fd" +) + +// beforeSave is invoked by stateify. +func (c *ConnectedEndpoint) beforeSave() { + if c.srfd < 0 { + panic("only host file descriptors provided at sentry startup can be saved") + } +} + +// afterLoad is invoked by stateify. +func (c *ConnectedEndpoint) afterLoad() { + f, err := syscall.Dup(c.srfd) + if err != nil { + panic(fmt.Sprintf("failed to dup restored FD %d: %v", c.srfd, err)) + } + c.file = fd.New(f) + if err := c.init(); err != nil { + panic(fmt.Sprintf("Could not restore host socket FD %d: %v", c.srfd, err)) + } + c.Init() +} diff --git a/pkg/sentry/fs/host/socket_test.go b/pkg/sentry/fs/host/socket_test.go new file mode 100644 index 000000000..affdbcacb --- /dev/null +++ b/pkg/sentry/fs/host/socket_test.go @@ -0,0 +1,246 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "reflect" + "syscall" + "testing" + + "gvisor.dev/gvisor/pkg/fd" + "gvisor.dev/gvisor/pkg/fdnotifier" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/sentry/socket" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/syserr" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +var ( + // Make sure that ConnectedEndpoint implements transport.ConnectedEndpoint. 
+ _ = transport.ConnectedEndpoint(new(ConnectedEndpoint)) + + // Make sure that ConnectedEndpoint implements transport.Receiver. + _ = transport.Receiver(new(ConnectedEndpoint)) +) + +func getFl(fd int) (uint32, error) { + fl, _, err := syscall.RawSyscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFL, 0) + if err == 0 { + return uint32(fl), nil + } + return 0, err +} + +func TestSocketIsBlocking(t *testing.T) { + // Using socketpair here because it's already connected. + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + + fl, err := getFl(pair[0]) + if err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Fatalf("Expected socket %v to be blocking", pair[0]) + } + if fl, err = getFl(pair[1]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Fatalf("Expected socket %v to be blocking", pair[1]) + } + sock, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) failed => %v", pair[0], err) + } + defer sock.DecRef() + // Test that the socket now is non-blocking. + if fl, err = getFl(pair[0]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err) + } + if fl&syscall.O_NONBLOCK != syscall.O_NONBLOCK { + t.Errorf("Expected socket %v to have become non-blocking", pair[0]) + } + if fl, err = getFl(pair[1]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Errorf("Did not expect socket %v to become non-blocking", pair[1]) + } +} + +func TestSocketWritev(t *testing.T) { + // Using socketpair here because it's already connected. 
+ pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + socket, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer socket.DecRef() + buf := []byte("hello world\n") + n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(buf)) + if err != nil { + t.Fatalf("socket writev failed: %v", err) + } + + if n != int64(len(buf)) { + t.Fatalf("socket writev wrote incorrect bytes: %d", n) + } +} + +func TestSocketWritevLen0(t *testing.T) { + // Using socketpair here because it's already connected. + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + socket, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer socket.DecRef() + n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(nil)) + if err != nil { + t.Fatalf("socket writev failed: %v", err) + } + + if n != 0 { + t.Fatalf("socket writev wrote incorrect bytes: %d", n) + } +} + +func TestSocketSendMsgLen0(t *testing.T) { + // Using socketpair here because it's already connected. 
+ pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + sfile, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer sfile.DecRef() + + s := sfile.FileOperations.(socket.Socket) + n, terr := s.SendMsg(nil, usermem.BytesIOSequence(nil), []byte{}, 0, false, ktime.Time{}, socket.ControlMessages{}) + if n != 0 { + t.Fatalf("socket sendmsg() failed: %v wrote: %d", terr, n) + } + + if terr != nil { + t.Fatalf("socket sendmsg() failed: %v", terr) + } +} + +func TestListen(t *testing.T) { + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) => %v", err) + } + sfile1, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer sfile1.DecRef() + socket1 := sfile1.FileOperations.(socket.Socket) + + sfile2, err := newSocket(contexttest.Context(t), pair[1], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[1], err) + } + defer sfile2.DecRef() + socket2 := sfile2.FileOperations.(socket.Socket) + + // Socketpairs can not be listened to. + if err := socket1.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket1.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } + if err := socket2.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket2.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } + + // Create a Unix socket, do not bind it. 
+ sock, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) => %v", err) + } + sfile3, err := newSocket(contexttest.Context(t), sock, false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", sock, err) + } + defer sfile3.DecRef() + socket3 := sfile3.FileOperations.(socket.Socket) + + // This socket is not bound so we can't listen on it. + if err := socket3.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket3.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } +} + +func TestPasscred(t *testing.T) { + e := &ConnectedEndpoint{} + if got, want := e.Passcred(), false; got != want { + t.Errorf("Got %#v.Passcred() = %t, want = %t", e, got, want) + } +} + +func TestGetLocalAddress(t *testing.T) { + e := &ConnectedEndpoint{path: "foo"} + want := tcpip.FullAddress{Addr: tcpip.Address("foo")} + if got, err := e.GetLocalAddress(); err != nil || got != want { + t.Errorf("Got %#v.GetLocalAddress() = %#v, %v, want = %#v, %v", e, got, err, want, nil) + } +} + +func TestQueuedSize(t *testing.T) { + e := &ConnectedEndpoint{} + tests := []struct { + name string + f func() int64 + }{ + {"SendQueuedSize", e.SendQueuedSize}, + {"RecvQueuedSize", e.RecvQueuedSize}, + } + + for _, test := range tests { + if got, want := test.f(), int64(-1); got != want { + t.Errorf("Got %#v.%s() = %d, want = %d", e, test.name, got, want) + } + } +} + +func TestRelease(t *testing.T) { + f, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c := &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} + want := &ConnectedEndpoint{queue: c.queue} + want.ref.DecRef() + fdnotifier.AddFD(int32(c.file.FD()), nil) + c.Release() + if !reflect.DeepEqual(c, want) { + t.Errorf("got = %#v, want = %#v", c, want) + } +} diff --git 
a/pkg/sentry/fs/host/socket_unsafe.go b/pkg/sentry/fs/host/socket_unsafe.go new file mode 100644 index 000000000..5d4f312cf --- /dev/null +++ b/pkg/sentry/fs/host/socket_unsafe.go @@ -0,0 +1,105 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + "unsafe" +) + +// LINT.IfChange + +// fdReadVec receives from fd to bufs. +// +// If the total length of bufs is > maxlen, fdReadVec will do a partial read +// and err will indicate why the message was truncated. +func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool, maxlen int64) (readLen int64, msgLen int64, controlLen uint64, controlTrunc bool, err error) { + flags := uintptr(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC) + if peek { + flags |= syscall.MSG_PEEK + } + + // Always truncate the receive buffer. All socket types will truncate + // received messages. + length, iovecs, intermediate, err := buildIovec(bufs, maxlen, true) + if err != nil && len(iovecs) == 0 { + // No partial write to do, return error immediately. + return 0, 0, 0, false, err + } + + var msg syscall.Msghdr + if len(control) != 0 { + msg.Control = &control[0] + msg.Controllen = uint64(len(control)) + } + + if len(iovecs) != 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + + rawN, _, e := syscall.RawSyscall(syscall.SYS_RECVMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), flags) + if e != 0 { + // N.B. 
prioritize the syscall error over the buildIovec error. + return 0, 0, 0, false, e + } + n := int64(rawN) + + // Copy data back to bufs. + if intermediate != nil { + copyToMulti(bufs, intermediate) + } + + controlTrunc = msg.Flags&syscall.MSG_CTRUNC == syscall.MSG_CTRUNC + + if n > length { + return length, n, msg.Controllen, controlTrunc, err + } + + return n, n, msg.Controllen, controlTrunc, err +} + +// fdWriteVec sends from bufs to fd. +// +// If the total length of bufs is > maxlen && truncate, fdWriteVec will do a +// partial write and err will indicate why the message was truncated. +func fdWriteVec(fd int, bufs [][]byte, maxlen int64, truncate bool) (int64, int64, error) { + length, iovecs, intermediate, err := buildIovec(bufs, maxlen, truncate) + if err != nil && len(iovecs) == 0 { + // No partial write to do, return error immediately. + return 0, length, err + } + + // Copy data to intermediate buf. + if intermediate != nil { + copyFromMulti(intermediate, bufs) + } + + var msg syscall.Msghdr + if len(iovecs) > 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + + n, _, e := syscall.RawSyscall(syscall.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), syscall.MSG_DONTWAIT|syscall.MSG_NOSIGNAL) + if e != 0 { + // N.B. prioritize the syscall error over the buildIovec error. + return 0, length, e + } + + return int64(n), length, err +} + +// LINT.ThenChange(../../fsimpl/host/socket_unsafe.go) diff --git a/pkg/sentry/fs/host/tty.go b/pkg/sentry/fs/host/tty.go new file mode 100644 index 000000000..82a02fcb2 --- /dev/null +++ b/pkg/sentry/fs/host/tty.go @@ -0,0 +1,364 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/unimpl" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +// LINT.IfChange + +// TTYFileOperations implements fs.FileOperations for a host file descriptor +// that wraps a TTY FD. +// +// +stateify savable +type TTYFileOperations struct { + fileOperations + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // session is the session attached to this TTYFileOperations. + session *kernel.Session + + // fgProcessGroup is the foreground process group that is currently + // connected to this TTY. + fgProcessGroup *kernel.ProcessGroup + + // termios contains the terminal attributes for this TTY. + termios linux.KernelTermios +} + +// newTTYFile returns a new fs.File that wraps a TTY FD. +func newTTYFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags, iops *inodeOperations) *fs.File { + return fs.NewFile(ctx, dirent, flags, &TTYFileOperations{ + fileOperations: fileOperations{iops: iops}, + termios: linux.DefaultSlaveTermios, + }) +} + +// InitForegroundProcessGroup sets the foreground process group and session for +// the TTY. This should only be called once, after the foreground process group +// has been created, but before it has started running. 
+func (t *TTYFileOperations) InitForegroundProcessGroup(pg *kernel.ProcessGroup) { + t.mu.Lock() + defer t.mu.Unlock() + if t.fgProcessGroup != nil { + panic("foreground process group is already set") + } + t.fgProcessGroup = pg + t.session = pg.Session() +} + +// ForegroundProcessGroup returns the foreground process for the TTY. +func (t *TTYFileOperations) ForegroundProcessGroup() *kernel.ProcessGroup { + t.mu.Lock() + defer t.mu.Unlock() + return t.fgProcessGroup +} + +// Read implements fs.FileOperations.Read. +// +// Reading from a TTY is only allowed for foreground process groups. Background +// process groups will either get EIO or a SIGTTIN. +// +// See drivers/tty/n_tty.c:n_tty_read()=>job_control(). +func (t *TTYFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + t.mu.Lock() + defer t.mu.Unlock() + + // Are we allowed to do the read? + // drivers/tty/n_tty.c:n_tty_read()=>job_control()=>tty_check_change(). + if err := t.checkChange(ctx, linux.SIGTTIN); err != nil { + return 0, err + } + + // Do the read. + return t.fileOperations.Read(ctx, file, dst, offset) +} + +// Write implements fs.FileOperations.Write. +func (t *TTYFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + t.mu.Lock() + defer t.mu.Unlock() + + // Check whether TOSTOP is enabled. This corresponds to the check in + // drivers/tty/n_tty.c:n_tty_write(). + if t.termios.LEnabled(linux.TOSTOP) { + if err := t.checkChange(ctx, linux.SIGTTOU); err != nil { + return 0, err + } + } + return t.fileOperations.Write(ctx, file, src, offset) +} + +// Release implements fs.FileOperations.Release. +func (t *TTYFileOperations) Release() { + t.mu.Lock() + t.fgProcessGroup = nil + t.mu.Unlock() + + t.fileOperations.Release() +} + +// Ioctl implements fs.FileOperations.Ioctl. 
+func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // Ignore arg[0]. This is the real FD: + fd := t.fileOperations.iops.fileState.FD() + ioctl := args[1].Uint64() + switch ioctl { + case linux.TCGETS: + termios, err := ioctlGetTermios(fd) + if err != nil { + return 0, err + } + _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), termios, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + + case linux.TCSETS, linux.TCSETSW, linux.TCSETSF: + t.mu.Lock() + defer t.mu.Unlock() + + if err := t.checkChange(ctx, linux.SIGTTOU); err != nil { + return 0, err + } + + var termios linux.Termios + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &termios, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + err := ioctlSetTermios(fd, ioctl, &termios) + if err == nil { + t.termios.FromTermios(termios) + } + return 0, err + + case linux.TIOCGPGRP: + // Args: pid_t *argp + // When successful, equivalent to *argp = tcgetpgrp(fd). + // Get the process group ID of the foreground process group on + // this terminal. + + pidns := kernel.PIDNamespaceFromContext(ctx) + if pidns == nil { + return 0, syserror.ENOTTY + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Map the ProcessGroup into a ProcessGroupID in the task's PID + // namespace. + pgID := pidns.IDOfProcessGroup(t.fgProcessGroup) + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + + case linux.TIOCSPGRP: + // Args: const pid_t *argp + // Equivalent to tcsetpgrp(fd, *argp). + // Set the foreground process group ID of this terminal. + + task := kernel.TaskFromContext(ctx) + if task == nil { + return 0, syserror.ENOTTY + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Check that we are allowed to set the process group. 
+ if err := t.checkChange(ctx, linux.SIGTTOU); err != nil { + // drivers/tty/tty_io.c:tiocspgrp() converts -EIO from + // tty_check_change() to -ENOTTY. + if err == syserror.EIO { + return 0, syserror.ENOTTY + } + return 0, err + } + + // Check that calling task's process group is in the TTY + // session. + if task.ThreadGroup().Session() != t.session { + return 0, syserror.ENOTTY + } + + var pgID kernel.ProcessGroupID + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgID, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + + // pgID must be non-negative. + if pgID < 0 { + return 0, syserror.EINVAL + } + + // Process group with pgID must exist in this PID namespace. + pidns := task.PIDNamespace() + pg := pidns.ProcessGroupWithID(pgID) + if pg == nil { + return 0, syserror.ESRCH + } + + // Check that new process group is in the TTY session. + if pg.Session() != t.session { + return 0, syserror.EPERM + } + + t.fgProcessGroup = pg + return 0, nil + + case linux.TIOCGWINSZ: + // Args: struct winsize *argp + // Get window size. + winsize, err := ioctlGetWinsize(fd) + if err != nil { + return 0, err + } + _, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), winsize, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + + case linux.TIOCSWINSZ: + // Args: const struct winsize *argp + // Set window size. + + // Unlike setting the termios, any process group (even + // background ones) can set the winsize. + + var winsize linux.Winsize + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &winsize, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + err := ioctlSetWinsize(fd, &winsize) + return 0, err + + // Unimplemented commands. 
+ case linux.TIOCSETD, + linux.TIOCSBRK, + linux.TIOCCBRK, + linux.TCSBRK, + linux.TCSBRKP, + linux.TIOCSTI, + linux.TIOCCONS, + linux.FIONBIO, + linux.TIOCEXCL, + linux.TIOCNXCL, + linux.TIOCGEXCL, + linux.TIOCNOTTY, + linux.TIOCSCTTY, + linux.TIOCGSID, + linux.TIOCGETD, + linux.TIOCVHANGUP, + linux.TIOCGDEV, + linux.TIOCMGET, + linux.TIOCMSET, + linux.TIOCMBIC, + linux.TIOCMBIS, + linux.TIOCGICOUNT, + linux.TCFLSH, + linux.TIOCSSERIAL, + linux.TIOCGPTPEER: + + unimpl.EmitUnimplementedEvent(ctx) + fallthrough + default: + return 0, syserror.ENOTTY + } +} + +// checkChange checks that the process group is allowed to read, write, or +// change the state of the TTY. +// +// This corresponds to Linux drivers/tty/tty_io.c:tty_check_change(). The logic +// is a bit convoluted, but documented inline. +// +// Preconditions: t.mu must be held. +func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) error { + task := kernel.TaskFromContext(ctx) + if task == nil { + // No task? Linux does not have an analog for this case, but + // tty_check_change only blocks specific cases and is + // surprisingly permissive. Allowing the change seems + // appropriate. + return nil + } + + tg := task.ThreadGroup() + pg := tg.ProcessGroup() + + // If the session for the task is different than the session for the + // controlling TTY, then the change is allowed. Seems like a bad idea, + // but that's exactly what linux does. + if tg.Session() != t.fgProcessGroup.Session() { + return nil + } + + // If we are the foreground process group, then the change is allowed. + if pg == t.fgProcessGroup { + return nil + } + + // We are not the foreground process group. + + // Is the provided signal blocked or ignored? + if (task.SignalMask()&linux.SignalSetOf(sig) != 0) || tg.SignalHandlers().IsIgnored(sig) { + // If the signal is SIGTTIN, then we are attempting to read + // from the TTY. Don't send the signal and return EIO. 
+ if sig == linux.SIGTTIN { + return syserror.EIO + } + + // Otherwise, we are writing or changing terminal state. This is allowed. + return nil + } + + // If the process group is an orphan, return EIO. + if pg.IsOrphan() { + return syserror.EIO + } + + // Otherwise, send the signal to the process group and return ERESTARTSYS. + // + // Note that Linux also unconditionally sets TIF_SIGPENDING on current, + // but this isn't necessary in gVisor because the rationale given in + // 040b6362d58f "tty: fix leakage of -ERESTARTSYS to userland" doesn't + // apply: the sentry will handle -ERESTARTSYS in + // kernel.runApp.execute() even if the kernel.Task isn't interrupted. + // + // Linux ignores the result of kill_pgrp(). + _ = pg.SendSignal(kernel.SignalInfoPriv(sig)) + return kernel.ERESTARTSYS +} + +// LINT.ThenChange(../../fsimpl/host/tty.go) diff --git a/pkg/sentry/fs/host/util.go b/pkg/sentry/fs/host/util.go new file mode 100644 index 000000000..1b0356930 --- /dev/null +++ b/pkg/sentry/fs/host/util.go @@ -0,0 +1,129 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package host + +import ( + "os" + "syscall" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/syserror" +) + +func nodeType(s *syscall.Stat_t) fs.InodeType { + switch x := (s.Mode & syscall.S_IFMT); x { + case syscall.S_IFLNK: + return fs.Symlink + case syscall.S_IFIFO: + return fs.Pipe + case syscall.S_IFCHR: + return fs.CharacterDevice + case syscall.S_IFBLK: + return fs.BlockDevice + case syscall.S_IFSOCK: + return fs.Socket + case syscall.S_IFDIR: + return fs.Directory + case syscall.S_IFREG: + return fs.RegularFile + default: + // This shouldn't happen, but just in case... + log.Warningf("unknown host file type %d: assuming regular", x) + return fs.RegularFile + } +} + +func wouldBlock(s *syscall.Stat_t) bool { + typ := nodeType(s) + return typ == fs.Pipe || typ == fs.Socket || typ == fs.CharacterDevice +} + +func stableAttr(s *syscall.Stat_t) fs.StableAttr { + return fs.StableAttr{ + Type: nodeType(s), + DeviceID: hostFileDevice.DeviceID(), + InodeID: hostFileDevice.Map(device.MultiDeviceKey{ + Device: s.Dev, + Inode: s.Ino, + }), + BlockSize: int64(s.Blksize), + } +} + +func owner(s *syscall.Stat_t) fs.FileOwner { + return fs.FileOwner{ + UID: auth.KUID(s.Uid), + GID: auth.KGID(s.Gid), + } +} + +func unstableAttr(s *syscall.Stat_t) fs.UnstableAttr { + return fs.UnstableAttr{ + Size: s.Size, + Usage: s.Blocks * 512, + Perms: fs.FilePermsFromMode(linux.FileMode(s.Mode)), + Owner: owner(s), + AccessTime: ktime.FromUnix(s.Atim.Sec, s.Atim.Nsec), + ModificationTime: ktime.FromUnix(s.Mtim.Sec, s.Mtim.Nsec), + StatusChangeTime: ktime.FromUnix(s.Ctim.Sec, s.Ctim.Nsec), + Links: uint64(s.Nlink), + } +} + +type dirInfo struct { + buf []byte // buffer for directory I/O. + nbuf int // length of buf; return value from ReadDirent. 
+ bufp int // location of next record in buf. +} + +// LINT.IfChange + +// isBlockError unwraps os errors and checks if they are caused by EAGAIN or +// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock. +func isBlockError(err error) bool { + if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK { + return true + } + if pe, ok := err.(*os.PathError); ok { + return isBlockError(pe.Err) + } + return false +} + +// LINT.ThenChange(../../fsimpl/host/util.go) + +func hostEffectiveKIDs() (uint32, []uint32, error) { + gids, err := os.Getgroups() + if err != nil { + return 0, nil, err + } + egids := make([]uint32, len(gids)) + for i, gid := range gids { + egids[i] = uint32(gid) + } + return uint32(os.Geteuid()), append(egids, uint32(os.Getegid())), nil +} + +var hostUID uint32 +var hostGIDs []uint32 + +func init() { + hostUID, hostGIDs, _ = hostEffectiveKIDs() +} diff --git a/pkg/sentry/fs/host/util_amd64_unsafe.go b/pkg/sentry/fs/host/util_amd64_unsafe.go new file mode 100644 index 000000000..66da6e9f5 --- /dev/null +++ b/pkg/sentry/fs/host/util_amd64_unsafe.go @@ -0,0 +1,41 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build amd64 + +package host + +import ( + "syscall" + "unsafe" +) + +func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) { + var stat syscall.Stat_t + namePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return stat, err + } + _, _, errno := syscall.Syscall6( + syscall.SYS_NEWFSTATAT, + uintptr(fd), + uintptr(unsafe.Pointer(namePtr)), + uintptr(unsafe.Pointer(&stat)), + uintptr(flags), + 0, 0) + if errno != 0 { + return stat, errno + } + return stat, nil +} diff --git a/pkg/sentry/fs/host/util_arm64_unsafe.go b/pkg/sentry/fs/host/util_arm64_unsafe.go new file mode 100644 index 000000000..e8cb94aeb --- /dev/null +++ b/pkg/sentry/fs/host/util_arm64_unsafe.go @@ -0,0 +1,41 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build arm64 + +package host + +import ( + "syscall" + "unsafe" +) + +func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) { + var stat syscall.Stat_t + namePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return stat, err + } + _, _, errno := syscall.Syscall6( + syscall.SYS_FSTATAT, + uintptr(fd), + uintptr(unsafe.Pointer(namePtr)), + uintptr(unsafe.Pointer(&stat)), + uintptr(flags), + 0, 0) + if errno != 0 { + return stat, errno + } + return stat, nil +} diff --git a/pkg/sentry/fs/host/util_unsafe.go b/pkg/sentry/fs/host/util_unsafe.go new file mode 100644 index 000000000..23bd35d64 --- /dev/null +++ b/pkg/sentry/fs/host/util_unsafe.go @@ -0,0 +1,77 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + "unsafe" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/sentry/fs" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" +) + +// NulByte is a single NUL byte. It is passed to readlinkat as an empty string. +var NulByte byte = '\x00' + +func readLink(fd int) (string, error) { + // Buffer sizing copied from os.Readlink. 
+ for l := 128; ; l *= 2 { + b := make([]byte, l) + n, _, errno := syscall.Syscall6( + syscall.SYS_READLINKAT, + uintptr(fd), + uintptr(unsafe.Pointer(&NulByte)), // "" + uintptr(unsafe.Pointer(&b[0])), + uintptr(l), + 0, 0) + if errno != 0 { + return "", errno + } + if n < uintptr(l) { + return string(b[:n]), nil + } + } +} + +func timespecFromTimestamp(t ktime.Time, omit, setSysTime bool) syscall.Timespec { + if omit { + return syscall.Timespec{0, linux.UTIME_OMIT} + } + if setSysTime { + return syscall.Timespec{0, linux.UTIME_NOW} + } + return syscall.NsecToTimespec(t.Nanoseconds()) +} + +func setTimestamps(fd int, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + var sts [2]syscall.Timespec + sts[0] = timespecFromTimestamp(ts.ATime, ts.ATimeOmit, ts.ATimeSetSystemTime) + sts[1] = timespecFromTimestamp(ts.MTime, ts.MTimeOmit, ts.MTimeSetSystemTime) + _, _, errno := syscall.Syscall6( + syscall.SYS_UTIMENSAT, + uintptr(fd), + 0, /* path */ + uintptr(unsafe.Pointer(&sts)), + 0, /* flags */ + 0, 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/pkg/sentry/fs/host/wait_test.go b/pkg/sentry/fs/host/wait_test.go new file mode 100644 index 000000000..ce397a5e3 --- /dev/null +++ b/pkg/sentry/fs/host/wait_test.go @@ -0,0 +1,69 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package host + +import ( + "syscall" + "testing" + "time" + + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/waiter" +) + +func TestWait(t *testing.T) { + var fds [2]int + err := syscall.Pipe(fds[:]) + if err != nil { + t.Fatalf("Unable to create pipe: %v", err) + } + + defer syscall.Close(fds[1]) + + ctx := contexttest.Context(t) + file, err := NewFile(ctx, fds[0]) + if err != nil { + syscall.Close(fds[0]) + t.Fatalf("NewFile failed: %v", err) + } + + defer file.DecRef() + + r := file.Readiness(waiter.EventIn) + if r != 0 { + t.Fatalf("File is ready for read when it shouldn't be.") + } + + e, ch := waiter.NewChannelEntry(nil) + file.EventRegister(&e, waiter.EventIn) + defer file.EventUnregister(&e) + + // Check that there are no notifications yet. + if len(ch) != 0 { + t.Fatalf("Channel is non-empty") + } + + // Write to the pipe, so it should be writable now. + syscall.Write(fds[1], []byte{1}) + + // Check that we get a notification. We need to yield the current thread + // so that the fdnotifier can deliver notifications, so we use a + // 1-second timeout instead of just checking the length of the channel. + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Channel not notified") + } +} diff --git a/pkg/sentry/fs/inode.go b/pkg/sentry/fs/inode.go new file mode 100644 index 000000000..a34fbc946 --- /dev/null +++ b/pkg/sentry/fs/inode.go @@ -0,0 +1,477 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/metric" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/fs/lock" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" +) + +var opens = metric.MustCreateNewUint64Metric("/fs/opens", false /* sync */, "Number of file opens.") + +// Inode is a file system object that can be simultaneously referenced by different +// components of the VFS (Dirent, fs.File, etc). +// +// +stateify savable +type Inode struct { + // AtomicRefCount is our reference count. + refs.AtomicRefCount + + // InodeOperations is the file system specific behavior of the Inode. + InodeOperations InodeOperations + + // StableAttr are stable cached attributes of the Inode. + StableAttr StableAttr + + // LockCtx is the file lock context. It manages its own sychronization and tracks + // regions of the Inode that have locks held. + LockCtx LockCtx + + // Watches is the set of inotify watches for this inode. + Watches *Watches + + // MountSource is the mount source this Inode is a part of. + MountSource *MountSource + + // overlay is the overlay entry for this Inode. + overlay *overlayEntry + + // appendMu is used to synchronize write operations into files which + // have been opened with O_APPEND. Operations which change a file size + // have to take this lock for read. Write operations to files with + // O_APPEND have to take this lock for write. + appendMu sync.RWMutex `state:"nosave"` +} + +// LockCtx is an Inode's lock context and contains different personalities of locks; both +// Posix and BSD style locks are supported. 
+// +// Note that in Linux fcntl(2) and flock(2) locks are _not_ cooperative, because race and +// deadlock conditions make merging them prohibitive. We do the same and keep them oblivious +// to each other but provide a "context" as a convenient container. +// +// +stateify savable +type LockCtx struct { + // Posix is a set of POSIX-style regional advisory locks, see fcntl(2). + Posix lock.Locks + + // BSD is a set of BSD-style advisory file wide locks, see flock(2). + BSD lock.Locks +} + +// NewInode constructs an Inode from InodeOperations, a MountSource, and stable attributes. +// +// NewInode takes a reference on msrc. +func NewInode(ctx context.Context, iops InodeOperations, msrc *MountSource, sattr StableAttr) *Inode { + msrc.IncRef() + i := Inode{ + InodeOperations: iops, + StableAttr: sattr, + Watches: newWatches(), + MountSource: msrc, + } + i.EnableLeakCheck("fs.Inode") + return &i +} + +// DecRef drops a reference on the Inode. +func (i *Inode) DecRef() { + i.DecRefWithDestructor(i.destroy) +} + +// destroy releases the Inode and releases the msrc reference taken. +func (i *Inode) destroy() { + ctx := context.Background() + if err := i.WriteOut(ctx); err != nil { + // FIXME(b/65209558): Mark as warning again once noatime is + // properly supported. + log.Debugf("Inode %+v, failed to sync all metadata: %v", i.StableAttr, err) + } + + // If this inode is being destroyed because it was unlinked, queue a + // deletion event. This may not be the case for inodes being revalidated. + if i.Watches.unlinked { + i.Watches.Notify("", linux.IN_DELETE_SELF, 0) + } + + // Remove references from the watch owners to the watches on this inode, + // since the watches are about to be GCed. Note that we don't need to worry + // about the watch pins since if there were any active pins, this inode + // wouldn't be in the destructor. 
+ i.Watches.targetDestroyed() + + if i.overlay != nil { + i.overlay.release() + } else { + i.InodeOperations.Release(ctx) + } + + i.MountSource.DecRef() +} + +// Mappable calls i.InodeOperations.Mappable. +func (i *Inode) Mappable() memmap.Mappable { + if i.overlay != nil { + // In an overlay, Mappable is always implemented by + // the overlayEntry metadata to synchronize memory + // access of files with copy up. But first check if + // the Inodes involved would be mappable in the first + // place. + i.overlay.copyMu.RLock() + ok := i.overlay.isMappableLocked() + i.overlay.copyMu.RUnlock() + if !ok { + return nil + } + return i.overlay + } + return i.InodeOperations.Mappable(i) +} + +// WriteOut calls i.InodeOperations.WriteOut with i as the Inode. +func (i *Inode) WriteOut(ctx context.Context) error { + if i.overlay != nil { + return overlayWriteOut(ctx, i.overlay) + } + return i.InodeOperations.WriteOut(ctx, i) +} + +// Lookup calls i.InodeOperations.Lookup with i as the directory. +func (i *Inode) Lookup(ctx context.Context, name string) (*Dirent, error) { + if i.overlay != nil { + d, _, err := overlayLookup(ctx, i.overlay, i, name) + return d, err + } + return i.InodeOperations.Lookup(ctx, i, name) +} + +// Create calls i.InodeOperations.Create with i as the directory. +func (i *Inode) Create(ctx context.Context, d *Dirent, name string, flags FileFlags, perm FilePermissions) (*File, error) { + if i.overlay != nil { + return overlayCreate(ctx, i.overlay, d, name, flags, perm) + } + return i.InodeOperations.Create(ctx, i, name, flags, perm) +} + +// CreateDirectory calls i.InodeOperations.CreateDirectory with i as the directory. 
+func (i *Inode) CreateDirectory(ctx context.Context, d *Dirent, name string, perm FilePermissions) error { + if i.overlay != nil { + return overlayCreateDirectory(ctx, i.overlay, d, name, perm) + } + return i.InodeOperations.CreateDirectory(ctx, i, name, perm) +} + +// CreateLink calls i.InodeOperations.CreateLink with i as the directory. +func (i *Inode) CreateLink(ctx context.Context, d *Dirent, oldname string, newname string) error { + if i.overlay != nil { + return overlayCreateLink(ctx, i.overlay, d, oldname, newname) + } + return i.InodeOperations.CreateLink(ctx, i, oldname, newname) +} + +// CreateHardLink calls i.InodeOperations.CreateHardLink with i as the directory. +func (i *Inode) CreateHardLink(ctx context.Context, d *Dirent, target *Dirent, name string) error { + if i.overlay != nil { + return overlayCreateHardLink(ctx, i.overlay, d, target, name) + } + return i.InodeOperations.CreateHardLink(ctx, i, target.Inode, name) +} + +// CreateFifo calls i.InodeOperations.CreateFifo with i as the directory. +func (i *Inode) CreateFifo(ctx context.Context, d *Dirent, name string, perm FilePermissions) error { + if i.overlay != nil { + return overlayCreateFifo(ctx, i.overlay, d, name, perm) + } + return i.InodeOperations.CreateFifo(ctx, i, name, perm) +} + +// Remove calls i.InodeOperations.Remove/RemoveDirectory with i as the directory. +func (i *Inode) Remove(ctx context.Context, d *Dirent, remove *Dirent) error { + if i.overlay != nil { + return overlayRemove(ctx, i.overlay, d, remove) + } + switch remove.Inode.StableAttr.Type { + case Directory, SpecialDirectory: + return i.InodeOperations.RemoveDirectory(ctx, i, remove.name) + default: + return i.InodeOperations.Remove(ctx, i, remove.name) + } +} + +// Rename calls i.InodeOperations.Rename with the given arguments. 
+func (i *Inode) Rename(ctx context.Context, oldParent *Dirent, renamed *Dirent, newParent *Dirent, newName string, replacement bool) error { + if i.overlay != nil { + return overlayRename(ctx, i.overlay, oldParent, renamed, newParent, newName, replacement) + } + return i.InodeOperations.Rename(ctx, renamed.Inode, oldParent.Inode, renamed.name, newParent.Inode, newName, replacement) +} + +// Bind calls i.InodeOperations.Bind with i as the directory. +func (i *Inode) Bind(ctx context.Context, parent *Dirent, name string, data transport.BoundEndpoint, perm FilePermissions) (*Dirent, error) { + if i.overlay != nil { + return overlayBind(ctx, i.overlay, parent, name, data, perm) + } + return i.InodeOperations.Bind(ctx, i, name, data, perm) +} + +// BoundEndpoint calls i.InodeOperations.BoundEndpoint with i as the Inode. +func (i *Inode) BoundEndpoint(path string) transport.BoundEndpoint { + if i.overlay != nil { + return overlayBoundEndpoint(i.overlay, path) + } + return i.InodeOperations.BoundEndpoint(i, path) +} + +// GetFile calls i.InodeOperations.GetFile with the given arguments. +func (i *Inode) GetFile(ctx context.Context, d *Dirent, flags FileFlags) (*File, error) { + if i.overlay != nil { + return overlayGetFile(ctx, i.overlay, d, flags) + } + opens.Increment() + return i.InodeOperations.GetFile(ctx, d, flags) +} + +// UnstableAttr calls i.InodeOperations.UnstableAttr with i as the Inode. +func (i *Inode) UnstableAttr(ctx context.Context) (UnstableAttr, error) { + if i.overlay != nil { + return overlayUnstableAttr(ctx, i.overlay) + } + return i.InodeOperations.UnstableAttr(ctx, i) +} + +// GetXattr calls i.InodeOperations.GetXattr with i as the Inode. +func (i *Inode) GetXattr(ctx context.Context, name string, size uint64) (string, error) { + if i.overlay != nil { + return overlayGetXattr(ctx, i.overlay, name, size) + } + return i.InodeOperations.GetXattr(ctx, i, name, size) +} + +// SetXattr calls i.InodeOperations.SetXattr with i as the Inode. 
+func (i *Inode) SetXattr(ctx context.Context, d *Dirent, name, value string, flags uint32) error { + if i.overlay != nil { + return overlaySetxattr(ctx, i.overlay, d, name, value, flags) + } + return i.InodeOperations.SetXattr(ctx, i, name, value, flags) +} + +// ListXattr calls i.InodeOperations.ListXattr with i as the Inode. +func (i *Inode) ListXattr(ctx context.Context, size uint64) (map[string]struct{}, error) { + if i.overlay != nil { + return overlayListXattr(ctx, i.overlay, size) + } + return i.InodeOperations.ListXattr(ctx, i, size) +} + +// RemoveXattr calls i.InodeOperations.RemoveXattr with i as the Inode. +func (i *Inode) RemoveXattr(ctx context.Context, d *Dirent, name string) error { + if i.overlay != nil { + return overlayRemoveXattr(ctx, i.overlay, d, name) + } + return i.InodeOperations.RemoveXattr(ctx, i, name) +} + +// CheckPermission will check if the caller may access this file in the +// requested way for reading, writing, or executing. +// +// CheckPermission is like Linux's fs/namei.c:inode_permission. It +// - checks file system mount flags, +// - and utilizes InodeOperations.Check to check capabilities and modes. +func (i *Inode) CheckPermission(ctx context.Context, p PermMask) error { + // First check the outer-most mounted filesystem. + if p.Write && i.MountSource.Flags.ReadOnly { + return syserror.EROFS + } + + if i.overlay != nil { + // CheckPermission requires some special handling for + // an overlay. + // + // Writes will always be redirected to an upper filesystem, + // so ignore all lower layers being read-only. + // + // But still honor the upper-most filesystem's mount flags; + // we should not attempt to modify the writable layer if it + // is mounted read-only. 
+		if p.Write && overlayUpperMountSource(i.MountSource).Flags.ReadOnly {
+			return syserror.EROFS
+		}
+	}
+
+	return i.check(ctx, p)
+}
+
+func (i *Inode) check(ctx context.Context, p PermMask) error {
+	if i.overlay != nil {
+		return overlayCheck(ctx, i.overlay, p)
+	}
+	if !i.InodeOperations.Check(ctx, i, p) {
+		return syserror.EACCES
+	}
+	return nil
+}
+
+// SetPermissions calls i.InodeOperations.SetPermissions with i as the Inode.
+func (i *Inode) SetPermissions(ctx context.Context, d *Dirent, f FilePermissions) bool {
+	if i.overlay != nil {
+		return overlaySetPermissions(ctx, i.overlay, d, f)
+	}
+	return i.InodeOperations.SetPermissions(ctx, i, f)
+}
+
+// SetOwner calls i.InodeOperations.SetOwner with i as the Inode.
+func (i *Inode) SetOwner(ctx context.Context, d *Dirent, o FileOwner) error {
+	if i.overlay != nil {
+		return overlaySetOwner(ctx, i.overlay, d, o)
+	}
+	return i.InodeOperations.SetOwner(ctx, i, o)
+}
+
+// SetTimestamps calls i.InodeOperations.SetTimestamps with i as the Inode.
+func (i *Inode) SetTimestamps(ctx context.Context, d *Dirent, ts TimeSpec) error {
+	if i.overlay != nil {
+		return overlaySetTimestamps(ctx, i.overlay, d, ts)
+	}
+	return i.InodeOperations.SetTimestamps(ctx, i, ts)
+}
+
+// Truncate calls i.InodeOperations.Truncate with i as the Inode.
+func (i *Inode) Truncate(ctx context.Context, d *Dirent, size int64) error {
+	if IsDir(i.StableAttr) {
+		return syserror.EISDIR
+	}
+
+	if i.overlay != nil {
+		return overlayTruncate(ctx, i.overlay, d, size)
+	}
+	i.appendMu.RLock()
+	defer i.appendMu.RUnlock()
+	return i.InodeOperations.Truncate(ctx, i, size)
+}
+
+// Allocate calls i.InodeOperations.Allocate with i as the Inode.
+func (i *Inode) Allocate(ctx context.Context, d *Dirent, offset int64, length int64) error {
+	if i.overlay != nil {
+		return overlayAllocate(ctx, i.overlay, d, offset, length)
+	}
+	return i.InodeOperations.Allocate(ctx, i, offset, length)
+}
+
+// Readlink calls i.InodeOperations.Readlink with i as the Inode.
+func (i *Inode) Readlink(ctx context.Context) (string, error) { + if i.overlay != nil { + return overlayReadlink(ctx, i.overlay) + } + return i.InodeOperations.Readlink(ctx, i) +} + +// Getlink calls i.InodeOperations.Getlink. +func (i *Inode) Getlink(ctx context.Context) (*Dirent, error) { + if i.overlay != nil { + return overlayGetlink(ctx, i.overlay) + } + return i.InodeOperations.Getlink(ctx, i) +} + +// AddLink calls i.InodeOperations.AddLink. +func (i *Inode) AddLink() { + if i.overlay != nil { + // This interface is only used by ramfs to update metadata of + // children. These filesystems should _never_ have overlay + // Inodes cached as children. So explicitly disallow this + // scenario and avoid plumbing Dirents through to do copy up. + panic("overlay Inodes cached in ramfs directories are not supported") + } + i.InodeOperations.AddLink() +} + +// DropLink calls i.InodeOperations.DropLink. +func (i *Inode) DropLink() { + if i.overlay != nil { + // Same as AddLink. + panic("overlay Inodes cached in ramfs directories are not supported") + } + i.InodeOperations.DropLink() +} + +// IsVirtual calls i.InodeOperations.IsVirtual. +func (i *Inode) IsVirtual() bool { + if i.overlay != nil { + // An overlay configuration does not support virtual files. + return false + } + return i.InodeOperations.IsVirtual() +} + +// StatFS calls i.InodeOperations.StatFS. +func (i *Inode) StatFS(ctx context.Context) (Info, error) { + if i.overlay != nil { + return overlayStatFS(ctx, i.overlay) + } + return i.InodeOperations.StatFS(ctx) +} + +// CheckOwnership checks whether `ctx` owns this Inode or may act as its owner. +// Compare Linux's fs/inode.c:inode_owner_or_capable(). 
+func (i *Inode) CheckOwnership(ctx context.Context) bool { + uattr, err := i.UnstableAttr(ctx) + if err != nil { + return false + } + creds := auth.CredentialsFromContext(ctx) + if uattr.Owner.UID == creds.EffectiveKUID { + return true + } + if creds.HasCapability(linux.CAP_FOWNER) && creds.UserNamespace.MapFromKUID(uattr.Owner.UID).Ok() { + return true + } + return false +} + +// CheckCapability checks whether `ctx` has capability `cp` with respect to +// operations on this Inode. +// +// Compare Linux's kernel/capability.c:capable_wrt_inode_uidgid(). +func (i *Inode) CheckCapability(ctx context.Context, cp linux.Capability) bool { + uattr, err := i.UnstableAttr(ctx) + if err != nil { + return false + } + creds := auth.CredentialsFromContext(ctx) + if !creds.UserNamespace.MapFromKUID(uattr.Owner.UID).Ok() { + return false + } + if !creds.UserNamespace.MapFromKGID(uattr.Owner.GID).Ok() { + return false + } + return creds.HasCapability(cp) +} + +func (i *Inode) lockAppendMu(appendMode bool) func() { + if appendMode { + i.appendMu.Lock() + return i.appendMu.Unlock + } + i.appendMu.RLock() + return i.appendMu.RUnlock +} diff --git a/pkg/sentry/fs/inode_inotify.go b/pkg/sentry/fs/inode_inotify.go new file mode 100644 index 000000000..efd3c962b --- /dev/null +++ b/pkg/sentry/fs/inode_inotify.go @@ -0,0 +1,170 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/sync" +) + +// Watches is the collection of inotify watches on an inode. +// +// +stateify savable +type Watches struct { + // mu protects the fields below. + mu sync.RWMutex `state:"nosave"` + + // ws is the map of active watches in this collection, keyed by the inotify + // instance id of the owner. + ws map[uint64]*Watch + + // unlinked indicates whether the target inode was ever unlinked. This is a + // hack to figure out if we should queue a IN_DELETE_SELF event when this + // watches collection is being destroyed, since otherwise we have no way of + // knowing if the target inode is going down due to a deletion or + // revalidation. + unlinked bool +} + +func newWatches() *Watches { + return &Watches{} +} + +// MarkUnlinked indicates the target for this set of watches to be unlinked. +// This has implications for the IN_EXCL_UNLINK flag. +func (w *Watches) MarkUnlinked() { + w.mu.Lock() + defer w.mu.Unlock() + w.unlinked = true +} + +// Lookup returns a matching watch with the given id. Returns nil if no such +// watch exists. Note that the result returned by this method only remains valid +// if the inotify instance owning the watch is locked, preventing modification +// of the returned watch and preventing the replacement of the watch by another +// one from the same instance (since there may be at most one watch per +// instance, per target). +func (w *Watches) Lookup(id uint64) *Watch { + w.mu.Lock() + defer w.mu.Unlock() + return w.ws[id] +} + +// Add adds watch into this set of watches. The watch being added must be unique +// - its ID() should not collide with any existing watches. +func (w *Watches) Add(watch *Watch) { + w.mu.Lock() + defer w.mu.Unlock() + + // Sanity check, the new watch shouldn't collide with an existing + // watch. Silently replacing an existing watch would result in a ref leak on + // this inode. 
We could handle this collision by calling Unpin() on the + // existing watch, but then we end up leaking watch descriptor ids at the + // inotify level. + if _, exists := w.ws[watch.ID()]; exists { + panic(fmt.Sprintf("Watch collision with ID %+v", watch.ID())) + } + if w.ws == nil { + w.ws = make(map[uint64]*Watch) + } + w.ws[watch.ID()] = watch +} + +// Remove removes a watch with the given id from this set of watches. The caller +// is responsible for generating any watch removal event, as appropriate. The +// provided id must match an existing watch in this collection. +func (w *Watches) Remove(id uint64) { + w.mu.Lock() + defer w.mu.Unlock() + + if w.ws == nil { + // This watch set is being destroyed. The thread executing the + // destructor is already in the process of deleting all our watches. We + // got here with no refs on the inode because we raced with the + // destructor notifying all the watch owners of the inode's destruction. + // See the comment in Watches.TargetDestroyed for why this race exists. + return + } + + watch, ok := w.ws[id] + if !ok { + // While there's technically no problem with silently ignoring a missing + // watch, this is almost certainly a bug. + panic(fmt.Sprintf("Attempt to remove a watch, but no watch found with provided id %+v.", id)) + } + delete(w.ws, watch.ID()) +} + +// Notify queues a new event with all watches in this set. +func (w *Watches) Notify(name string, events, cookie uint32) { + // N.B. We don't defer the unlocks because Notify is in the hot path of + // all IO operations, and the defer costs too much for small IO + // operations. + w.mu.RLock() + for _, watch := range w.ws { + if name != "" && w.unlinked && !watch.NotifyParentAfterUnlink() { + // IN_EXCL_UNLINK - By default, when watching events on the children + // of a directory, events are generated for children even after they + // have been unlinked from the directory. 
This can result in large + // numbers of uninteresting events for some applications (e.g., if + // watching /tmp, in which many applications create temporary files + // whose names are immediately unlinked). Specifying IN_EXCL_UNLINK + // changes the default behavior, so that events are not generated + // for children after they have been unlinked from the watched + // directory. -- inotify(7) + // + // We know we're dealing with events for a parent when the name + // isn't empty. + continue + } + watch.Notify(name, events, cookie) + } + w.mu.RUnlock() +} + +// Unpin unpins dirent from all watches in this set. +func (w *Watches) Unpin(d *Dirent) { + w.mu.RLock() + defer w.mu.RUnlock() + for _, watch := range w.ws { + watch.Unpin(d) + } +} + +// targetDestroyed is called by the inode destructor to notify the watch owners +// of the impending destruction of the watch target. +func (w *Watches) targetDestroyed() { + var ws map[uint64]*Watch + + // We can't hold w.mu while calling watch.TargetDestroyed to preserve lock + // ordering w.r.t to the owner inotify instances. Instead, atomically move + // the watches map into a local variable so we can iterate over it safely. + // + // Because of this however, it is possible for the watches' owners to reach + // this inode while the inode has no refs. This is still safe because the + // owners can only reach the inode until this function finishes calling + // watch.TargetDestroyed() below and the inode is guaranteed to exist in the + // meanwhile. But we still have to be very careful not to rely on inode + // state that may have been already destroyed. + w.mu.Lock() + ws = w.ws + w.ws = nil + w.mu.Unlock() + + for _, watch := range ws { + watch.TargetDestroyed() + } +} diff --git a/pkg/sentry/fs/inode_operations.go b/pkg/sentry/fs/inode_operations.go new file mode 100644 index 000000000..2bbfb72ef --- /dev/null +++ b/pkg/sentry/fs/inode_operations.go @@ -0,0 +1,326 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "errors" + + "gvisor.dev/gvisor/pkg/context" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" +) + +var ( + // ErrResolveViaReadlink is a special error value returned by + // InodeOperations.Getlink() to indicate that a link should be + // resolved automatically by walking to the path returned by + // InodeOperations.Readlink(). + ErrResolveViaReadlink = errors.New("link should be resolved via Readlink()") +) + +// TimeSpec contains access and modification timestamps. If either ATimeOmit or +// MTimeOmit is true, then the corresponding timestamp should not be updated. +// If either ATimeSetSystemTime or MTimeSetSystemTime are set then the +// corresponding timestamp should be ignored and the time will be set to the +// current system time. +type TimeSpec struct { + ATime ktime.Time + ATimeOmit bool + ATimeSetSystemTime bool + MTime ktime.Time + MTimeOmit bool + MTimeSetSystemTime bool +} + +// InodeOperations are operations on an Inode that diverge per file system. +// +// Objects that implement InodeOperations may cache file system "private" +// data that is useful for implementing these methods. In contrast, Inode +// contains state that is common to all Inodes; this state may be optionally +// used by InodeOperations. 
An object that implements InodeOperations may +// not take a reference on an Inode. +type InodeOperations interface { + // Release releases all private file system data held by this object. + // Once Release is called, this object is dead (no other methods will + // ever be called). + Release(context.Context) + + // Lookup loads an Inode at name under dir into a Dirent. The name + // is a valid component path: it contains no "/"s nor is the empty + // string. + // + // Lookup may return one of: + // + // * A nil Dirent and a non-nil error. If the reason that Lookup failed + // was because the name does not exist under Inode, then must return + // syserror.ENOENT. + // + // * If name does not exist under dir and the file system wishes this + // fact to be cached, a non-nil Dirent containing a nil Inode and a + // nil error. This is a negative Dirent and must have exactly one + // reference (at-construction reference). + // + // * If name does exist under this dir, a non-nil Dirent containing a + // non-nil Inode, and a nil error. File systems that take extra + // references on this Dirent should implement DirentOperations. + Lookup(ctx context.Context, dir *Inode, name string) (*Dirent, error) + + // Create creates an Inode at name under dir and returns a new File + // whose Dirent backs the new Inode. Implementations must ensure that + // name does not already exist. Create may return one of: + // + // * A nil File and a non-nil error. + // + // * A non-nil File and a nil error. File.Dirent will be a new Dirent, + // with a single reference held by File. File systems that take extra + // references on this Dirent should implement DirentOperations. + // + // The caller must ensure that this operation is permitted. + Create(ctx context.Context, dir *Inode, name string, flags FileFlags, perm FilePermissions) (*File, error) + + // CreateDirectory creates a new directory under this dir. + // CreateDirectory should otherwise do the same as Create. 
+ // + // The caller must ensure that this operation is permitted. + CreateDirectory(ctx context.Context, dir *Inode, name string, perm FilePermissions) error + + // CreateLink creates a symbolic link under dir between newname + // and oldname. CreateLink should otherwise do the same as Create. + // + // The caller must ensure that this operation is permitted. + CreateLink(ctx context.Context, dir *Inode, oldname string, newname string) error + + // CreateHardLink creates a hard link under dir between the target + // Inode and name. + // + // The caller must ensure this operation is permitted. + CreateHardLink(ctx context.Context, dir *Inode, target *Inode, name string) error + + // CreateFifo creates a new named pipe under dir at name. + // + // The caller must ensure that this operation is permitted. + CreateFifo(ctx context.Context, dir *Inode, name string, perm FilePermissions) error + + // Remove removes the given named non-directory under dir. + // + // The caller must ensure that this operation is permitted. + Remove(ctx context.Context, dir *Inode, name string) error + + // RemoveDirectory removes the given named directory under dir. + // + // The caller must ensure that this operation is permitted. + // + // RemoveDirectory should check that the directory to be + // removed is empty. + RemoveDirectory(ctx context.Context, dir *Inode, name string) error + + // Rename atomically renames oldName under oldParent to newName under + // newParent where oldParent and newParent are directories. inode is + // the Inode of this InodeOperations. + // + // If replacement is true, then newName already exists and this call + // will replace it with oldName. + // + // Implementations are responsible for rejecting renames that replace + // non-empty directories. + Rename(ctx context.Context, inode *Inode, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error + + // Bind binds a new socket under dir at the given name. 
+ // + // The caller must ensure that this operation is permitted. + Bind(ctx context.Context, dir *Inode, name string, data transport.BoundEndpoint, perm FilePermissions) (*Dirent, error) + + // BoundEndpoint returns the socket endpoint at path stored in + // or generated by an Inode. + // + // The path is only relevant for generated endpoint because stored + // endpoints already know their path. It is ok for the endpoint to + // hold onto their path because the only way to change a bind + // address is to rebind the socket. + // + // This is valid iff the type of the Inode is a Socket, which + // generally implies that this Inode was created via CreateSocket. + // + // If there is no socket endpoint available, nil will be returned. + BoundEndpoint(inode *Inode, path string) transport.BoundEndpoint + + // GetFile returns a new open File backed by a Dirent and FileFlags. + // + // Special Inode types may block using ctx.Sleeper. RegularFiles, + // Directories, and Symlinks must not block (see doCopyUp). + // + // The returned File will uniquely back an application fd. + GetFile(ctx context.Context, d *Dirent, flags FileFlags) (*File, error) + + // UnstableAttr returns the most up-to-date "unstable" attributes of + // an Inode, where "unstable" means that they change in response to + // file system events. + UnstableAttr(ctx context.Context, inode *Inode) (UnstableAttr, error) + + // GetXattr retrieves the value of extended attribute specified by name. + // Inodes that do not support extended attributes return EOPNOTSUPP. Inodes + // that support extended attributes but don't have a value at name return + // ENODATA. + // + // If this is called through the getxattr(2) syscall, size indicates the + // size of the buffer that the application has allocated to hold the + // attribute value. If the value is larger than size, implementations may + // return ERANGE to indicate that the buffer is too small, but they are also + // free to ignore the hint entirely (i.e. 
the value returned may be larger + // than size). All size checking is done independently at the syscall layer. + GetXattr(ctx context.Context, inode *Inode, name string, size uint64) (string, error) + + // SetXattr sets the value of extended attribute specified by name. Inodes + // that do not support extended attributes return EOPNOTSUPP. + SetXattr(ctx context.Context, inode *Inode, name, value string, flags uint32) error + + // ListXattr returns the set of all extended attributes names that + // have values. Inodes that do not support extended attributes return + // EOPNOTSUPP. + // + // If this is called through the listxattr(2) syscall, size indicates the + // size of the buffer that the application has allocated to hold the + // attribute list. If the list would be larger than size, implementations may + // return ERANGE to indicate that the buffer is too small, but they are also + // free to ignore the hint entirely. All size checking is done independently + // at the syscall layer. + ListXattr(ctx context.Context, inode *Inode, size uint64) (map[string]struct{}, error) + + // RemoveXattr removes an extended attribute specified by name. Inodes that + // do not support extended attributes return EOPNOTSUPP. + RemoveXattr(ctx context.Context, inode *Inode, name string) error + + // Check determines whether an Inode can be accessed with the + // requested permission mask using the context (which gives access + // to Credentials and UserNamespace). + Check(ctx context.Context, inode *Inode, p PermMask) bool + + // SetPermissions sets new permissions for an Inode. Returns false + // if it was not possible to set the new permissions. + // + // The caller must ensure that this operation is permitted. + SetPermissions(ctx context.Context, inode *Inode, f FilePermissions) bool + + // SetOwner sets the ownership for this file. + // + // If either UID or GID are set to auth.NoID, its value will not be + // changed. 
+ // + // The caller must ensure that this operation is permitted. + SetOwner(ctx context.Context, inode *Inode, owner FileOwner) error + + // SetTimestamps sets the access and modification timestamps of an + // Inode according to the access and modification times in the TimeSpec. + // + // If either ATimeOmit or MTimeOmit is set, then the corresponding + // timestamp is not updated. + // + // If either ATimeSetSystemTime or MTimeSetSystemTime is true, that + // timestamp is set to the current time instead. + // + // The caller must ensure that this operation is permitted. + SetTimestamps(ctx context.Context, inode *Inode, ts TimeSpec) error + + // Truncate changes the size of an Inode. Truncate should not check + // permissions internally, as it is used for both sys_truncate and + // sys_ftruncate. + // + // Implementations need not check that length >= 0. + Truncate(ctx context.Context, inode *Inode, size int64) error + + // Allocate allows the caller to reserve disk space for the inode. + // It's equivalent to fallocate(2) with 'mode=0'. + Allocate(ctx context.Context, inode *Inode, offset int64, length int64) error + + // WriteOut writes cached Inode state to a backing filesystem in a + // synchronous manner. + // + // File systems that do not cache metadata or data via an Inode + // implement WriteOut as a no-op. File systems that are entirely in + // memory also implement WriteOut as a no-op. Otherwise file systems + // call Inode.Sync to write back page cached data and cached metadata + // followed by syncing writeback handles. + // + // It derives from include/linux/fs.h:super_operations->write_inode. + WriteOut(ctx context.Context, inode *Inode) error + + // Readlink reads the symlink path of an Inode. + // + // Readlink is permitted to return a different path depending on ctx, + // the request originator. + // + // The caller must ensure that this operation is permitted. 
+ // + // Readlink should check that Inode is a symlink and its content is + // at least readable. + Readlink(ctx context.Context, inode *Inode) (string, error) + + // Getlink resolves a symlink to a target *Dirent. + // + // Filesystems that can resolve the link by walking to the path returned + // by Readlink should return (nil, ErrResolveViaReadlink), which + // triggers link resolution via Realink and Lookup. + // + // Some links cannot be followed by Lookup. In this case, Getlink can + // return the Dirent of the link target. The caller holds a reference + // to the Dirent. Filesystems that return a non-nil *Dirent from Getlink + // cannot participate in an overlay because it is impossible for the + // overlay to ascertain whether or not the *Dirent should contain an + // overlayEntry. + // + // Any error returned from Getlink other than ErrResolveViaReadlink + // indicates the caller's inability to traverse this Inode as a link + // (e.g. syserror.ENOLINK indicates that the Inode is not a link, + // syscall.EPERM indicates that traversing the link is not allowed, etc). + Getlink(context.Context, *Inode) (*Dirent, error) + + // Mappable returns a memmap.Mappable that provides memory mappings of the + // Inode's data. Mappable may return nil if this is not supported. The + // returned Mappable must remain valid until InodeOperations.Release is + // called. + Mappable(*Inode) memmap.Mappable + + // The below methods require cleanup. + + // AddLink increments the hard link count of an Inode. + // + // Remove in favor of Inode.IncLink. + AddLink() + + // DropLink decrements the hard link count of an Inode. + // + // Remove in favor of Inode.DecLink. + DropLink() + + // NotifyStatusChange sets the status change time to the current time. + // + // Remove in favor of updating the Inode's cached status change time. + NotifyStatusChange(ctx context.Context) + + // IsVirtual indicates whether or not this corresponds to a virtual + // resource. 
+ // + // If IsVirtual returns true, then caching will be disabled for this + // node, and fs.Dirent.Freeze() will not stop operations on the node. + // + // Remove in favor of freezing specific mounts. + IsVirtual() bool + + // StatFS returns a filesystem Info implementation or an error. If + // the filesystem does not support this operation (maybe in the future + // it will), then ENOSYS should be returned. + StatFS(context.Context) (Info, error) +} diff --git a/pkg/sentry/fs/inode_overlay.go b/pkg/sentry/fs/inode_overlay.go new file mode 100644 index 000000000..537c8d257 --- /dev/null +++ b/pkg/sentry/fs/inode_overlay.go @@ -0,0 +1,737 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "strings" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/syserror" +) + +func overlayHasWhiteout(ctx context.Context, parent *Inode, name string) bool { + s, err := parent.GetXattr(ctx, XattrOverlayWhiteout(name), 1) + return err == nil && s == "y" +} + +func overlayCreateWhiteout(ctx context.Context, parent *Inode, name string) error { + return parent.InodeOperations.SetXattr(ctx, parent, XattrOverlayWhiteout(name), "y", 0 /* flags */) +} + +func overlayWriteOut(ctx context.Context, o *overlayEntry) error { + // Hot path. Avoid defers. 
+ var err error + o.copyMu.RLock() + if o.upper != nil { + err = o.upper.InodeOperations.WriteOut(ctx, o.upper) + } + o.copyMu.RUnlock() + return err +} + +// overlayLookup performs a lookup in parent. +// +// If name exists, it returns true if the Dirent is in the upper, false if the +// Dirent is in the lower. +func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name string) (*Dirent, bool, error) { + // Hot path. Avoid defers. + parent.copyMu.RLock() + + // Assert that there is at least one upper or lower entry. + if parent.upper == nil && parent.lower == nil { + parent.copyMu.RUnlock() + panic("invalid overlayEntry, needs at least one Inode") + } + + var upperInode *Inode + var lowerInode *Inode + + // We must remember whether the upper fs returned a negative dirent, + // because it is only safe to return one if the upper did. + var negativeUpperChild bool + + // Does the parent directory exist in the upper file system? + if parent.upper != nil { + // First check if a file object exists in the upper file system. + // A file could have been created over a whiteout, so we need to + // check if something exists in the upper file system first. + child, err := parent.upper.Lookup(ctx, name) + if err != nil && err != syserror.ENOENT { + // We encountered an error that an overlay cannot handle, + // we must propagate it to the caller. + parent.copyMu.RUnlock() + return nil, false, err + } + if child != nil { + if child.IsNegative() { + negativeUpperChild = true + } else { + upperInode = child.Inode + upperInode.IncRef() + } + child.DecRef() + } + + // Are we done? + if overlayHasWhiteout(ctx, parent.upper, name) { + if upperInode == nil { + parent.copyMu.RUnlock() + if negativeUpperChild { + // If the upper fs returnd a negative + // Dirent, then the upper is OK with + // that negative Dirent being cached in + // the Dirent tree, so we can return + // one from the overlay. 
+ return NewNegativeDirent(name), false, nil + } + // Upper fs is not OK with a negative Dirent + // being cached in the Dirent tree, so don't + // return one. + return nil, false, syserror.ENOENT + } + entry, err := newOverlayEntry(ctx, upperInode, nil, false) + if err != nil { + // Don't leak resources. + upperInode.DecRef() + parent.copyMu.RUnlock() + return nil, false, err + } + d, err := NewDirent(ctx, newOverlayInode(ctx, entry, inode.MountSource), name), nil + parent.copyMu.RUnlock() + return d, true, err + } + } + + // Check the lower file system. We do this unconditionally (even for + // non-directories) because we may need to use stable attributes from + // the lower filesystem (e.g. device number, inode number) that were + // visible before a copy up. + if parent.lower != nil { + // Check the lower file system. + child, err := parent.lower.Lookup(ctx, name) + // Same song and dance as above. + if err != nil && err != syserror.ENOENT { + // Don't leak resources. + if upperInode != nil { + upperInode.DecRef() + } + parent.copyMu.RUnlock() + return nil, false, err + } + if child != nil { + if !child.IsNegative() { + if upperInode == nil { + // If nothing was in the upper, use what we found in the lower. + lowerInode = child.Inode + lowerInode.IncRef() + } else { + // If we have something from the upper, we can only use it if the types + // match. + // NOTE(b/112312863): Allow SpecialDirectories and Directories to merge. + // This is needed to allow submounts in /proc and /sys. + if upperInode.StableAttr.Type == child.Inode.StableAttr.Type || + (IsDir(upperInode.StableAttr) && IsDir(child.Inode.StableAttr)) { + lowerInode = child.Inode + lowerInode.IncRef() + } + } + } + child.DecRef() + } + } + + // Was all of this for naught? + if upperInode == nil && lowerInode == nil { + parent.copyMu.RUnlock() + // We can only return a negative dirent if the upper returned + // one as well. See comments above regarding negativeUpperChild + // for more info. 
+ if negativeUpperChild { + return NewNegativeDirent(name), false, nil + } + return nil, false, syserror.ENOENT + } + + // Did we find a lower Inode? Remember this because we may decide we don't + // actually need the lower Inode (see below). + lowerExists := lowerInode != nil + + // If we found something in the upper filesystem and the lower filesystem, + // use the stable attributes from the lower filesystem. If we don't do this, + // then it may appear that the file was magically recreated across copy up. + if upperInode != nil && lowerInode != nil { + // Steal attributes. + upperInode.StableAttr = lowerInode.StableAttr + + // For non-directories, the lower filesystem resource is strictly + // unnecessary because we don't need to copy-up and we will always + // operate (e.g. read/write) on the upper Inode. + if !IsDir(upperInode.StableAttr) { + lowerInode.DecRef() + lowerInode = nil + } + } + + // Phew, finally done. + entry, err := newOverlayEntry(ctx, upperInode, lowerInode, lowerExists) + if err != nil { + // Well, not quite, we failed at the last moment, how depressing. + // Be sure not to leak resources. + if upperInode != nil { + upperInode.DecRef() + } + if lowerInode != nil { + lowerInode.DecRef() + } + parent.copyMu.RUnlock() + return nil, false, err + } + d, err := NewDirent(ctx, newOverlayInode(ctx, entry, inode.MountSource), name), nil + parent.copyMu.RUnlock() + return d, upperInode != nil, err +} + +func overlayCreate(ctx context.Context, o *overlayEntry, parent *Dirent, name string, flags FileFlags, perm FilePermissions) (*File, error) { + // Sanity check. + if parent.Inode.overlay == nil { + panic(fmt.Sprintf("overlayCreate called with non-overlay parent inode (parent InodeOperations type is %T)", parent.Inode.InodeOperations)) + } + + // Dirent.Create takes renameMu if the Inode is an overlay Inode. 
+ if err := copyUpLockedForRename(ctx, parent); err != nil { + return nil, err + } + + upperFile, err := o.upper.InodeOperations.Create(ctx, o.upper, name, flags, perm) + if err != nil { + return nil, err + } + + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + + // Take another reference on the upper file's inode, which will be + // owned by the overlay entry. + upperFile.Dirent.Inode.IncRef() + entry, err := newOverlayEntry(ctx, upperFile.Dirent.Inode, nil, false) + if err != nil { + werr := fmt.Errorf("newOverlayEntry failed: %v", err) + cleanupUpper(ctx, o.upper, name, werr) + return nil, err + } + + // NOTE(b/71766861): Replace the Dirent with a transient Dirent, since + // we are about to create the real Dirent: an overlay Dirent. + // + // This ensures the *fs.File returned from overlayCreate is in the same + // state as the *fs.File returned by overlayGetFile, where the upper + // file has a transient Dirent. + // + // This is necessary for Save/Restore, as otherwise the upper Dirent + // (which has no path as it is unparented and never reachable by the + // user) will clobber the real path for the underlying Inode. + upperFile.Dirent.Inode.IncRef() + upperDirent := NewTransientDirent(upperFile.Dirent.Inode) + upperFile.Dirent.DecRef() + upperFile.Dirent = upperDirent + + // Create the overlay inode and dirent. We need this to construct the + // overlay file. + overlayInode := newOverlayInode(ctx, entry, parent.Inode.MountSource) + // d will own the inode reference. + overlayDirent := NewDirent(ctx, overlayInode, name) + // The overlay file created below with NewFile will take a reference on + // the overlayDirent, and it should be the only thing holding a + // reference at the time of creation, so we must drop this reference. + defer overlayDirent.DecRef() + + // Create a new overlay file that wraps the upper file. 
+ flags.Pread = upperFile.Flags().Pread + flags.Pwrite = upperFile.Flags().Pwrite + overlayFile := NewFile(ctx, overlayDirent, flags, &overlayFileOperations{upper: upperFile}) + + return overlayFile, nil +} + +func overlayCreateDirectory(ctx context.Context, o *overlayEntry, parent *Dirent, name string, perm FilePermissions) error { + // Dirent.CreateDirectory takes renameMu if the Inode is an overlay + // Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + if err := o.upper.InodeOperations.CreateDirectory(ctx, o.upper, name, perm); err != nil { + return err + } + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + return nil +} + +func overlayCreateLink(ctx context.Context, o *overlayEntry, parent *Dirent, oldname string, newname string) error { + // Dirent.CreateLink takes renameMu if the Inode is an overlay Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + if err := o.upper.InodeOperations.CreateLink(ctx, o.upper, oldname, newname); err != nil { + return err + } + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + return nil +} + +func overlayCreateHardLink(ctx context.Context, o *overlayEntry, parent *Dirent, target *Dirent, name string) error { + // Dirent.CreateHardLink takes renameMu if the Inode is an overlay + // Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + if err := copyUpLockedForRename(ctx, target); err != nil { + return err + } + if err := o.upper.InodeOperations.CreateHardLink(ctx, o.upper, target.Inode.overlay.upper, name); err != nil { + return err + } + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + return nil +} + +func overlayCreateFifo(ctx context.Context, o *overlayEntry, parent *Dirent, name string, perm FilePermissions) error { + // Dirent.CreateFifo takes renameMu if the Inode is an overlay Inode. 
+ if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + if err := o.upper.InodeOperations.CreateFifo(ctx, o.upper, name, perm); err != nil { + return err + } + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + return nil +} + +func overlayRemove(ctx context.Context, o *overlayEntry, parent *Dirent, child *Dirent) error { + // Dirent.Remove and Dirent.RemoveDirectory take renameMu if the Inode + // is an overlay Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + child.Inode.overlay.copyMu.RLock() + defer child.Inode.overlay.copyMu.RUnlock() + if child.Inode.overlay.upper != nil { + if child.Inode.StableAttr.Type == Directory { + if err := o.upper.InodeOperations.RemoveDirectory(ctx, o.upper, child.name); err != nil { + return err + } + } else { + if err := o.upper.InodeOperations.Remove(ctx, o.upper, child.name); err != nil { + return err + } + } + } + if child.Inode.overlay.lowerExists { + if err := overlayCreateWhiteout(ctx, o.upper, child.name); err != nil { + return err + } + } + // We've removed from the directory so we must drop the cache. + o.markDirectoryDirty() + return nil +} + +func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, renamed *Dirent, newParent *Dirent, newName string, replacement bool) error { + // To be able to copy these up below, they have to be part of an + // overlay file system. + // + // Maybe some day we can allow the more complicated case of + // non-overlay X overlay renames, but that's not necessary right now. + if renamed.Inode.overlay == nil || newParent.Inode.overlay == nil || oldParent.Inode.overlay == nil { + return syserror.EXDEV + } + + if replacement { + // Check here if the file to be replaced exists and is a + // non-empty directory. 
If we copy up first, we may end up + // copying the directory but none of its children, so the + // directory will appear empty in the upper fs, which will then + // allow the rename to proceed when it should return ENOTEMPTY. + // + // NOTE(b/111808347): Ideally, we'd just pass in the replaced + // Dirent from Rename, but we must drop the reference on + // replaced before we make the rename call, so Rename can't + // pass the Dirent to the Inode without significantly + // complicating the API. Thus we look it up again here. + // + // For the same reason we can't use defer here. + replaced, inUpper, err := overlayLookup(ctx, newParent.Inode.overlay, newParent.Inode, newName) + // If err == ENOENT or a negative Dirent is returned, then + // newName has been removed out from under us. That's fine; + // filesystems where that can happen must handle stale + // 'replaced'. + if err != nil && err != syserror.ENOENT { + return err + } + if err == nil { + if !inUpper { + // newName doesn't exist in + // newParent.Inode.overlay.upper, thus from + // that Inode's perspective this won't be a + // replacing rename. + replacement = false + } + + if !replaced.IsNegative() && IsDir(replaced.Inode.StableAttr) { + children, err := readdirOne(ctx, replaced) + if err != nil { + replaced.DecRef() + return err + } + + // readdirOne ensures that "." and ".." are not + // included among the returned children, so we don't + // need to bother checking for them. 
+ if len(children) > 0 { + replaced.DecRef() + return syserror.ENOTEMPTY + } + } + + replaced.DecRef() + } + } + + if err := copyUpLockedForRename(ctx, renamed); err != nil { + return err + } + if err := copyUpLockedForRename(ctx, newParent); err != nil { + return err + } + oldName := renamed.name + if err := o.upper.InodeOperations.Rename(ctx, renamed.Inode.overlay.upper, oldParent.Inode.overlay.upper, oldName, newParent.Inode.overlay.upper, newName, replacement); err != nil { + return err + } + if renamed.Inode.overlay.lowerExists { + if err := overlayCreateWhiteout(ctx, oldParent.Inode.overlay.upper, oldName); err != nil { + return err + } + } + // We've changed the directory so we must drop the cache. + oldParent.Inode.overlay.markDirectoryDirty() + return nil +} + +func overlayBind(ctx context.Context, o *overlayEntry, parent *Dirent, name string, data transport.BoundEndpoint, perm FilePermissions) (*Dirent, error) { + if err := copyUpLockedForRename(ctx, parent); err != nil { + return nil, err + } + + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + d, err := o.upper.InodeOperations.Bind(ctx, o.upper, name, data, perm) + if err != nil { + return nil, err + } + + // We've added to the directory so we must drop the cache. + o.markDirectoryDirty() + + // Grab the inode and drop the dirent, we don't need it. + inode := d.Inode + inode.IncRef() + d.DecRef() + + // Create a new overlay entry and dirent for the socket. + entry, err := newOverlayEntry(ctx, inode, nil, false) + if err != nil { + inode.DecRef() + return nil, err + } + // Use the parent's MountSource, since that corresponds to the overlay, + // and not the upper filesystem. 
+ return NewDirent(ctx, newOverlayInode(ctx, entry, parent.Inode.MountSource), name), nil +} + +func overlayBoundEndpoint(o *overlayEntry, path string) transport.BoundEndpoint { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + if o.upper != nil { + return o.upper.InodeOperations.BoundEndpoint(o.upper, path) + } + + return o.lower.BoundEndpoint(path) +} + +func overlayGetFile(ctx context.Context, o *overlayEntry, d *Dirent, flags FileFlags) (*File, error) { + // Hot path. Avoid defers. + if flags.Write { + if err := copyUp(ctx, d); err != nil { + return nil, err + } + } + + o.copyMu.RLock() + + if o.upper != nil { + upper, err := overlayFile(ctx, o.upper, flags) + if err != nil { + o.copyMu.RUnlock() + return nil, err + } + flags.Pread = upper.Flags().Pread + flags.Pwrite = upper.Flags().Pwrite + f, err := NewFile(ctx, d, flags, &overlayFileOperations{upper: upper}), nil + o.copyMu.RUnlock() + return f, err + } + + lower, err := overlayFile(ctx, o.lower, flags) + if err != nil { + o.copyMu.RUnlock() + return nil, err + } + flags.Pread = lower.Flags().Pread + flags.Pwrite = lower.Flags().Pwrite + o.copyMu.RUnlock() + return NewFile(ctx, d, flags, &overlayFileOperations{lower: lower}), nil +} + +func overlayUnstableAttr(ctx context.Context, o *overlayEntry) (UnstableAttr, error) { + // Hot path. Avoid defers. + var ( + attr UnstableAttr + err error + ) + o.copyMu.RLock() + if o.upper != nil { + attr, err = o.upper.UnstableAttr(ctx) + } else { + attr, err = o.lower.UnstableAttr(ctx) + } + o.copyMu.RUnlock() + return attr, err +} + +func overlayGetXattr(ctx context.Context, o *overlayEntry, name string, size uint64) (string, error) { + // Hot path. This is how the overlay checks for whiteout files. + // Avoid defers. + var ( + s string + err error + ) + + // Don't forward the value of the extended attribute if it would + // unexpectedly change the behavior of a wrapping overlay layer. 
+ if strings.HasPrefix(XattrOverlayPrefix, name) { + return "", syserror.ENODATA + } + + o.copyMu.RLock() + if o.upper != nil { + s, err = o.upper.GetXattr(ctx, name, size) + } else { + s, err = o.lower.GetXattr(ctx, name, size) + } + o.copyMu.RUnlock() + return s, err +} + +func overlaySetxattr(ctx context.Context, o *overlayEntry, d *Dirent, name, value string, flags uint32) error { + // Don't allow changes to overlay xattrs through a setxattr syscall. + if strings.HasPrefix(XattrOverlayPrefix, name) { + return syserror.EPERM + } + + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.SetXattr(ctx, d, name, value, flags) +} + +func overlayListXattr(ctx context.Context, o *overlayEntry, size uint64) (map[string]struct{}, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + var names map[string]struct{} + var err error + if o.upper != nil { + names, err = o.upper.ListXattr(ctx, size) + } else { + names, err = o.lower.ListXattr(ctx, size) + } + for name := range names { + // Same as overlayGetXattr, we shouldn't forward along + // overlay attributes. + if strings.HasPrefix(XattrOverlayPrefix, name) { + delete(names, name) + } + } + return names, err +} + +func overlayRemoveXattr(ctx context.Context, o *overlayEntry, d *Dirent, name string) error { + // Don't allow changes to overlay xattrs through a removexattr syscall. + if strings.HasPrefix(XattrOverlayPrefix, name) { + return syserror.EPERM + } + + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.RemoveXattr(ctx, d, name) +} + +func overlayCheck(ctx context.Context, o *overlayEntry, p PermMask) error { + o.copyMu.RLock() + // Hot path. Avoid defers. 
+ var err error + if o.upper != nil { + err = o.upper.check(ctx, p) + } else { + err = o.lower.check(ctx, p) + } + o.copyMu.RUnlock() + return err +} + +func overlaySetPermissions(ctx context.Context, o *overlayEntry, d *Dirent, f FilePermissions) bool { + if err := copyUp(ctx, d); err != nil { + return false + } + return o.upper.InodeOperations.SetPermissions(ctx, o.upper, f) +} + +func overlaySetOwner(ctx context.Context, o *overlayEntry, d *Dirent, owner FileOwner) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.SetOwner(ctx, o.upper, owner) +} + +func overlaySetTimestamps(ctx context.Context, o *overlayEntry, d *Dirent, ts TimeSpec) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.SetTimestamps(ctx, o.upper, ts) +} + +func overlayTruncate(ctx context.Context, o *overlayEntry, d *Dirent, size int64) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.Truncate(ctx, o.upper, size) +} + +func overlayAllocate(ctx context.Context, o *overlayEntry, d *Dirent, offset, length int64) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.Allocate(ctx, o.upper, offset, length) +} + +func overlayReadlink(ctx context.Context, o *overlayEntry) (string, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.Readlink(ctx) + } + return o.lower.Readlink(ctx) +} + +func overlayGetlink(ctx context.Context, o *overlayEntry) (*Dirent, error) { + var dirent *Dirent + var err error + + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + if o.upper != nil { + dirent, err = o.upper.Getlink(ctx) + } else { + dirent, err = o.lower.Getlink(ctx) + } + if dirent != nil { + // This dirent is likely bogus (its Inode likely doesn't contain + // the right overlayEntry). 
So we're forced to drop it on the + // ground and claim that jumping around the filesystem like this + // is not supported. + name, _ := dirent.FullName(nil) + dirent.DecRef() + + // Claim that the path is not accessible. + err = syserror.EACCES + log.Warningf("Getlink not supported in overlay for %q", name) + } + return nil, err +} + +func overlayStatFS(ctx context.Context, o *overlayEntry) (Info, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + var i Info + var err error + if o.upper != nil { + i, err = o.upper.StatFS(ctx) + } else { + i, err = o.lower.StatFS(ctx) + } + if err != nil { + return Info{}, err + } + + i.Type = linux.OVERLAYFS_SUPER_MAGIC + + return i, nil +} + +// NewTestOverlayDir returns an overlay Inode for tests. +// +// If `revalidate` is true, then the upper filesystem will require +// revalidation. +func NewTestOverlayDir(ctx context.Context, upper, lower *Inode, revalidate bool) *Inode { + fs := &overlayFilesystem{} + var upperMsrc *MountSource + if revalidate { + upperMsrc = NewRevalidatingMountSource(ctx, fs, MountSourceFlags{}) + } else { + upperMsrc = NewNonCachingMountSource(ctx, fs, MountSourceFlags{}) + } + msrc := NewMountSource(ctx, &overlayMountSourceOperations{ + upper: upperMsrc, + lower: NewNonCachingMountSource(ctx, fs, MountSourceFlags{}), + }, fs, MountSourceFlags{}) + overlay := &overlayEntry{ + upper: upper, + lower: lower, + } + return newOverlayInode(ctx, overlay, msrc) +} + +// TestHasUpperFS returns true if i is an overlay Inode and it has a pointer +// to an Inode on an upper filesystem. +func (i *Inode) TestHasUpperFS() bool { + return i.overlay != nil && i.overlay.upper != nil +} + +// TestHasLowerFS returns true if i is an overlay Inode and it has a pointer +// to an Inode on a lower filesystem. 
+func (i *Inode) TestHasLowerFS() bool { + return i.overlay != nil && i.overlay.lower != nil +} diff --git a/pkg/sentry/fs/inode_overlay_test.go b/pkg/sentry/fs/inode_overlay_test.go new file mode 100644 index 000000000..389c219d6 --- /dev/null +++ b/pkg/sentry/fs/inode_overlay_test.go @@ -0,0 +1,470 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs_test + +import ( + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" + "gvisor.dev/gvisor/pkg/syserror" +) + +func TestLookup(t *testing.T) { + ctx := contexttest.Context(t) + for _, test := range []struct { + // Test description. + desc string + + // Lookup parameters. + dir *fs.Inode + name string + + // Want from lookup. 
+ found bool + hasUpper bool + hasLower bool + }{ + { + desc: "no upper, lower has name", + dir: fs.NewTestOverlayDir(ctx, + nil, /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + { + desc: "no lower, upper has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + nil, /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, only lower has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "b", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + { + desc: "upper and lower, only upper has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "b", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, both have file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, both have directory", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: true, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: true, + }, + }, nil), 
/* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: true, + hasLower: true, + }, + { + desc: "upper and lower, upper negative masks lower file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, nil, []string{"a"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: false, + hasUpper: false, + hasLower: false, + }, + { + desc: "upper and lower, upper negative does not mask lower file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, nil, []string{"b"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + false /* revalidate */), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + } { + t.Run(test.desc, func(t *testing.T) { + dirent, err := test.dir.Lookup(ctx, test.name) + if test.found && (err == syserror.ENOENT || dirent.IsNegative()) { + t.Fatalf("lookup %q expected to find positive dirent, got dirent %v err %v", test.name, dirent, err) + } + if !test.found { + if err != syserror.ENOENT && !dirent.IsNegative() { + t.Errorf("lookup %q expected to return ENOENT or negative dirent, got dirent %v err %v", test.name, dirent, err) + } + // Nothing more to check. + return + } + if hasUpper := dirent.Inode.TestHasUpperFS(); hasUpper != test.hasUpper { + t.Fatalf("lookup got upper filesystem %v, want %v", hasUpper, test.hasUpper) + } + if hasLower := dirent.Inode.TestHasLowerFS(); hasLower != test.hasLower { + t.Errorf("lookup got lower filesystem %v, want %v", hasLower, test.hasLower) + } + }) + } +} + +func TestLookupRevalidation(t *testing.T) { + // File name used in the tests. + fileName := "foofile" + ctx := contexttest.Context(t) + for _, tc := range []struct { + // Test description. + desc string + + // Upper and lower fs for the overlay. + upper *fs.Inode + lower *fs.Inode + + // Whether the upper requires revalidation. 
+ revalidate bool + + // Whether we should get the same dirent on second lookup. + wantSame bool + }{ + { + desc: "file from upper with no revalidation", + upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + lower: newTestRamfsDir(ctx, nil, nil), + revalidate: false, + wantSame: true, + }, + { + desc: "file from upper with revalidation", + upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + lower: newTestRamfsDir(ctx, nil, nil), + revalidate: true, + wantSame: false, + }, + { + desc: "file from lower with no revalidation", + upper: newTestRamfsDir(ctx, nil, nil), + lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + revalidate: false, + wantSame: true, + }, + { + desc: "file from lower with revalidation", + upper: newTestRamfsDir(ctx, nil, nil), + lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + revalidate: true, + // The file does not exist in the upper, so we do not + // need to revalidate it. + wantSame: true, + }, + { + desc: "file from upper and lower with no revalidation", + upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + revalidate: false, + wantSame: true, + }, + { + desc: "file from upper and lower with revalidation", + upper: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + lower: newTestRamfsDir(ctx, []dirContent{{name: fileName}}, nil), + revalidate: true, + wantSame: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + root := fs.NewDirent(ctx, newTestRamfsDir(ctx, nil, nil), "root") + ctx = &rootContext{ + Context: ctx, + root: root, + } + overlay := fs.NewDirent(ctx, fs.NewTestOverlayDir(ctx, tc.upper, tc.lower, tc.revalidate), "overlay") + // Lookup the file twice through the overlay. 
+ first, err := overlay.Walk(ctx, root, fileName) + if err != nil { + t.Fatalf("overlay.Walk(%q) failed: %v", fileName, err) + } + second, err := overlay.Walk(ctx, root, fileName) + if err != nil { + t.Fatalf("overlay.Walk(%q) failed: %v", fileName, err) + } + + if tc.wantSame && first != second { + t.Errorf("dirent lookup got different dirents, wanted same\nfirst=%+v\nsecond=%+v", first, second) + } else if !tc.wantSame && first == second { + t.Errorf("dirent lookup got the same dirent, wanted different: %+v", first) + } + }) + } +} + +func TestCacheFlush(t *testing.T) { + ctx := contexttest.Context(t) + + // Upper and lower each have a file. + upperFileName := "file-from-upper" + lowerFileName := "file-from-lower" + upper := newTestRamfsDir(ctx, []dirContent{{name: upperFileName}}, nil) + lower := newTestRamfsDir(ctx, []dirContent{{name: lowerFileName}}, nil) + + overlay := fs.NewTestOverlayDir(ctx, upper, lower, true /* revalidate */) + + mns, err := fs.NewMountNamespace(ctx, overlay) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + root := mns.Root() + defer root.DecRef() + + ctx = &rootContext{ + Context: ctx, + root: root, + } + + for _, fileName := range []string{upperFileName, lowerFileName} { + // Walk to the file. + maxTraversals := uint(0) + dirent, err := mns.FindInode(ctx, root, nil, fileName, &maxTraversals) + if err != nil { + t.Fatalf("FindInode(%q) failed: %v", fileName, err) + } + + // Get a file from the dirent. + file, err := dirent.Inode.GetFile(ctx, dirent, fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("GetFile() failed: %v", err) + } + + // The dirent should have 3 refs, one from us, one from the + // file, and one from the dirent cache. + // dirent cache. + if got, want := dirent.ReadRefs(), 3; int(got) != want { + t.Errorf("dirent.ReadRefs() got %d want %d", got, want) + } + + // Drop the file reference. + file.DecRef() + + // Dirent should have 2 refs left. 
+ if got, want := dirent.ReadRefs(), 2; int(got) != want { + t.Errorf("dirent.ReadRefs() got %d want %d", got, want) + } + + // Flush the dirent cache. + mns.FlushMountSourceRefs() + + // Dirent should have 1 ref left from the dirent cache. + if got, want := dirent.ReadRefs(), 1; int(got) != want { + t.Errorf("dirent.ReadRefs() got %d want %d", got, want) + } + + // Drop our ref. + dirent.DecRef() + + // We should be back to zero refs. + if got, want := dirent.ReadRefs(), 0; int(got) != want { + t.Errorf("dirent.ReadRefs() got %d want %d", got, want) + } + } + +} + +type dir struct { + fs.InodeOperations + + // List of negative child names. + negative []string + + // ReaddirCalled records whether Readdir was called on a file + // corresponding to this inode. + ReaddirCalled bool +} + +// GetXattr implements InodeOperations.GetXattr. +func (d *dir) GetXattr(_ context.Context, _ *fs.Inode, name string, _ uint64) (string, error) { + for _, n := range d.negative { + if name == fs.XattrOverlayWhiteout(n) { + return "y", nil + } + } + return "", syserror.ENOATTR +} + +// GetFile implements InodeOperations.GetFile. +func (d *dir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + file, err := d.InodeOperations.GetFile(ctx, dirent, flags) + if err != nil { + return nil, err + } + defer file.DecRef() + // Wrap the file's FileOperations in a dirFile. 
+ fops := &dirFile{ + FileOperations: file.FileOperations, + inode: d, + } + return fs.NewFile(ctx, dirent, flags, fops), nil +} + +type dirContent struct { + name string + dir bool +} + +type dirFile struct { + fs.FileOperations + inode *dir +} + +type inode struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeNotVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeStaticFileGetter +} + +// Readdir implements fs.FileOperations.Readdir. It sets the ReaddirCalled +// field on the inode. +func (f *dirFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) { + f.inode.ReaddirCalled = true + return f.FileOperations.Readdir(ctx, file, ser) +} + +func newTestRamfsInode(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + inode := fs.NewInode(ctx, &inode{ + InodeStaticFileGetter: fsutil.InodeStaticFileGetter{ + Contents: []byte("foobar"), + }, + }, msrc, fs.StableAttr{Type: fs.RegularFile}) + return inode +} + +func newTestRamfsDir(ctx context.Context, contains []dirContent, negative []string) *fs.Inode { + msrc := fs.NewPseudoMountSource(ctx) + contents := make(map[string]*fs.Inode) + for _, c := range contains { + if c.dir { + contents[c.name] = newTestRamfsDir(ctx, nil, nil) + } else { + contents[c.name] = newTestRamfsInode(ctx, msrc) + } + } + dops := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermissions{ + User: fs.PermMask{Read: true, Execute: true}, + }) + return fs.NewInode(ctx, &dir{ + InodeOperations: dops, + negative: negative, + }, msrc, fs.StableAttr{Type: fs.Directory}) +} diff --git 
a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go new file mode 100644 index 000000000..e3a715c1f --- /dev/null +++ b/pkg/sentry/fs/inotify.go @@ -0,0 +1,352 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "io" + "sync/atomic" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/uniqueid" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// Inotify represents an inotify instance created by inotify_init(2) or +// inotify_init1(2). Inotify implements the FileOperations interface. +// +// Lock ordering: +// Inotify.mu -> Inode.Watches.mu -> Watch.mu -> Inotify.evMu +// +// +stateify savable +type Inotify struct { + // Unique identifier for this inotify instance. We don't just reuse the + // inotify fd because fds can be duped. These should not be exposed to the + // user, since we may aggressively reuse an id on S/R. + id uint64 + + waiter.Queue `state:"nosave"` + + // evMu *only* protects the events list. We need a separate lock because + // while queuing events, a watch needs to lock the event queue, and using mu + // for that would violate lock ordering since at that point the calling + // goroutine already holds Watch.target.Watches.mu. 
+ evMu sync.Mutex `state:"nosave"` + + // A list of pending events for this inotify instance. Protected by evMu. + events eventList + + // A scratch buffer, used to serialize inotify events. We allocate this + // ahead of time and reuse it for performance. Protected by evMu. + scratch []byte + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // The next watch descriptor number to use for this inotify instance. Note + // that Linux starts numbering watch descriptors from 1. + nextWatch int32 + + // Map from watch descriptors to watch objects. + watches map[int32]*Watch +} + +// NewInotify constructs a new Inotify instance. +func NewInotify(ctx context.Context) *Inotify { + return &Inotify{ + id: uniqueid.GlobalFromContext(ctx), + scratch: make([]byte, inotifyEventBaseSize), + nextWatch: 1, // Linux starts numbering watch descriptors from 1. + watches: make(map[int32]*Watch), + } +} + +// Release implements FileOperations.Release. Release removes all watches and +// frees all resources for an inotify instance. +func (i *Inotify) Release() { + // We need to hold i.mu to avoid a race with concurrent calls to + // Inotify.targetDestroyed from Watches. There's no risk of Watches + // accessing this Inotify after the destructor ends, because we remove all + // references to it below. + i.mu.Lock() + defer i.mu.Unlock() + for _, w := range i.watches { + // Remove references to the watch from the watch target. We don't need + // to worry about the references from the owner instance, since we're in + // the owner's destructor. + w.target.Watches.Remove(w.ID()) + // Don't leak any references to the target, held by pins in the watch. + w.destroy() + } +} + +// Readiness implements waiter.Waitable.Readiness. +// +// Readiness indicates whether there are pending events for an inotify instance.
+func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask { + ready := waiter.EventMask(0) + + i.evMu.Lock() + defer i.evMu.Unlock() + + if !i.events.Empty() { + ready |= waiter.EventIn + } + + return mask & ready +} + +// Seek implements FileOperations.Seek. +func (*Inotify) Seek(context.Context, *File, SeekWhence, int64) (int64, error) { + return 0, syserror.ESPIPE +} + +// Readdir implements FileOperatons.Readdir. +func (*Inotify) Readdir(context.Context, *File, DentrySerializer) (int64, error) { + return 0, syserror.ENOTDIR +} + +// Write implements FileOperations.Write. +func (*Inotify) Write(context.Context, *File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EBADF +} + +// Read implements FileOperations.Read. +func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() < inotifyEventBaseSize { + return 0, syserror.EINVAL + } + + i.evMu.Lock() + defer i.evMu.Unlock() + + if i.events.Empty() { + // Nothing to read yet, tell caller to block. + return 0, syserror.ErrWouldBlock + } + + var writeLen int64 + for it := i.events.Front(); it != nil; { + event := it + it = it.Next() + + // Does the buffer have enough remaining space to hold the event we're + // about to write out? + if dst.NumBytes() < int64(event.sizeOf()) { + if writeLen > 0 { + // Buffer wasn't big enough for all pending events, but we did + // write some events out. + return writeLen, nil + } + return 0, syserror.EINVAL + } + + // Linux always dequeues an available event as long as there's enough + // buffer space to copy it out, even if the copy below fails. Emulate + // this behaviour. + i.events.Remove(event) + + // Buffer has enough space, copy event to the read buffer. + n, err := event.CopyTo(ctx, i.scratch, dst) + if err != nil { + return 0, err + } + + writeLen += n + dst = dst.DropFirst64(n) + } + return writeLen, nil +} + +// WriteTo implements FileOperations.WriteTo. 
+func (*Inotify) WriteTo(context.Context, *File, io.Writer, int64, bool) (int64, error) { + return 0, syserror.ENOSYS +} + +// Fsync implements FileOperations.Fsync. +func (*Inotify) Fsync(context.Context, *File, int64, int64, SyncType) error { + return syserror.EINVAL +} + +// ReadFrom implements FileOperations.ReadFrom. +func (*Inotify) ReadFrom(context.Context, *File, io.Reader, int64) (int64, error) { + return 0, syserror.ENOSYS +} + +// Flush implements FileOperations.Flush. +func (*Inotify) Flush(context.Context, *File) error { + return nil +} + +// ConfigureMMap implements FileOperations.ConfigureMMap. +func (*Inotify) ConfigureMMap(context.Context, *File, *memmap.MMapOpts) error { + return syserror.ENODEV +} + +// UnstableAttr implements FileOperations.UnstableAttr. +func (i *Inotify) UnstableAttr(ctx context.Context, file *File) (UnstableAttr, error) { + return file.Dirent.Inode.UnstableAttr(ctx) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (i *Inotify) Ioctl(ctx context.Context, _ *File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch args[1].Int() { + case linux.FIONREAD: + i.evMu.Lock() + defer i.evMu.Unlock() + var n uint32 + for e := i.events.Front(); e != nil; e = e.Next() { + n += uint32(e.sizeOf()) + } + var buf [4]byte + usermem.ByteOrder.PutUint32(buf[:], n) + _, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{}) + return 0, err + + default: + return 0, syserror.ENOTTY + } +} + +func (i *Inotify) queueEvent(ev *Event) { + i.evMu.Lock() + + // Check if we should coalesce the event we're about to queue with the last + // one currently in the queue. Events are coalesced if they are identical. + if last := i.events.Back(); last != nil { + if ev.equals(last) { + // "Coalesce" the two events by simply not queuing the new one. We + // don't need to raise a waiter.EventIn notification because no new + // data is available for reading. 
+ i.evMu.Unlock() + return + } + } + + i.events.PushBack(ev) + + // Release mutex before notifying waiters because we don't control what they + // can do. + i.evMu.Unlock() + + i.Queue.Notify(waiter.EventIn) +} + +// newWatchLocked creates and adds a new watch to target. +func (i *Inotify) newWatchLocked(target *Dirent, mask uint32) *Watch { + wd := i.nextWatch + i.nextWatch++ + + watch := &Watch{ + owner: i, + wd: wd, + mask: mask, + target: target.Inode, + pins: make(map[*Dirent]bool), + } + + i.watches[wd] = watch + + // Grab an extra reference to target to prevent it from being evicted from + // memory. This ref is dropped during either watch removal, target + // destruction, or inotify instance destruction. See callers of Watch.Unpin. + watch.Pin(target) + target.Inode.Watches.Add(watch) + + return watch +} + +// targetDestroyed is called by w to notify i that w's target is gone. This +// automatically generates a watch removal event. +func (i *Inotify) targetDestroyed(w *Watch) { + i.mu.Lock() + _, found := i.watches[w.wd] + delete(i.watches, w.wd) + i.mu.Unlock() + + if found { + i.queueEvent(newEvent(w.wd, "", linux.IN_IGNORED, 0)) + } +} + +// AddWatch constructs a new inotify watch and adds it to the target dirent. It +// returns the watch descriptor returned by inotify_add_watch(2). +func (i *Inotify) AddWatch(target *Dirent, mask uint32) int32 { + // Note: Locking this inotify instance protects the result returned by + // Lookup() below. With the lock held, we know for sure the lookup result + // won't become stale because it's impossible for *this* instance to + // add/remove watches on target. + i.mu.Lock() + defer i.mu.Unlock() + + // Does the target already have a watch from this inotify instance? + if existing := target.Inode.Watches.Lookup(i.id); existing != nil { + // This may be a watch on a different dirent pointing to the + // same inode. Obtain an extra reference if necessary. 
+ existing.Pin(target) + + newmask := mask + if mergeMask := mask&linux.IN_MASK_ADD != 0; mergeMask { + // "Add (OR) events to watch mask for this pathname if it already + // exists (instead of replacing mask)." -- inotify(7) + newmask |= atomic.LoadUint32(&existing.mask) + } + atomic.StoreUint32(&existing.mask, newmask) + return existing.wd + } + + // No existing watch, create a new watch. + watch := i.newWatchLocked(target, mask) + return watch.wd +} + +// RmWatch implements watcher.Watchable.RmWatch. +// +// RmWatch looks up an inotify watch for the given 'wd' and configures the +// target dirent to stop sending events to this inotify instance. +func (i *Inotify) RmWatch(wd int32) error { + i.mu.Lock() + + // Find the watch we were asked to remove. + watch, ok := i.watches[wd] + if !ok { + i.mu.Unlock() + return syserror.EINVAL + } + + // Remove the watch from this instance. + delete(i.watches, wd) + + // Remove the watch from the watch target. + watch.target.Watches.Remove(watch.ID()) + + // The watch is now isolated and we can safely drop the instance lock. We + // need to do so because watch.destroy() acquires Watch.mu, which cannot be + // acquired with Inotify.mu held. + i.mu.Unlock() + + // Generate the event for the removal. + i.queueEvent(newEvent(watch.wd, "", linux.IN_IGNORED, 0)) + + // Remove all pins. + watch.destroy() + + return nil +} diff --git a/pkg/sentry/fs/inotify_event.go b/pkg/sentry/fs/inotify_event.go new file mode 100644 index 000000000..686e1b1cd --- /dev/null +++ b/pkg/sentry/fs/inotify_event.go @@ -0,0 +1,139 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "bytes" + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/usermem" +) + +// inotifyEventBaseSize is the base size of linux's struct inotify_event. This +// must be a power of 2 for rounding below. +const inotifyEventBaseSize = 16 + +// Event represents a struct inotify_event from linux. +// +// +stateify savable +type Event struct { + eventEntry + + wd int32 + mask uint32 + cookie uint32 + + // len is computed based on the name field and is set automatically by + // Event.setName. It should be 0 when no name is set; otherwise it is the + // length of the name slice. + len uint32 + + // The name field has special padding requirements and should only be set by + // calling Event.setName. + name []byte +} + +func newEvent(wd int32, name string, events, cookie uint32) *Event { + e := &Event{ + wd: wd, + mask: events, + cookie: cookie, + } + if name != "" { + e.setName(name) + } + return e +} + +// paddedBytes converts a go string to a null-terminated c-string, padded with +// null bytes to a total size of 'l'. 'l' must be large enough for all the bytes +// in the 's' plus at least one null byte. +func paddedBytes(s string, l uint32) []byte { + if l < uint32(len(s)+1) { + panic("Converting string to byte array results in truncation, this can lead to buffer-overflow due to the missing null-byte!") + } + b := make([]byte, l) + copy(b, s) + + // b was zero-value initialized during make(), so the rest of the slice is + // already filled with null bytes.
+ + return b +} + +// setName sets the optional name for this event. +func (e *Event) setName(name string) { + // We need to pad the name such that the entire event length ends up a + // multiple of inotifyEventBaseSize. + unpaddedLen := len(name) + 1 + // Round up to nearest multiple of inotifyEventBaseSize. + e.len = uint32((unpaddedLen + inotifyEventBaseSize - 1) & ^(inotifyEventBaseSize - 1)) + // Make sure we haven't overflowed and wrapped around when rounding. + if unpaddedLen > int(e.len) { + panic("Overflow when rounding inotify event size, the 'name' field was too big.") + } + e.name = paddedBytes(name, e.len) +} + +func (e *Event) sizeOf() int { + s := inotifyEventBaseSize + int(e.len) + if s < inotifyEventBaseSize { + panic("overflow") + } + return s +} + +// CopyTo serializes this event to dst. buf is used as a scratch buffer to +// construct the output. We use a buffer allocated ahead of time for +// performance. buf must be at least inotifyEventBaseSize bytes. +func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) { + usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) + usermem.ByteOrder.PutUint32(buf[4:], e.mask) + usermem.ByteOrder.PutUint32(buf[8:], e.cookie) + usermem.ByteOrder.PutUint32(buf[12:], e.len) + + writeLen := 0 + + n, err := dst.CopyOut(ctx, buf) + if err != nil { + return 0, err + } + writeLen += n + dst = dst.DropFirst(n) + + if e.len > 0 { + n, err = dst.CopyOut(ctx, e.name) + if err != nil { + return 0, err + } + writeLen += n + } + + // Sanity check.
+ if writeLen != e.sizeOf() { + panic(fmt.Sprintf("Serialized unexpected amount of data for an event, expected %v, wrote %v.", e.sizeOf(), writeLen)) + } + + return int64(writeLen), nil +} + +func (e *Event) equals(other *Event) bool { + return e.wd == other.wd && + e.mask == other.mask && + e.cookie == other.cookie && + e.len == other.len && + bytes.Equal(e.name, other.name) +} diff --git a/pkg/sentry/fs/inotify_watch.go b/pkg/sentry/fs/inotify_watch.go new file mode 100644 index 000000000..900cba3ca --- /dev/null +++ b/pkg/sentry/fs/inotify_watch.go @@ -0,0 +1,135 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "sync/atomic" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/sync" +) + +// Watch represent a particular inotify watch created by inotify_add_watch. +// +// While a watch is active, it ensures the target inode is pinned in memory by +// holding an extra ref on each dirent known (by inotify) to point to the +// inode. These are known as pins. For a full discussion, see +// fs/g3doc/inotify.md. +// +// +stateify savable +type Watch struct { + // Inotify instance which owns this watch. + owner *Inotify + + // Descriptor for this watch. This is unique across an inotify instance. + wd int32 + + // The inode being watched. Note that we don't directly hold a reference on + // this inode. 
Instead we hold a reference on the dirent(s) containing the + // inode, which we record in pins. + target *Inode + + // unpinned indicates whether we have a hard reference on target. This field + // may only be modified through atomic ops. + unpinned uint32 + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // Events being monitored via this watch. Must be accessed atomically, + // writes are protected by mu. + mask uint32 + + // pins is the set of dirents this watch is currently pinning in memory by + // holding a reference to them. See Pin()/Unpin(). + pins map[*Dirent]bool +} + +// ID returns the id of the inotify instance that owns this watch. +func (w *Watch) ID() uint64 { + return w.owner.id +} + +// NotifyParentAfterUnlink indicates whether the parent of the watched object +// should continue to be notified of events after the target has been +// unlinked. +func (w *Watch) NotifyParentAfterUnlink() bool { + return atomic.LoadUint32(&w.mask)&linux.IN_EXCL_UNLINK == 0 +} + +// isRenameEvent returns true if eventMask describes a rename event. +func isRenameEvent(eventMask uint32) bool { + return eventMask&(linux.IN_MOVED_FROM|linux.IN_MOVED_TO|linux.IN_MOVE_SELF) != 0 +} + +// Notify queues a new event on this watch. +func (w *Watch) Notify(name string, events uint32, cookie uint32) { + mask := atomic.LoadUint32(&w.mask) + if mask&events == 0 { + // We weren't watching for this event. + return + } + + // Event mask should include bits matched from the watch plus all control + // event bits. + unmaskableBits := ^uint32(0) &^ linux.IN_ALL_EVENTS + effectiveMask := unmaskableBits | mask + matchedEvents := effectiveMask & events + w.owner.queueEvent(newEvent(w.wd, name, matchedEvents, cookie)) +} + +// Pin acquires a new ref on dirent, which pins the dirent in memory while +// the watch is active. Calling Pin for a second time on the same dirent for +// the same watch is a no-op.
+func (w *Watch) Pin(d *Dirent) { + w.mu.Lock() + defer w.mu.Unlock() + if !w.pins[d] { + w.pins[d] = true + d.IncRef() + } +} + +// Unpin drops any extra refs held on dirent due to a previous Pin +// call. Calling Unpin multiple times for the same dirent, or on a dirent +// without a corresponding Pin call is a no-op. +func (w *Watch) Unpin(d *Dirent) { + w.mu.Lock() + defer w.mu.Unlock() + if w.pins[d] { + delete(w.pins, d) + d.DecRef() + } +} + +// TargetDestroyed notifies the owner of the watch that the watch target is +// gone. The owner should release its own references to the watcher upon +// receiving this notification. +func (w *Watch) TargetDestroyed() { + w.owner.targetDestroyed(w) +} + +// destroy prepares the watch for destruction. It unpins all dirents pinned by +// this watch. Destroy does not cause any new events to be generated. The caller +// is responsible for ensuring there are no outstanding references to this +// watch. +func (w *Watch) destroy() { + w.mu.Lock() + defer w.mu.Unlock() + for d := range w.pins { + d.DecRef() + } + w.pins = nil +} diff --git a/pkg/sentry/fs/lock/BUILD b/pkg/sentry/fs/lock/BUILD new file mode 100644 index 000000000..ae3331737 --- /dev/null +++ b/pkg/sentry/fs/lock/BUILD @@ -0,0 +1,58 @@ +load("//tools:defs.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +package(licenses = ["notice"]) + +go_template_instance( + name = "lock_range", + out = "lock_range.go", + package = "lock", + prefix = "Lock", + template = "//pkg/segment:generic_range", + types = { + "T": "uint64", + }, +) + +go_template_instance( + name = "lock_set", + out = "lock_set.go", + consts = { + "minDegree": "3", + }, + package = "lock", + prefix = "Lock", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "LockRange", + "Value": "Lock", + "Functions": "lockSetFunctions", + }, +) + +go_library( + name = "lock", + srcs = [ + "lock.go", + "lock_range.go", + "lock_set.go", + 
"lock_set_functions.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/sync", + "//pkg/waiter", + ], +) + +go_test( + name = "lock_test", + size = "small", + srcs = [ + "lock_range_test.go", + "lock_test.go", + ], + library = ":lock", +) diff --git a/pkg/sentry/fs/lock/lock.go b/pkg/sentry/fs/lock/lock.go new file mode 100644 index 000000000..8a5d9c7eb --- /dev/null +++ b/pkg/sentry/fs/lock/lock.go @@ -0,0 +1,453 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lock is the API for POSIX-style advisory regional file locks and +// BSD-style full file locks. +// +// Callers needing to enforce these types of locks, like sys_fcntl, can call +// LockRegion and UnlockRegion on a thread-safe set of Locks. Locks are +// specific to a unique file (unique device/inode pair) and for this reason +// should not be shared between files. +// +// A Lock has a set of holders identified by UniqueID. Normally this is the +// pid of the thread attempting to acquire the lock. +// +// Since these are advisory locks, they do not need to be integrated into +// Reads/Writes and for this reason there is no way to *check* if a lock is +// held. One can only attempt to take a lock or unlock an existing lock. +// +// A Lock in a set of Locks is typed: it is either a read lock with any number +// of readers and no writer, or a write lock with no readers. 
+// +// As expected from POSIX, any attempt to acquire a write lock on a file region +// when there already exits a write lock held by a different uid will fail. Any +// attempt to acquire a write lock on a file region when there is more than one +// reader will fail. Any attempt to acquire a read lock on a file region when +// there is already a writer will fail. +// +// In special cases, a read lock may be upgraded to a write lock and a write lock +// can be downgraded to a read lock. This can only happen if: +// +// * read lock upgrade to write lock: There can be only one reader and the reader +// must be the same as the requested write lock holder. +// +// * write lock downgrade to read lock: The writer must be the same as the requested +// read lock holder. +// +// UnlockRegion always succeeds. If LockRegion fails the caller should normally +// interpret this as "try again later". +package lock + +import ( + "fmt" + "math" + "syscall" + + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LockType is a type of regional file lock. +type LockType int + +// UniqueID is a unique identifier of the holder of a regional file lock. +type UniqueID interface{} + +const ( + // ReadLock describes a POSIX regional file lock to be taken + // read only. There may be multiple of these locks on a single + // file region as long as there is no writer lock on the same + // region. + ReadLock LockType = iota + + // WriteLock describes a POSIX regional file lock to be taken + // write only. There may be only a single holder of this lock + // and no read locks. + WriteLock +) + +// LockEOF is the maximal possible end of a regional file lock. +// +// A BSD-style full file lock can be represented as a regional file lock from +// offset 0 to LockEOF. +const LockEOF = math.MaxUint64 + +// Lock is a regional file lock. It consists of either a single writer +// or a set of readers. 
+// +// A Lock may be upgraded from a read lock to a write lock only if there +// is a single reader and that reader has the same uid as the write lock. +// +// A Lock may be downgraded from a write lock to a read lock only if +// the write lock's uid is the same as the read lock. +// +// +stateify savable +type Lock struct { + // Readers are the set of read lock holders identified by UniqueID. + // If len(Readers) > 0 then HasWriter must be false. + Readers map[UniqueID]bool + + // Writer holds the writer unique ID. It's nil if there are no writers. + Writer UniqueID +} + +// Locks is a thread-safe wrapper around a LockSet. +// +// +stateify savable +type Locks struct { + // mu protects locks below. + mu sync.Mutex `state:"nosave"` + + // locks is the set of region locks currently held on an Inode. + locks LockSet + + // blockedQueue is the queue of waiters that are waiting on a lock. + blockedQueue waiter.Queue `state:"zerovalue"` +} + +// Blocker is the interface used for blocking locks. Passing a nil Blocker +// will be treated as non-blocking. +type Blocker interface { + Block(C <-chan struct{}) error +} + +const ( + // EventMaskAll is the mask we will always use for locks, by using the + // same mask all the time we can wake up everyone anytime the lock + // changes state. + EventMaskAll waiter.EventMask = 0xFFFF +) + +// LockRegion attempts to acquire a typed lock for the uid on a region +// of a file. Returns true if successful in locking the region. If false +// is returned, the caller should normally interpret this as "try again later" if +// acquiring the lock in a non-blocking mode or "interrupted" if in a blocking mode. +// Blocker is the interface used to provide blocking behavior, passing a nil Blocker +// will result in non-blocking behavior. 
+func (l *Locks) LockRegion(uid UniqueID, t LockType, r LockRange, block Blocker) bool { + for { + l.mu.Lock() + + // Blocking locks must run in a loop because we'll be woken up whenever an unlock event + // happens for this lock. We will then attempt to take the lock again and if it fails + // continue blocking. + res := l.locks.lock(uid, t, r) + if !res && block != nil { + e, ch := waiter.NewChannelEntry(nil) + l.blockedQueue.EventRegister(&e, EventMaskAll) + l.mu.Unlock() + if err := block.Block(ch); err != nil { + // We were interrupted, the caller can translate this to EINTR if applicable. + l.blockedQueue.EventUnregister(&e) + return false + } + l.blockedQueue.EventUnregister(&e) + continue // Try again now that someone has unlocked. + } + + l.mu.Unlock() + return res + } +} + +// UnlockRegion attempts to release a lock for the uid on a region of a file. +// This operation is always successful, even if there did not exist a lock on +// the requested region held by uid in the first place. +func (l *Locks) UnlockRegion(uid UniqueID, r LockRange) { + l.mu.Lock() + defer l.mu.Unlock() + l.locks.unlock(uid, r) + + // Now that we've released the lock, we need to wake up any waiters. + l.blockedQueue.Notify(EventMaskAll) +} + +// makeLock returns a new typed Lock that has either uid as its only reader +// or uid as its only writer. +func makeLock(uid UniqueID, t LockType) Lock { + value := Lock{Readers: make(map[UniqueID]bool)} + switch t { + case ReadLock: + value.Readers[uid] = true + case WriteLock: + value.Writer = uid + default: + panic(fmt.Sprintf("makeLock: invalid lock type %d", t)) + } + return value +} + +// isHeld returns true if uid is a holder of Lock. +func (l Lock) isHeld(uid UniqueID) bool { + return l.Writer == uid || l.Readers[uid] +} + +// lock sets uid as a holder of a typed lock on Lock. +// +// Preconditions: canLock is true for the range containing this Lock. 
+func (l *Lock) lock(uid UniqueID, t LockType) {
+	switch t {
+	case ReadLock:
+		// If we are already a reader, then this is a no-op.
+		if l.Readers[uid] {
+			return
+		}
+		// We cannot downgrade a write lock to a read lock unless the
+		// uid is the same. The panics below fire only if the caller
+		// violated the canLock precondition.
+		if l.Writer != nil {
+			if l.Writer != uid {
+				panic(fmt.Sprintf("lock: cannot downgrade write lock to read lock for uid %d, writer is %d", uid, l.Writer))
+			}
+			// Ensure that there is only one reader if upgrading.
+			l.Readers = make(map[UniqueID]bool)
+			// Ensure that there is no longer a writer.
+			l.Writer = nil
+		}
+		l.Readers[uid] = true
+		return
+	case WriteLock:
+		// If we are already the writer, then this is a no-op.
+		if l.Writer == uid {
+			return
+		}
+		// We can only upgrade a read lock to a write lock if there
+		// is only one reader and that reader has the same uid as
+		// the write lock.
+		if readers := len(l.Readers); readers > 0 {
+			if readers != 1 {
+				panic(fmt.Sprintf("lock: cannot upgrade read lock to write lock for uid %d, too many readers %v", uid, l.Readers))
+			}
+			if !l.Readers[uid] {
+				panic(fmt.Sprintf("lock: cannot upgrade read lock to write lock for uid %d, conflicting reader %v", uid, l.Readers))
+			}
+		}
+		// Ensure that there is only a writer.
+		l.Readers = make(map[UniqueID]bool)
+		l.Writer = uid
+	default:
+		panic(fmt.Sprintf("lock: invalid lock type %d", t))
+	}
+}
+
+// lockable returns true if check returns true for every Lock in LockRange.
+// Further, check should return true if Lock meets the caller's requirements
+// for locking Lock.
+func (l LockSet) lockable(r LockRange, check func(value Lock) bool) bool {
+	// Get our starting point.
+	seg := l.LowerBoundSegment(r.Start)
+	for seg.Ok() && seg.Start() < r.End {
+		// Note that we don't care about overrunning the end of the
+		// last segment because if everything checks out we'll just
+		// split the last segment.
+		if !check(seg.Value()) {
+			return false
+		}
+		// Jump to the next segment, ignoring gaps, for the same
+		// reason we ignored the first gap: gaps hold no locks and
+		// are therefore always lockable.
+		seg = seg.NextSegment()
+	}
+	// No conflict, we can get a lock for uid over the entire range.
+	return true
+}
+
+// canLock returns true if uid will be able to take a Lock of type t on the
+// entire range specified by LockRange.
+func (l LockSet) canLock(uid UniqueID, t LockType, r LockRange) bool {
+	switch t {
+	case ReadLock:
+		return l.lockable(r, func(value Lock) bool {
+			// If there is no writer, there's no problem adding another reader.
+			if value.Writer == nil {
+				return true
+			}
+			// If there is a writer, then it must be the same uid
+			// in order to downgrade the lock to a read lock.
+			return value.Writer == uid
+		})
+	case WriteLock:
+		return l.lockable(r, func(value Lock) bool {
+			// If there are only readers.
+			if value.Writer == nil {
+				// Then this uid can only take a write lock if this is a private
+				// upgrade, meaning that the only reader is uid.
+				return len(value.Readers) == 1 && value.Readers[uid]
+			}
+			// If the uid is already a writer on this region, then
+			// adding a write lock would be a no-op.
+			return value.Writer == uid
+		})
+	default:
+		panic(fmt.Sprintf("canLock: invalid lock type %d", t))
+	}
+}
+
+// lock returns true if uid took a lock of type t on the entire range of
+// LockRange.
+//
+// Preconditions: r.Start <= r.End (will panic otherwise).
+func (l *LockSet) lock(uid UniqueID, t LockType, r LockRange) bool {
+	if r.Start > r.End {
+		panic(fmt.Sprintf("lock: r.Start %d > r.End %d", r.Start, r.End))
+	}
+
+	// Don't attempt to insert anything with a range of 0 and treat this
+	// as a successful no-op.
+	if r.Length() == 0 {
+		return true
+	}
+
+	// Do a first-pass check. We *could* hold onto the segments we
+	// checked if canLock would return true, but traversing the segment
+	// set should be fast and this keeps things simple.
+	if !l.canLock(uid, t, r) {
+		return false
+	}
+	// Get our starting point.
+	seg, gap := l.Find(r.Start)
+	if gap.Ok() {
+		// Fill in the gap and get the next segment to modify.
+		seg = l.Insert(gap, gap.Range().Intersect(r), makeLock(uid, t)).NextSegment()
+	} else if seg.Start() < r.Start {
+		// Get our first segment to modify.
+		_, seg = l.Split(seg, r.Start)
+	}
+	for seg.Ok() && seg.Start() < r.End {
+		// Split the last one if necessary.
+		if seg.End() > r.End {
+			seg, _ = l.SplitUnchecked(seg, r.End)
+		}
+
+		// Set the lock on the segment. This is guaranteed to
+		// always be safe, given canLock above.
+		value := seg.ValuePtr()
+		value.lock(uid, t)
+
+		// Fill subsequent gaps.
+		gap = seg.NextGap()
+		if gr := gap.Range().Intersect(r); gr.Length() > 0 {
+			seg = l.Insert(gap, gr, makeLock(uid, t)).NextSegment()
+		} else {
+			seg = gap.NextSegment()
+		}
+	}
+	return true
+}
+
+// unlock is always successful. If uid has no locks held for the range LockRange,
+// unlock is a no-op.
+//
+// Preconditions: same as lock.
+func (l *LockSet) unlock(uid UniqueID, r LockRange) {
+	if r.Start > r.End {
+		panic(fmt.Sprintf("unlock: r.Start %d > r.End %d", r.Start, r.End))
+	}
+
+	// Same as setlock.
+	if r.Length() == 0 {
+		return
+	}
+
+	// Get our starting point.
+	seg := l.LowerBoundSegment(r.Start)
+	for seg.Ok() && seg.Start() < r.End {
+		// If this segment doesn't have a lock from uid then
+		// there is no need to fragment the set with Isolate (below).
+		// In this case just move on to the next segment.
+		if !seg.Value().isHeld(uid) {
+			seg = seg.NextSegment()
+			continue
+		}
+
+		// Ensure that if we need to unlock a sub-segment that
+		// we don't unlock/remove that entire segment.
+		seg = l.Isolate(seg, r)
+
+		value := seg.Value()
+		var remove bool
+		if value.Writer == uid {
+			// If we are unlocking a writer, then since there can
+			// only ever be one writer and no readers, then this
+			// lock should always be removed from the set.
+			remove = true
+		} else if value.Readers[uid] {
+			// If uid is the last reader, then just remove the entire
+			// segment.
+			if len(value.Readers) == 1 {
+				remove = true
+			} else {
+				// Otherwise we need to remove this reader without
+				// affecting any other segment's readers.  To do
+				// this, we need to make a copy of the Readers map
+				// and not add this uid.
+				newValue := Lock{Readers: make(map[UniqueID]bool)}
+				for k, v := range value.Readers {
+					if k != uid {
+						newValue.Readers[k] = v
+					}
+				}
+				seg.SetValue(newValue)
+			}
+		}
+		if remove {
+			seg = l.Remove(seg).NextSegment()
+		} else {
+			seg = seg.NextSegment()
+		}
+	}
+}
+
+// ComputeRange takes a positive file offset and computes the start of a LockRange
+// using start (relative to offset) and the end of the LockRange using length. The
+// values of start and length may be negative but the resulting LockRange must
+// preserve that LockRange.Start < LockRange.End and LockRange.Start >= 0.
+func ComputeRange(start, length, offset int64) (LockRange, error) {
+	offset += start
+	// fcntl(2): "l_start can be a negative number provided the offset
+	// does not lie before the start of the file"
+	if offset < 0 {
+		return LockRange{}, syscall.EINVAL
+	}
+
+	// fcntl(2): Specifying 0 for l_len has the special meaning: lock all
+	// bytes starting at the location specified by l_whence and l_start
+	// through to the end of file, no matter how large the file grows.
+	end := uint64(LockEOF)
+	if length > 0 {
+		// fcntl(2): If l_len is positive, then the range to be locked
+		// covers bytes l_start up to and including l_start+l_len-1.
+		//
+		// Since LockRange.End is exclusive we need not subtract 1 from length.
+		//
+		// NOTE(review): offset+length can overflow int64 for very large
+		// values; fcntl(2) documents EOVERFLOW for this case — TODO confirm
+		// whether callers guarantee this cannot happen.
+		end = uint64(offset + length)
+	} else if length < 0 {
+		// fcntl(2): If l_len is negative, the interval described by
+		// lock covers bytes l_start+l_len up to and including l_start-1.
+		//
+		// Since LockRange.End is exclusive we need not subtract 1 from offset.
+		signedEnd := offset
+		// Add to offset using a negative length (subtract).
+		offset += length
+		if offset < 0 {
+			return LockRange{}, syscall.EINVAL
+		}
+		if signedEnd < offset {
+			return LockRange{}, syscall.EOVERFLOW
+		}
+		// At this point signedEnd cannot be negative,
+		// since we asserted that offset is not negative
+		// and signedEnd is not less than offset.
+		end = uint64(signedEnd)
+	}
+	// Offset is guaranteed to be non-negative at this point.
+	return LockRange{Start: uint64(offset), End: end}, nil
+}
diff --git a/pkg/sentry/fs/lock/lock_range_test.go b/pkg/sentry/fs/lock/lock_range_test.go
new file mode 100644
index 000000000..6221199d1
--- /dev/null
+++ b/pkg/sentry/fs/lock/lock_range_test.go
@@ -0,0 +1,136 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lock
+
+import (
+	"syscall"
+	"testing"
+)
+
+func TestComputeRange(t *testing.T) {
+	tests := []struct {
+		// Description of test.
+		name string
+
+		// Requested start of the lock range.
+		start int64
+
+		// Requested length of the lock range,
+		// can be negative :(
+		length int64
+
+		// Pre-computed file offset based on whence.
+		// Will be added to start.
+		offset int64
+
+		// Expected error.
+		err error
+
+		// If error is nil, the expected LockRange.
+ LockRange + }{ + { + name: "offset, start, and length all zero", + LockRange: LockRange{Start: 0, End: LockEOF}, + }, + { + name: "zero offset, zero start, positive length", + start: 0, + length: 4096, + offset: 0, + LockRange: LockRange{Start: 0, End: 4096}, + }, + { + name: "zero offset, negative start", + start: -4096, + offset: 0, + err: syscall.EINVAL, + }, + { + name: "large offset, negative start, positive length", + start: -2048, + length: 2048, + offset: 4096, + LockRange: LockRange{Start: 2048, End: 4096}, + }, + { + name: "large offset, negative start, zero length", + start: -2048, + length: 0, + offset: 4096, + LockRange: LockRange{Start: 2048, End: LockEOF}, + }, + { + name: "zero offset, zero start, negative length", + start: 0, + length: -4096, + offset: 0, + err: syscall.EINVAL, + }, + { + name: "large offset, zero start, negative length", + start: 0, + length: -4096, + offset: 4096, + LockRange: LockRange{Start: 0, End: 4096}, + }, + { + name: "offset, start, and length equal, length is negative", + start: 1024, + length: -1024, + offset: 1024, + LockRange: LockRange{Start: 1024, End: 2048}, + }, + { + name: "offset, start, and length equal, start is negative", + start: -1024, + length: 1024, + offset: 1024, + LockRange: LockRange{Start: 0, End: 1024}, + }, + { + name: "offset, start, and length equal, offset is negative", + start: 1024, + length: 1024, + offset: -1024, + LockRange: LockRange{Start: 0, End: 1024}, + }, + { + name: "offset, start, and length equal, all negative", + start: -1024, + length: -1024, + offset: -1024, + err: syscall.EINVAL, + }, + { + name: "offset, start, and length equal, all positive", + start: 1024, + length: 1024, + offset: 1024, + LockRange: LockRange{Start: 2048, End: 3072}, + }, + } + + for _, test := range tests { + rng, err := ComputeRange(test.start, test.length, test.offset) + if err != test.err { + t.Errorf("%s: lockRange(%d, %d, %d) got error %v, want %v", test.name, test.start, test.length, test.offset, 
err, test.err) + continue + } + if err == nil && rng != test.LockRange { + t.Errorf("%s: lockRange(%d, %d, %d) got LockRange %v, want %v", test.name, test.start, test.length, test.offset, rng, test.LockRange) + } + } +} diff --git a/pkg/sentry/fs/lock/lock_set_functions.go b/pkg/sentry/fs/lock/lock_set_functions.go new file mode 100644 index 000000000..50a16e662 --- /dev/null +++ b/pkg/sentry/fs/lock/lock_set_functions.go @@ -0,0 +1,63 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "math" +) + +// LockSet maps a set of Locks into a file. The key is the file offset. + +type lockSetFunctions struct{} + +func (lockSetFunctions) MinKey() uint64 { + return 0 +} + +func (lockSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +func (lockSetFunctions) ClearValue(l *Lock) { + *l = Lock{} +} + +func (lockSetFunctions) Merge(r1 LockRange, val1 Lock, r2 LockRange, val2 Lock) (Lock, bool) { + // Merge only if the Readers/Writers are identical. + if len(val1.Readers) != len(val2.Readers) { + return Lock{}, false + } + for k := range val1.Readers { + if !val2.Readers[k] { + return Lock{}, false + } + } + if val1.Writer != val2.Writer { + return Lock{}, false + } + return val1, true +} + +func (lockSetFunctions) Split(r LockRange, val Lock, split uint64) (Lock, Lock) { + // Copy the segment so that split segments don't contain map references + // to other segments. 
+ val0 := Lock{Readers: make(map[UniqueID]bool)} + for k, v := range val.Readers { + val0.Readers[k] = v + } + val0.Writer = val.Writer + + return val, val0 +} diff --git a/pkg/sentry/fs/lock/lock_test.go b/pkg/sentry/fs/lock/lock_test.go new file mode 100644 index 000000000..fad90984b --- /dev/null +++ b/pkg/sentry/fs/lock/lock_test.go @@ -0,0 +1,1060 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "reflect" + "testing" +) + +type entry struct { + Lock + LockRange +} + +func equals(e0, e1 []entry) bool { + if len(e0) != len(e1) { + return false + } + for i := range e0 { + for k := range e0[i].Lock.Readers { + if !e1[i].Lock.Readers[k] { + return false + } + } + for k := range e1[i].Lock.Readers { + if !e0[i].Lock.Readers[k] { + return false + } + } + if !reflect.DeepEqual(e0[i].LockRange, e1[i].LockRange) { + return false + } + if e0[i].Lock.Writer != e1[i].Lock.Writer { + return false + } + } + return true +} + +// fill a LockSet with consecutive region locks. Will panic if +// LockRanges are not consecutive. +func fill(entries []entry) LockSet { + l := LockSet{} + for _, e := range entries { + gap := l.FindGap(e.LockRange.Start) + if !gap.Ok() { + panic("cannot insert into existing segment") + } + l.Insert(gap, e.LockRange, e.Lock) + } + return l +} + +func TestCanLockEmpty(t *testing.T) { + l := LockSet{} + + // Expect to be able to take any locks given that the set is empty. 
+ eof := l.FirstGap().End() + r := LockRange{0, eof} + if !l.canLock(1, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1) + } + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + if !l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2) + } +} + +func TestCanLock(t *testing.T) { + // + -------------- + ---------- + -------------- + --------- + + // | Readers 1 & 2 | Readers 1 | Readers 1 & 3 | Writer 1 | + // + ------------- + ---------- + -------------- + --------- + + // 0 1024 2048 3072 4096 + l := fill([]entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 3: true}}, + LockRange: LockRange{2048, 3072}, + }, + { + Lock: Lock{Writer: 1}, + LockRange: LockRange{3072, 4096}, + }, + }) + + // Now that we have a mildly interesting layout, try some checks on different + // ranges, uids, and lock types. + // + // Expect to be able to extend the read lock, despite the writer lock, because + // the writer has the same uid as the requested read lock. + r := LockRange{0, 8192} + if !l.canLock(1, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1) + } + // Expect to *not* be able to extend the read lock since there is an overlapping + // writer region locked by someone other than the uid. 
+ if l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", ReadLock, r, 2) + } + // Expect to be able to extend the read lock if there are only other readers in + // the way. + r = LockRange{64, 3072} + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + // Expect to be able to set a read lock beyond the range of any existing locks. + r = LockRange{4096, 10240} + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + + // Expect to not be able to take a write lock with other readers in the way. + r = LockRange{0, 8192} + if l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 1) + } + // Expect to be able to extend the write lock for the same uid. + r = LockRange{3072, 8192} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + // Expect to not be able to overlap a write lock for two different uids. + if l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 2) + } + // Expect to be able to set a write lock that is beyond the range of any + // existing locks. + r = LockRange{8192, 10240} + if !l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2) + } + // Expect to be able to upgrade a read lock (any portion of it). 
+ r = LockRange{1024, 2048} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + r = LockRange{1080, 2000} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } +} + +func TestSetLock(t *testing.T) { + tests := []struct { + // description of test. + name string + + // LockSet entries to pre-fill. + before []entry + + // Description of region to lock: + // + // start is the file offset of the lock. + start uint64 + // end is the end file offset of the lock. + end uint64 + // uid of lock attempter. + uid UniqueID + // lock type requested. + lockType LockType + + // success is true if taking the above + // lock should succeed. + success bool + + // Expected layout of the set after locking + // if success is true. + after []entry + }{ + { + name: "set zero length ReadLock on empty set", + start: 0, + end: 0, + uid: 0, + lockType: ReadLock, + success: true, + }, + { + name: "set zero length WriteLock on empty set", + start: 0, + end: 0, + uid: 0, + lockType: WriteLock, + success: true, + }, + { + name: "set ReadLock on empty set", + start: 0, + end: LockEOF, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "set WriteLock on empty set", + start: 0, + end: LockEOF, + uid: 0, + lockType: WriteLock, + success: true, + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "set ReadLock on WriteLock same uid", + // + ----------------------------------------- + + 
// | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------- + --------------------------- + + // | Readers 0 | Writer 0 | + // + ----------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "set WriteLock on ReadLock same uid", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + lockType: WriteLock, + success: true, + // + ----------- + --------------------------- + + // | Writer 0 | Readers 0 | + // + ----------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "set ReadLock on WriteLock different uid", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: false, + }, + { + name: "set WriteLock on ReadLock different uid", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 
LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: WriteLock, + success: false, + }, + { + name: "split ReadLock for overlapping lock at start 0", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: true, + // + -------------- + --------------------------- + + // | Readers 0 & 1 | Readers 0 | + // + -------------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "split ReadLock for overlapping lock at non-zero start", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: 8192, + uid: 1, + lockType: ReadLock, + success: true, + // + ---------- + -------------- + ----------- + + // | Readers 0 | Readers 0 & 1 | Readers 0 | + // + ---------- + -------------- + ----------- + + // 0 4096 8192 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, 8192}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{8192, LockEOF}, + }, + }, + }, + { + name: "fill front gap with ReadLock", + // + --------- + ---------------------------- + + // | gap | Readers 0 | + // + --------- + ---------------------------- + + // 0 1024 max uint64 + before: 
[]entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + start: 0, + end: 8192, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "fill end gap with ReadLock", + // + ---------------------------- + + // | Readers 0 | + // + ---------------------------- + + // 0 4096 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + }, + start: 1024, + end: LockEOF, + uid: 0, + lockType: ReadLock, + success: true, + // Note that this is not merged after lock does a Split. This is + // fine because the two locks will still *behave* as one. In other + // words we can fragment any lock all we want and semantically it + // makes no difference. 
+ // + // + ----------- + --------------------------- + + // | Readers 0 | Readers 0 | + // + ----------- + --------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + }, + { + name: "fill gap with ReadLock and split", + // + --------- + ---------------------------- + + // | gap | Readers 0 | + // + --------- + ---------------------------- + + // 0 1024 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: true, + // + --------- + ------------- + ------------- + + // | Reader 1 | Readers 0 & 1 | Reader 0 | + // + ----------+ ------------- + ------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "upgrade ReadLock to WriteLock for single uid fill gap", + // + ------------- + --------- + --- + ------------- + + // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 | + // + ------------- + --------- + --- + ------------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + lockType: WriteLock, + success: true, + // + ------------- + -------- + ------------- + 
+ // | Readers 0 & 1 | Writer 0 | Readers 0 & 2 | + // + ------------- + -------- + ------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "upgrade ReadLock to WriteLock for single uid keep gap", + // + ------------- + --------- + --- + ------------- + + // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 | + // + ------------- + --------- + --- + ------------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 1024, + end: 3072, + uid: 0, + lockType: WriteLock, + success: true, + // + ------------- + -------- + --- + ------------- + + // | Readers 0 & 1 | Writer 0 | gap | Readers 0 & 2 | + // + ------------- + -------- + --- + ------------- + + // 0 1024 3072 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{1024, 3072}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "fail to upgrade ReadLock to WriteLock with conflicting Reader", + // + ------------- + --------- + + // | Readers 0 & 1 | Readers 0 | + // + ------------- + --------- + + // 0 1024 2048 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: 
true}}, + LockRange: LockRange{1024, 2048}, + }, + }, + start: 0, + end: 2048, + uid: 0, + lockType: WriteLock, + success: false, + }, + { + name: "take WriteLock on whole file if all uids are the same", + // + ------------- + --------- + --------- + ---------- + + // | Writer 0 | Readers 0 | Readers 0 | Readers 0 | + // + ------------- + --------- + --------- + ---------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{2048, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + lockType: WriteLock, + success: true, + // We do not manually merge locks. Semantically a fragmented lock + // held by the same uid will behave as one lock so it makes no difference. 
+ // + // + ------------- + ---------------------------- + + // | Writer 0 | Writer 0 | + // + ------------- + ---------------------------- + + // 0 1024 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + l := fill(test.before) + + r := LockRange{Start: test.start, End: test.end} + success := l.lock(test.uid, test.lockType, r) + var got []entry + for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + got = append(got, entry{ + Lock: seg.Value(), + LockRange: seg.Range(), + }) + } + + if success != test.success { + t.Errorf("setlock(%v, %+v, %d, %d) got success %v, want %v", test.before, r, test.uid, test.lockType, success, test.success) + return + } + + if success { + if !equals(got, test.after) { + t.Errorf("got set %+v, want %+v", got, test.after) + } + } + }) + } +} + +func TestUnlock(t *testing.T) { + tests := []struct { + // description of test. + name string + + // LockSet entries to pre-fill. + before []entry + + // Description of region to unlock: + // + // start is the file start of the lock. + start uint64 + // end is the end file start of the lock. + end uint64 + // uid of lock holder. + uid UniqueID + + // Expected layout of the set after unlocking. 
+ after []entry + }{ + { + name: "unlock zero length on empty set", + start: 0, + end: 0, + uid: 0, + }, + { + name: "unlock on empty set (no-op)", + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock uid not locked (no-op)", + // + --------------------------- + + // | Readers 1 & 2 | + // + --------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + // + --------------------------- + + // | Readers 1 & 2 | + // + --------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "unlock ReadLock over entire file", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock WriteLock over entire file", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock partial ReadLock (start)", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + // + ------ + --------------------------- + + // | gap | Readers 0 | + // +------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + 
LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock partial WriteLock (start)", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + // + ------ + --------------------------- + + // | gap | Writer 0 | + // +------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock partial ReadLock (end)", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: LockEOF, + uid: 0, + // + --------------------------- + + // | Readers 0 | + // +---------------------------- + + // 0 4096 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + }, + }, + { + name: "unlock partial WriteLock (end)", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: LockEOF, + uid: 0, + // + --------------------------- + + // | Writer 0 | + // +---------------------------- + + // 0 4096 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 4096}, + }, + }, + }, + { + name: "unlock for single uid", + // + ------------- + --------- + ------------------- + + // | Readers 0 & 1 | Writer 0 | Readers 0 & 1 & 2 | + // + ------------- + --------- + ------------------- + + // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: 
true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + // + --------- + --- + --------------- + + // | Readers 1 | gap | Readers 1 & 2 | + // + --------- + --- + --------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock subsection locked", + // + ------------------------------- + + // | Readers 0 & 1 & 2 | + // + ------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + // + ----------------- + ------------- + ----------------- + + // | Readers 0 & 1 & 2 | Readers 1 & 2 | Readers 0 & 1 & 2 | + // + ----------------- + ------------- + ----------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock mid-gap to increase gap", + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + 
}, + start: 8, + end: 2048, + uid: 0, + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 8 4096 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 8}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock split region on uid mid-gap", + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 2048, + end: 8192, + uid: 0, + // + --------- + ----- + --------- + ------------- + + // | Writer 0 | gap | Readers 1 | Readers 0 & 1 | + // + --------- + ----- + --------- + ------------- + + // 0 1024 4096 8192 max uint64 + after: []entry{ + { + Lock: Lock{Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{4096, 8192}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{8192, LockEOF}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + l := fill(test.before) + + r := LockRange{Start: test.start, End: test.end} + l.unlock(test.uid, r) + var got []entry + for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + got = append(got, entry{ + Lock: seg.Value(), + LockRange: seg.Range(), + }) + } + if !equals(got, test.after) { + t.Errorf("got set %+v, want %+v", got, test.after) + } + }) + } +} diff --git a/pkg/sentry/fs/mock.go b/pkg/sentry/fs/mock.go new file mode 100644 index 000000000..1d6ea5736 --- /dev/null +++ b/pkg/sentry/fs/mock.go @@ -0,0 +1,176 @@ +// Copyright 2018 The gVisor 
Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/syserror" +) + +// MockInodeOperations implements InodeOperations for testing Inodes. +type MockInodeOperations struct { + InodeOperations + + UAttr UnstableAttr + + createCalled bool + createDirectoryCalled bool + createLinkCalled bool + renameCalled bool + walkCalled bool +} + +// NewMockInode returns a mock *Inode using MockInodeOperations. +func NewMockInode(ctx context.Context, msrc *MountSource, sattr StableAttr) *Inode { + return NewInode(ctx, NewMockInodeOperations(ctx), msrc, sattr) +} + +// NewMockInodeOperations returns a *MockInodeOperations. +func NewMockInodeOperations(ctx context.Context) *MockInodeOperations { + return &MockInodeOperations{ + UAttr: WithCurrentTime(ctx, UnstableAttr{ + Perms: FilePermsFromMode(0777), + }), + } +} + +// MockMountSourceOps implements fs.MountSourceOperations. +type MockMountSourceOps struct { + MountSourceOperations + keep bool + revalidate bool +} + +// NewMockMountSource returns a new *MountSource using MockMountSourceOps. +func NewMockMountSource(cache *DirentCache) *MountSource { + var keep bool + if cache != nil { + keep = cache.maxSize > 0 + } + return &MountSource{ + MountSourceOperations: &MockMountSourceOps{keep: keep}, + fscache: cache, + } +} + +// Revalidate implements fs.MountSourceOperations.Revalidate. 
+func (n *MockMountSourceOps) Revalidate(context.Context, string, *Inode, *Inode) bool { + return n.revalidate +} + +// Keep implements fs.MountSourceOperations.Keep. +func (n *MockMountSourceOps) Keep(dirent *Dirent) bool { + return n.keep +} + +// CacheReaddir implements fs.MountSourceOperations.CacheReaddir. +func (n *MockMountSourceOps) CacheReaddir() bool { + // Common case: cache readdir results if there is a dirent cache. + return n.keep +} + +// WriteOut implements fs.InodeOperations.WriteOut. +func (n *MockInodeOperations) WriteOut(context.Context, *Inode) error { + return nil +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (n *MockInodeOperations) UnstableAttr(context.Context, *Inode) (UnstableAttr, error) { + return n.UAttr, nil +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (n *MockInodeOperations) IsVirtual() bool { + return false +} + +// Lookup implements fs.InodeOperations.Lookup. +func (n *MockInodeOperations) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) { + n.walkCalled = true + return NewDirent(ctx, NewInode(ctx, &MockInodeOperations{}, dir.MountSource, StableAttr{}), p), nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (n *MockInodeOperations) SetPermissions(context.Context, *Inode, FilePermissions) bool { + return false +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (*MockInodeOperations) SetOwner(context.Context, *Inode, FileOwner) error { + return syserror.EINVAL +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (n *MockInodeOperations) SetTimestamps(context.Context, *Inode, TimeSpec) error { + return nil +} + +// Create implements fs.InodeOperations.Create. 
+func (n *MockInodeOperations) Create(ctx context.Context, dir *Inode, p string, flags FileFlags, perms FilePermissions) (*File, error) { + n.createCalled = true + d := NewDirent(ctx, NewInode(ctx, &MockInodeOperations{}, dir.MountSource, StableAttr{}), p) + return &File{Dirent: d}, nil +} + +// CreateLink implements fs.InodeOperations.CreateLink. +func (n *MockInodeOperations) CreateLink(_ context.Context, dir *Inode, oldname string, newname string) error { + n.createLinkCalled = true + return nil +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (n *MockInodeOperations) CreateDirectory(context.Context, *Inode, string, FilePermissions) error { + n.createDirectoryCalled = true + return nil +} + +// Rename implements fs.InodeOperations.Rename. +func (n *MockInodeOperations) Rename(ctx context.Context, inode *Inode, oldParent *Inode, oldName string, newParent *Inode, newName string, replacement bool) error { + n.renameCalled = true + return nil +} + +// Check implements fs.InodeOperations.Check. +func (n *MockInodeOperations) Check(ctx context.Context, inode *Inode, p PermMask) bool { + return ContextCanAccessFile(ctx, inode, p) +} + +// Release implements fs.InodeOperations.Release. +func (n *MockInodeOperations) Release(context.Context) {} + +// Truncate implements fs.InodeOperations.Truncate. +func (n *MockInodeOperations) Truncate(ctx context.Context, inode *Inode, size int64) error { + return nil +} + +// Allocate implements fs.InodeOperations.Allocate. +func (n *MockInodeOperations) Allocate(ctx context.Context, inode *Inode, offset, length int64) error { + return nil +} + +// Remove implements fs.InodeOperations.Remove. +func (n *MockInodeOperations) Remove(context.Context, *Inode, string) error { + return nil +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. 
+func (n *MockInodeOperations) RemoveDirectory(context.Context, *Inode, string) error { + return nil +} + +// Getlink implements fs.InodeOperations.Getlink. +func (n *MockInodeOperations) Getlink(context.Context, *Inode) (*Dirent, error) { + return nil, syserror.ENOLINK +} diff --git a/pkg/sentry/fs/mount.go b/pkg/sentry/fs/mount.go new file mode 100644 index 000000000..37bae6810 --- /dev/null +++ b/pkg/sentry/fs/mount.go @@ -0,0 +1,285 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "bytes" + "fmt" + "sync/atomic" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/refs" +) + +// DirentOperations provide file systems greater control over how long a Dirent +// stays pinned in core. Implementations must not take Dirent.mu. +type DirentOperations interface { + // Revalidate is called during lookup each time we encounter a Dirent + // in the cache. Implementations may update stale properties of the + // child Inode. If Revalidate returns true, then the entire Inode will + // be reloaded. + // + // Revalidate will never be called on a Inode that is mounted. + Revalidate(ctx context.Context, name string, parent, child *Inode) bool + + // Keep returns true if the Dirent should be kept in memory for as long + // as possible beyond any active references. 
+ Keep(dirent *Dirent) bool + + // CacheReaddir returns true if directory entries returned by + // FileOperations.Readdir may be cached for future use. + // + // Postconditions: This method must always return the same value. + CacheReaddir() bool +} + +// MountSourceOperations contains filesystem specific operations. +type MountSourceOperations interface { + // DirentOperations provide optional extra management of Dirents. + DirentOperations + + // Destroy destroys the MountSource. + Destroy() + + // Below are MountSourceOperations that do not conform to Linux. + + // ResetInodeMappings clears all mappings of Inodes before SaveInodeMapping + // is called. + ResetInodeMappings() + + // SaveInodeMappings is called during saving to store, for each reachable + // Inode in the mounted filesystem, a mapping of Inode.StableAttr.InodeID + // to the Inode's path relative to its mount point. If an Inode is + // reachable at more than one path due to hard links, it is unspecified + // which path is mapped. Filesystems that do not use this information to + // restore inodes can make SaveInodeMappings a no-op. + SaveInodeMapping(inode *Inode, path string) +} + +// InodeMappings defines a fmt.Stringer MountSource Inode mappings. +type InodeMappings map[uint64]string + +// String implements fmt.Stringer.String. +func (i InodeMappings) String() string { + var mappingsBuf bytes.Buffer + mappingsBuf.WriteString("\n") + for ino, name := range i { + mappingsBuf.WriteString(fmt.Sprintf("\t%q\t\tinode number %d\n", name, ino)) + } + return mappingsBuf.String() +} + +// MountSource represents a source of file objects. +// +// MountSource corresponds to struct super_block in Linux. +// +// A mount source may represent a physical device (or a partition of a physical +// device) or a virtual source of files such as procfs for a specific PID +// namespace. There should be only one mount source per logical device. E.g. +// there should be only procfs mount source for a given PID namespace. 
+// +// A mount source represents files as inodes. Every inode belongs to exactly +// one mount source. Each file object may only be represented using one inode +// object in a sentry instance. +// +// TODO(b/63601033): Move Flags out of MountSource to Mount. +// +// +stateify savable +type MountSource struct { + refs.AtomicRefCount + + // MountSourceOperations defines filesystem specific behavior. + MountSourceOperations + + // FilesystemType is the type of the filesystem backing this mount. + FilesystemType string + + // Flags are the flags that this filesystem was mounted with. + Flags MountSourceFlags + + // fscache keeps Dirents pinned beyond application references to them. + // It must be flushed before kernel.SaveTo. + fscache *DirentCache + + // direntRefs is the sum of references on all Dirents in this MountSource. + // + // direntRefs is increased when a Dirent in MountSource is IncRef'd, and + // decreased when a Dirent in MountSource is DecRef'd. + // + // To cleanly unmount a MountSource, one must check that no direntRefs are + // held anymore. To check, one must hold root.parent.dirMu of the + // MountSource's root Dirent before reading direntRefs to prevent further + // walks to Dirents in this MountSource. + // + // direntRefs must be atomically changed. + direntRefs uint64 +} + +// DefaultDirentCacheSize is the number of Dirents that the VFS can hold an +// extra reference on. +const DefaultDirentCacheSize uint64 = 1000 + +// NewMountSource returns a new MountSource. Filesystem may be nil if there is no +// filesystem backing the mount. 
+func NewMountSource(ctx context.Context, mops MountSourceOperations, filesystem Filesystem, flags MountSourceFlags) *MountSource { + fsType := "none" + if filesystem != nil { + fsType = filesystem.Name() + } + msrc := MountSource{ + MountSourceOperations: mops, + Flags: flags, + FilesystemType: fsType, + fscache: NewDirentCache(DefaultDirentCacheSize), + } + msrc.EnableLeakCheck("fs.MountSource") + return &msrc +} + +// DirentRefs returns the current mount direntRefs. +func (msrc *MountSource) DirentRefs() uint64 { + return atomic.LoadUint64(&msrc.direntRefs) +} + +// IncDirentRefs increases direntRefs. +func (msrc *MountSource) IncDirentRefs() { + atomic.AddUint64(&msrc.direntRefs, 1) +} + +// DecDirentRefs decrements direntRefs. +func (msrc *MountSource) DecDirentRefs() { + if atomic.AddUint64(&msrc.direntRefs, ^uint64(0)) == ^uint64(0) { + panic("Decremented zero mount reference direntRefs") + } +} + +func (msrc *MountSource) destroy() { + if c := msrc.DirentRefs(); c != 0 { + panic(fmt.Sprintf("MountSource with non-zero direntRefs is being destroyed: %d", c)) + } + msrc.MountSourceOperations.Destroy() +} + +// DecRef drops a reference on the MountSource. +func (msrc *MountSource) DecRef() { + msrc.DecRefWithDestructor(msrc.destroy) +} + +// FlushDirentRefs drops all references held by the MountSource on Dirents. +func (msrc *MountSource) FlushDirentRefs() { + msrc.fscache.Invalidate() +} + +// SetDirentCacheMaxSize sets the max size to the dirent cache associated with +// this mount source. +func (msrc *MountSource) SetDirentCacheMaxSize(max uint64) { + msrc.fscache.setMaxSize(max) +} + +// SetDirentCacheLimiter sets the limiter object to the dirent cache associated +// with this mount source. +func (msrc *MountSource) SetDirentCacheLimiter(l *DirentCacheLimiter) { + msrc.fscache.limit = l +} + +// NewCachingMountSource returns a generic mount that will cache dirents +// aggressively. 
+func NewCachingMountSource(ctx context.Context, filesystem Filesystem, flags MountSourceFlags) *MountSource { + return NewMountSource(ctx, &SimpleMountSourceOperations{ + keep: true, + revalidate: false, + cacheReaddir: true, + }, filesystem, flags) +} + +// NewNonCachingMountSource returns a generic mount that will never cache dirents. +func NewNonCachingMountSource(ctx context.Context, filesystem Filesystem, flags MountSourceFlags) *MountSource { + return NewMountSource(ctx, &SimpleMountSourceOperations{ + keep: false, + revalidate: false, + cacheReaddir: false, + }, filesystem, flags) +} + +// NewRevalidatingMountSource returns a generic mount that will cache dirents, +// but will revalidate them on each lookup and always perform uncached readdir. +func NewRevalidatingMountSource(ctx context.Context, filesystem Filesystem, flags MountSourceFlags) *MountSource { + return NewMountSource(ctx, &SimpleMountSourceOperations{ + keep: true, + revalidate: true, + cacheReaddir: false, + }, filesystem, flags) +} + +// NewPseudoMountSource returns a "pseudo" mount source that is not backed by +// an actual filesystem. It is always non-caching. +func NewPseudoMountSource(ctx context.Context) *MountSource { + return NewMountSource(ctx, &SimpleMountSourceOperations{ + keep: false, + revalidate: false, + cacheReaddir: false, + }, nil, MountSourceFlags{}) +} + +// SimpleMountSourceOperations implements MountSourceOperations. +// +// +stateify savable +type SimpleMountSourceOperations struct { + keep bool + revalidate bool + cacheReaddir bool +} + +// Revalidate implements MountSourceOperations.Revalidate. +func (smo *SimpleMountSourceOperations) Revalidate(context.Context, string, *Inode, *Inode) bool { + return smo.revalidate +} + +// Keep implements MountSourceOperations.Keep. +func (smo *SimpleMountSourceOperations) Keep(*Dirent) bool { + return smo.keep +} + +// CacheReaddir implements MountSourceOperations.CacheReaddir. 
+func (smo *SimpleMountSourceOperations) CacheReaddir() bool { + return smo.cacheReaddir +} + +// ResetInodeMappings implements MountSourceOperations.ResetInodeMappings. +func (*SimpleMountSourceOperations) ResetInodeMappings() {} + +// SaveInodeMapping implements MountSourceOperations.SaveInodeMapping. +func (*SimpleMountSourceOperations) SaveInodeMapping(*Inode, string) {} + +// Destroy implements MountSourceOperations.Destroy. +func (*SimpleMountSourceOperations) Destroy() {} + +// Info defines attributes of a filesystem. +type Info struct { + // Type is the filesystem type magic value. + Type uint64 + + // TotalBlocks is the total data blocks in the filesystem. + TotalBlocks uint64 + + // FreeBlocks is the number of free blocks available. + FreeBlocks uint64 + + // TotalFiles is the total file nodes in the filesystem. + TotalFiles uint64 + + // FreeFiles is the number of free file nodes. + FreeFiles uint64 +} diff --git a/pkg/sentry/fs/mount_overlay.go b/pkg/sentry/fs/mount_overlay.go new file mode 100644 index 000000000..78e35b1e6 --- /dev/null +++ b/pkg/sentry/fs/mount_overlay.go @@ -0,0 +1,151 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.dev/gvisor/pkg/context" +) + +// overlayMountSourceOperations implements MountSourceOperations for an overlay +// mount point. The upper filesystem determines the caching behavior of the +// overlay. 
+// +// +stateify savable +type overlayMountSourceOperations struct { + upper *MountSource + lower *MountSource +} + +func newOverlayMountSource(ctx context.Context, upper, lower *MountSource, flags MountSourceFlags) *MountSource { + upper.IncRef() + lower.IncRef() + msrc := NewMountSource(ctx, &overlayMountSourceOperations{ + upper: upper, + lower: lower, + }, &overlayFilesystem{}, flags) + + // Use the minimum number to keep resource usage under limits. + size := lower.fscache.maxSize + if size > upper.fscache.maxSize { + size = upper.fscache.maxSize + } + msrc.fscache.setMaxSize(size) + + return msrc +} + +// Revalidate implements MountSourceOperations.Revalidate for an overlay by +// delegating to the upper filesystem's Revalidate method. We cannot reload +// files from the lower filesystem, so we panic if the lower filesystem's +// Revalidate method returns true. +func (o *overlayMountSourceOperations) Revalidate(ctx context.Context, name string, parent, child *Inode) bool { + if child.overlay == nil { + panic("overlay cannot revalidate inode that is not an overlay") + } + + // Revalidate is never called on a mount point, so parent and child + // must be from the same mount, and thus must both be overlay inodes. + if parent.overlay == nil { + panic("trying to revalidate an overlay inode but the parent is not an overlay") + } + + // We can't revalidate from the lower filesystem. + if child.overlay.lower != nil && o.lower.Revalidate(ctx, name, parent.overlay.lower, child.overlay.lower) { + panic("an overlay cannot revalidate file objects from the lower fs") + } + + var revalidate bool + child.overlay.copyMu.RLock() + if child.overlay.upper != nil { + // Does the upper require revalidation? + revalidate = o.upper.Revalidate(ctx, name, parent.overlay.upper, child.overlay.upper) + } else { + // Nothing to revalidate. 
+ revalidate = false + } + child.overlay.copyMu.RUnlock() + return revalidate +} + +// Keep implements MountSourceOperations by delegating to the upper +// filesystem's Keep method. +func (o *overlayMountSourceOperations) Keep(dirent *Dirent) bool { + return o.upper.Keep(dirent) +} + +// CacheReaddir implements MountSourceOperations.CacheReaddir for an overlay by +// performing the logical AND of the upper and lower filesystems' CacheReaddir +// methods. +// +// N.B. This is fs-global instead of inode-specific because it must always +// return the same value. If it was inode-specific, we couldn't guarantee that +// property across copy up. +func (o *overlayMountSourceOperations) CacheReaddir() bool { + return o.lower.CacheReaddir() && o.upper.CacheReaddir() +} + +// ResetInodeMappings propagates the call to both upper and lower MountSource. +func (o *overlayMountSourceOperations) ResetInodeMappings() { + o.upper.ResetInodeMappings() + o.lower.ResetInodeMappings() +} + +// SaveInodeMapping propagates the call to both upper and lower MountSource. +func (o *overlayMountSourceOperations) SaveInodeMapping(inode *Inode, path string) { + inode.overlay.copyMu.RLock() + defer inode.overlay.copyMu.RUnlock() + if inode.overlay.upper != nil { + o.upper.SaveInodeMapping(inode.overlay.upper, path) + } + if inode.overlay.lower != nil { + o.lower.SaveInodeMapping(inode.overlay.lower, path) + } +} + +// Destroy drops references on the upper and lower MountSource. +func (o *overlayMountSourceOperations) Destroy() { + o.upper.DecRef() + o.lower.DecRef() +} + +// type overlayFilesystem is the filesystem for overlay mounts. +// +// +stateify savable +type overlayFilesystem struct{} + +// Name implements Filesystem.Name. +func (ofs *overlayFilesystem) Name() string { + return "overlayfs" +} + +// Flags implements Filesystem.Flags. +func (ofs *overlayFilesystem) Flags() FilesystemFlags { + return 0 +} + +// AllowUserMount implements Filesystem.AllowUserMount. 
+func (ofs *overlayFilesystem) AllowUserMount() bool { + return false +} + +// AllowUserList implements Filesystem.AllowUserList. +func (*overlayFilesystem) AllowUserList() bool { + return true +} + +// Mount implements Filesystem.Mount. +func (ofs *overlayFilesystem) Mount(ctx context.Context, device string, flags MountSourceFlags, data string, _ interface{}) (*Inode, error) { + panic("overlayFilesystem.Mount should not be called!") +} diff --git a/pkg/sentry/fs/mount_test.go b/pkg/sentry/fs/mount_test.go new file mode 100644 index 000000000..a3d10770b --- /dev/null +++ b/pkg/sentry/fs/mount_test.go @@ -0,0 +1,272 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "testing" + + "gvisor.dev/gvisor/pkg/sentry/contexttest" +) + +// cacheReallyContains iterates through the dirent cache to determine whether +// it contains the given dirent. 
+func cacheReallyContains(cache *DirentCache, d *Dirent) bool { + for i := cache.list.Front(); i != nil; i = i.Next() { + if i == d { + return true + } + } + return false +} + +func mountPathsAre(root *Dirent, got []*Mount, want ...string) error { + gotPaths := make(map[string]struct{}, len(got)) + gotStr := make([]string, len(got)) + for i, g := range got { + if groot := g.Root(); groot != nil { + name, _ := groot.FullName(root) + groot.DecRef() + gotStr[i] = name + gotPaths[name] = struct{}{} + } + } + if len(got) != len(want) { + return fmt.Errorf("mount paths are different, got: %q, want: %q", gotStr, want) + } + for _, w := range want { + if _, ok := gotPaths[w]; !ok { + return fmt.Errorf("no mount with path %q found", w) + } + } + return nil +} + +// TestMountSourceOnlyCachedOnce tests that a Dirent that is mounted over only ends +// up in a single Dirent Cache. NOTE(b/63848693): Having a dirent in multiple +// caches causes major consistency issues. +func TestMountSourceOnlyCachedOnce(t *testing.T) { + ctx := contexttest.Context(t) + + rootCache := NewDirentCache(100) + rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{ + Type: Directory, + }) + mm, err := NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + rootDirent := mm.Root() + defer rootDirent.DecRef() + + // Get a child of the root which we will mount over. Note that the + // MockInodeOperations causes Walk to always succeed. + child, err := rootDirent.Walk(ctx, rootDirent, "child") + if err != nil { + t.Fatalf("failed to walk to child dirent: %v", err) + } + child.maybeExtendReference() // Cache. + + // Ensure that the root cache contains the child. + if !cacheReallyContains(rootCache, child) { + t.Errorf("wanted rootCache to contain child dirent, but it did not") + } + + // Create a new cache and inode, and mount it over child. 
+ submountCache := NewDirentCache(100) + submountInode := NewMockInode(ctx, NewMockMountSource(submountCache), StableAttr{ + Type: Directory, + }) + if err := mm.Mount(ctx, child, submountInode); err != nil { + t.Fatalf("failed to mount over child: %v", err) + } + + // Walk to the child again. + child2, err := rootDirent.Walk(ctx, rootDirent, "child") + if err != nil { + t.Fatalf("failed to walk to child dirent: %v", err) + } + + // Should have a different Dirent than before. + if child == child2 { + t.Fatalf("expected %v not equal to %v, but they are the same", child, child2) + } + + // Neither of the caches should not contain the child. + if cacheReallyContains(rootCache, child) { + t.Errorf("wanted rootCache not to contain child dirent, but it did") + } + if cacheReallyContains(submountCache, child) { + t.Errorf("wanted submountCache not to contain child dirent, but it did") + } +} + +func TestAllMountsUnder(t *testing.T) { + ctx := contexttest.Context(t) + + rootCache := NewDirentCache(100) + rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{ + Type: Directory, + }) + mm, err := NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + rootDirent := mm.Root() + defer rootDirent.DecRef() + + // Add mounts at the following paths: + paths := []string{ + "/foo", + "/foo/bar", + "/foo/bar/baz", + "/foo/qux", + "/waldo", + } + + var maxTraversals uint + for _, p := range paths { + maxTraversals = 0 + d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", p, err) + } + + submountInode := NewMockInode(ctx, NewMockMountSource(nil), StableAttr{ + Type: Directory, + }) + if err := mm.Mount(ctx, d, submountInode); err != nil { + t.Fatalf("could not mount at %q: %v", p, err) + } + d.DecRef() + } + + // mm root should contain all submounts (and does not include the root mount). 
+ rootMnt := mm.FindMount(rootDirent) + submounts := mm.AllMountsUnder(rootMnt) + allPaths := append(paths, "/") + if err := mountPathsAre(rootDirent, submounts, allPaths...); err != nil { + t.Error(err) + } + + // Each mount should have a unique ID. + foundIDs := make(map[uint64]struct{}) + for _, m := range submounts { + if _, ok := foundIDs[m.ID]; ok { + t.Errorf("got multiple mounts with id %d", m.ID) + } + foundIDs[m.ID] = struct{}{} + } + + // Root mount should have no parent. + if p := rootMnt.ParentID; p != invalidMountID { + t.Errorf("root.Parent got %v wanted nil", p) + } + + // Check that "foo" mount has 3 children. + maxTraversals = 0 + d, err := mm.FindLink(ctx, rootDirent, nil, "/foo", &maxTraversals) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", "/foo", err) + } + defer d.DecRef() + submounts = mm.AllMountsUnder(mm.FindMount(d)) + if err := mountPathsAre(rootDirent, submounts, "/foo", "/foo/bar", "/foo/qux", "/foo/bar/baz"); err != nil { + t.Error(err) + } + + // "waldo" mount should have no children. 
+ maxTraversals = 0 + waldo, err := mm.FindLink(ctx, rootDirent, nil, "/waldo", &maxTraversals) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", "/waldo", err) + } + defer waldo.DecRef() + submounts = mm.AllMountsUnder(mm.FindMount(waldo)) + if err := mountPathsAre(rootDirent, submounts, "/waldo"); err != nil { + t.Error(err) + } +} + +func TestUnmount(t *testing.T) { + ctx := contexttest.Context(t) + + rootCache := NewDirentCache(100) + rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{ + Type: Directory, + }) + mm, err := NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + rootDirent := mm.Root() + defer rootDirent.DecRef() + + // Add mounts at the following paths: + paths := []string{ + "/foo", + "/foo/bar", + "/foo/bar/goo", + "/foo/bar/goo/abc", + "/foo/abc", + "/foo/def", + "/waldo", + "/wally", + } + + var maxTraversals uint + for _, p := range paths { + maxTraversals = 0 + d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", p, err) + } + + submountInode := NewMockInode(ctx, NewMockMountSource(nil), StableAttr{ + Type: Directory, + }) + if err := mm.Mount(ctx, d, submountInode); err != nil { + t.Fatalf("could not mount at %q: %v", p, err) + } + d.DecRef() + } + + allPaths := make([]string, len(paths)+1) + allPaths[0] = "/" + copy(allPaths[1:], paths) + + rootMnt := mm.FindMount(rootDirent) + for i := len(paths) - 1; i >= 0; i-- { + maxTraversals = 0 + p := paths[i] + d, err := mm.FindLink(ctx, rootDirent, nil, p, &maxTraversals) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", p, err) + } + + if err := mm.Unmount(ctx, d, false); err != nil { + t.Fatalf("could not unmount at %q: %v", p, err) + } + d.DecRef() + + // Remove the path that has been unmounted and the check that the remaining + // mounts are still there. 
+ allPaths = allPaths[:len(allPaths)-1] + submounts := mm.AllMountsUnder(rootMnt) + if err := mountPathsAre(rootDirent, submounts, allPaths...); err != nil { + t.Error(err) + } + } +} diff --git a/pkg/sentry/fs/mounts.go b/pkg/sentry/fs/mounts.go new file mode 100644 index 000000000..3f2bd0e87 --- /dev/null +++ b/pkg/sentry/fs/mounts.go @@ -0,0 +1,623 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "math" + "syscall" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" +) + +// DefaultTraversalLimit provides a sensible default traversal limit that may +// be passed to FindInode and FindLink. You may want to provide other options in +// individual syscall implementations, but for internal functions this will be +// sane. +const DefaultTraversalLimit = 10 + +const invalidMountID = math.MaxUint64 + +// Mount represents a mount in the file system. It holds the root dirent for the +// mount. It also points back to the dirent or mount where it was mounted over, +// so that it can be restored when unmounted. The chained mount can be either: +// - Mount: when it's mounted on top of another mount point. +// - Dirent: when it's mounted on top of a dirent. In this case the mount is +// called an "undo" mount and only 'root' is set. 
All other fields are
// either invalid or nil.
//
// +stateify savable
type Mount struct {
	// ID is a unique id for this mount. It may be invalidMountID if this is
	// used to cache a dirent that was mounted over.
	ID uint64

	// ParentID is the parent's mount unique id. It may be invalidMountID if this
	// is the root mount or if this is used to cache a dirent that was mounted
	// over.
	ParentID uint64

	// root is the root Dirent of this mount. A reference on this Dirent must be
	// held through the lifetime of the Mount which contains it.
	root *Dirent

	// previous is the existing dirent or mount that this object was mounted over.
	// It's nil for the root mount and for the last entry in the chain (always an
	// "undo" mount).
	previous *Mount
}

// newMount creates a new mount, taking a reference on 'root'. Caller must
// release the reference when it's done with the mount.
func newMount(id, pid uint64, root *Dirent) *Mount {
	root.IncRef()
	return &Mount{
		ID:       id,
		ParentID: pid,
		root:     root,
	}
}

// newRootMount creates a new root mount (no parent), taking a reference on
// 'root'. Caller must release the reference when it's done with the mount.
func newRootMount(id uint64, root *Dirent) *Mount {
	root.IncRef()
	return &Mount{
		ID:       id,
		ParentID: invalidMountID,
		root:     root,
	}
}

// newUndoMount creates a new undo mount, taking a reference on 'd'. Caller must
// release the reference when it's done with the mount. Both IDs are
// invalidMountID: an undo mount only records the mounted-over Dirent so it can
// be restored on unmount; it is not a real mount (see Mount.IsUndo).
func newUndoMount(d *Dirent) *Mount {
	d.IncRef()
	return &Mount{
		ID:       invalidMountID,
		ParentID: invalidMountID,
		root:     d,
	}
}

// Root returns the root dirent of this mount.
//
// This may return nil if the mount has already been freed. Callers must handle
// this case appropriately. If non-nil, callers must call DecRef on the
// returned *Dirent.
+func (m *Mount) Root() *Dirent { + if !m.root.TryIncRef() { + return nil + } + return m.root +} + +// IsRoot returns true if the mount has no parent. +func (m *Mount) IsRoot() bool { + return !m.IsUndo() && m.ParentID == invalidMountID +} + +// IsUndo returns true if 'm' is an undo mount that should be used to restore +// the original dirent during unmount only and it's not a valid mount. +func (m *Mount) IsUndo() bool { + if m.ID == invalidMountID { + if m.ParentID != invalidMountID { + panic(fmt.Sprintf("Undo mount with valid parentID: %+v", m)) + } + return true + } + return false +} + +// MountNamespace defines a VFS root. It contains collection of Mounts that are +// mounted inside the Dirent tree rooted at the Root Dirent. It provides +// methods for traversing the Dirent, and for mounting/unmounting in the tree. +// +// Note that this does not correspond to a "mount namespace" in the Linux. It +// is more like a unique VFS instance. +// +// It's possible for different processes to have different MountNamespaces. In +// this case, the file systems exposed to the processes are completely +// distinct. +// +// +stateify savable +type MountNamespace struct { + refs.AtomicRefCount + + // userns is the user namespace associated with this mount namespace. + // + // All privileged operations on this mount namespace must have + // appropriate capabilities in this userns. + // + // userns is immutable. + userns *auth.UserNamespace + + // root is the root directory. + root *Dirent + + // mu protects mounts and mountID counter. + mu sync.Mutex `state:"nosave"` + + // mounts is a map of mounted Dirent -> Mount object. There are three + // possible cases: + // - Dirent is mounted over a mount point: the stored Mount object will be + // the Mount for that mount point. + // - Dirent is mounted over a regular (non-mount point) Dirent: the stored + // Mount object will be an "undo" mount containing the mounted-over + // Dirent. 
+ // - Dirent is the root mount: the stored Mount object will be a root mount + // containing the Dirent itself. + mounts map[*Dirent]*Mount + + // mountID is the next mount id to assign. + mountID uint64 +} + +// NewMountNamespace returns a new MountNamespace, with the provided node at the +// root, and the given cache size. A root must always be provided. +func NewMountNamespace(ctx context.Context, root *Inode) (*MountNamespace, error) { + // Set the root dirent and id on the root mount. The reference returned from + // NewDirent will be donated to the MountNamespace constructed below. + d := NewDirent(ctx, root, "/") + + mnts := map[*Dirent]*Mount{ + d: newRootMount(1, d), + } + + creds := auth.CredentialsFromContext(ctx) + mns := MountNamespace{ + userns: creds.UserNamespace, + root: d, + mounts: mnts, + mountID: 2, + } + mns.EnableLeakCheck("fs.MountNamespace") + return &mns, nil +} + +// UserNamespace returns the user namespace associated with this mount manager. +func (mns *MountNamespace) UserNamespace() *auth.UserNamespace { + return mns.userns +} + +// Root returns the MountNamespace's root Dirent and increments its reference +// count. The caller must call DecRef when finished. +func (mns *MountNamespace) Root() *Dirent { + mns.root.IncRef() + return mns.root +} + +// FlushMountSourceRefs flushes extra references held by MountSources for all active mount points; +// see fs/mount.go:MountSource.FlushDirentRefs. +func (mns *MountNamespace) FlushMountSourceRefs() { + mns.mu.Lock() + defer mns.mu.Unlock() + mns.flushMountSourceRefsLocked() +} + +func (mns *MountNamespace) flushMountSourceRefsLocked() { + // Flush mounts' MountSource references. + for _, mp := range mns.mounts { + for ; mp != nil; mp = mp.previous { + mp.root.Inode.MountSource.FlushDirentRefs() + } + } + + if mns.root == nil { + // No root? This MountSource must have already been destroyed. + // This can happen when a Save is triggered while a process is + // exiting. 
There is nothing to flush. + return + } + + // Flush root's MountSource references. + mns.root.Inode.MountSource.FlushDirentRefs() +} + +// destroy drops root and mounts dirent references and closes any original nodes. +// +// After destroy is called, the MountNamespace may continue to be referenced (for +// example via /proc/mounts), but should free all resources and shouldn't have +// Find* methods called. +func (mns *MountNamespace) destroy() { + mns.mu.Lock() + defer mns.mu.Unlock() + + // Flush all mounts' MountSource references to Dirents. This allows for mount + // points to be torn down since there should be no remaining references after + // this and DecRef below. + mns.flushMountSourceRefsLocked() + + // Teardown mounts. + for _, mp := range mns.mounts { + // Drop the mount reference on all mounted dirents. + for ; mp != nil; mp = mp.previous { + mp.root.DecRef() + } + } + mns.mounts = nil + + // Drop reference on the root. + mns.root.DecRef() + + // Ensure that root cannot be accessed via this MountNamespace any + // more. + mns.root = nil + + // Wait for asynchronous work (queued by dropping Dirent references + // above) to complete before destroying this MountNamespace. + AsyncBarrier() +} + +// DecRef implements RefCounter.DecRef with destructor mns.destroy. +func (mns *MountNamespace) DecRef() { + mns.DecRefWithDestructor(mns.destroy) +} + +// withMountLocked prevents further walks to `node`, because `node` is about to +// be a mount point. +func (mns *MountNamespace) withMountLocked(node *Dirent, fn func() error) error { + mns.mu.Lock() + defer mns.mu.Unlock() + + renameMu.Lock() + defer renameMu.Unlock() + + // Linux allows mounting over the root (?). It comes with a strange set + // of semantics. We'll just not do this for now. + if node.parent == nil { + return syserror.EBUSY + } + + // For both mount and unmount, we take this lock so we can swap out the + // appropriate child in parent.children. 
+ // + // For unmount, this also ensures that if `node` is a mount point, the + // underlying mount's MountSource.direntRefs cannot increase by preventing + // walks to node. + node.parent.dirMu.Lock() + defer node.parent.dirMu.Unlock() + + node.parent.mu.Lock() + defer node.parent.mu.Unlock() + + // We need not take node.dirMu since we have parent.dirMu. + + // We need to take node.mu, so that we can check for deletion. + node.mu.Lock() + defer node.mu.Unlock() + + return fn() +} + +// Mount mounts a `inode` over the subtree at `node`. +func (mns *MountNamespace) Mount(ctx context.Context, mountPoint *Dirent, inode *Inode) error { + return mns.withMountLocked(mountPoint, func() error { + replacement, err := mountPoint.mount(ctx, inode) + if err != nil { + return err + } + defer replacement.DecRef() + + // Set the mount's root dirent and id. + parentMnt := mns.findMountLocked(mountPoint) + childMnt := newMount(mns.mountID, parentMnt.ID, replacement) + mns.mountID++ + + // Drop mountPoint from its dirent cache. + mountPoint.dropExtendedReference() + + // If mountPoint is already a mount, push mountPoint on the stack so it can + // be recovered on unmount. + if prev := mns.mounts[mountPoint]; prev != nil { + childMnt.previous = prev + mns.mounts[replacement] = childMnt + delete(mns.mounts, mountPoint) + return nil + } + + // Was not already mounted, just add another mount point. + childMnt.previous = newUndoMount(mountPoint) + mns.mounts[replacement] = childMnt + return nil + }) +} + +// Unmount ensures no references to the MountSource remain and removes `node` from +// this subtree. The subtree formerly mounted in `node`'s place will be +// restored. node's MountSource will be destroyed as soon as the last reference to +// `node` is dropped, as no references to Dirents within will remain. +// +// If detachOnly is set, Unmount merely removes `node` from the subtree, but +// allows existing references to the MountSource remain. E.g. 
if an open file still +// refers to Dirents in MountSource, the Unmount will succeed anyway and MountSource will +// be destroyed at a later time when all references to Dirents within are +// dropped. +// +// The caller must hold a reference to node from walking to it. +func (mns *MountNamespace) Unmount(ctx context.Context, node *Dirent, detachOnly bool) error { + // This takes locks to prevent further walks to Dirents in this mount + // under the assumption that `node` is the root of the mount. + return mns.withMountLocked(node, func() error { + orig, ok := mns.mounts[node] + if !ok { + // node is not a mount point. + return syserror.EINVAL + } + + if orig.previous == nil { + panic("cannot unmount initial dirent") + } + + m := node.Inode.MountSource + if !detachOnly { + // Flush all references on the mounted node. + m.FlushDirentRefs() + + // At this point, exactly two references must be held + // to mount: one mount reference on node, and one due + // to walking to node. + // + // We must also be guaranteed that no more references + // can be taken on mount. This is why withMountLocked + // must be held at this point to prevent any walks to + // and from node. + if refs := m.DirentRefs(); refs < 2 { + panic(fmt.Sprintf("have %d refs on unmount, expect 2 or more", refs)) + } else if refs != 2 { + return syserror.EBUSY + } + } + + prev := orig.previous + if err := node.unmount(ctx, prev.root); err != nil { + return err + } + + if prev.previous == nil { + if !prev.IsUndo() { + panic(fmt.Sprintf("Last mount in the chain must be a undo mount: %+v", prev)) + } + // Drop mount reference taken at the end of MountNamespace.Mount. + prev.root.DecRef() + } else { + mns.mounts[prev.root] = prev + } + delete(mns.mounts, node) + + return nil + }) +} + +// FindMount returns the mount that 'd' belongs to. It walks the dirent back +// until a mount is found. It may return nil if no mount was found. 
+func (mns *MountNamespace) FindMount(d *Dirent) *Mount { + mns.mu.Lock() + defer mns.mu.Unlock() + renameMu.Lock() + defer renameMu.Unlock() + + return mns.findMountLocked(d) +} + +func (mns *MountNamespace) findMountLocked(d *Dirent) *Mount { + for { + if mnt := mns.mounts[d]; mnt != nil { + return mnt + } + if d.parent == nil { + return nil + } + d = d.parent + } +} + +// AllMountsUnder returns a slice of all mounts under the parent, including +// itself. +func (mns *MountNamespace) AllMountsUnder(parent *Mount) []*Mount { + mns.mu.Lock() + defer mns.mu.Unlock() + + var rv []*Mount + for _, mp := range mns.mounts { + if !mp.IsUndo() && mp.root.descendantOf(parent.root) { + rv = append(rv, mp) + } + } + return rv +} + +// FindLink returns an Dirent from a given node, which may be a symlink. +// +// The root argument is treated as the root directory, and FindLink will not +// return anything above that. The wd dirent provides the starting directory, +// and may be nil which indicates the root should be used. You must call DecRef +// on the resulting Dirent when you are no longer using the object. +// +// If wd is nil, then the root will be used as the working directory. If the +// path is absolute, this has no functional impact. +// +// Precondition: root must be non-nil. +// Precondition: the path must be non-empty. +func (mns *MountNamespace) FindLink(ctx context.Context, root, wd *Dirent, path string, remainingTraversals *uint) (*Dirent, error) { + if root == nil { + panic("MountNamespace.FindLink: root must not be nil") + } + if len(path) == 0 { + panic("MountNamespace.FindLink: path is empty") + } + + // Split the path. + first, remainder := SplitFirst(path) + + // Where does this walk originate? + current := wd + if current == nil { + current = root + } + for first == "/" { + // Special case: it's possible that we have nothing to walk at + // all. This is necessary since we're resplitting the path. 
+ if remainder == "" { + root.IncRef() + return root, nil + } + + // Start at the root and advance the path component so that the + // walk below can proceed. Note at this point, it handles the + // no-op walk case perfectly fine. + current = root + first, remainder = SplitFirst(remainder) + } + + current.IncRef() // Transferred during walk. + + for { + // Check that the file is a directory and that we have + // permissions to walk. + // + // Note that we elide this check for the root directory as an + // optimization; a non-executable root may still be walked. A + // non-directory root is hopeless. + if current != root { + if !IsDir(current.Inode.StableAttr) { + current.DecRef() // Drop reference from above. + return nil, syserror.ENOTDIR + } + if err := current.Inode.CheckPermission(ctx, PermMask{Execute: true}); err != nil { + current.DecRef() // Drop reference from above. + return nil, err + } + } + + // Move to the next level. + next, err := current.Walk(ctx, root, first) + if err != nil { + // Allow failed walks to cache the dirent, because no + // children will acquire a reference at the end. + current.maybeExtendReference() + current.DecRef() + return nil, err + } + + // Drop old reference. + current.DecRef() + + if remainder != "" { + // Ensure it's resolved, unless it's the last level. + // + // See resolve for reference semantics; on err next + // will have one dropped. + current, err = mns.resolve(ctx, root, next, remainingTraversals) + if err != nil { + return nil, err + } + } else { + // Allow the file system to take an extra reference on the + // found child. This will hold a reference on the containing + // directory, so the whole tree will be implicitly cached. + next.maybeExtendReference() + return next, nil + } + + // Move to the next element. + first, remainder = SplitFirst(remainder) + } +} + +// FindInode is identical to FindLink except the return value is resolved. 
+// +//go:nosplit +func (mns *MountNamespace) FindInode(ctx context.Context, root, wd *Dirent, path string, remainingTraversals *uint) (*Dirent, error) { + d, err := mns.FindLink(ctx, root, wd, path, remainingTraversals) + if err != nil { + return nil, err + } + + // See resolve for reference semantics; on err d will have the + // reference dropped. + return mns.resolve(ctx, root, d, remainingTraversals) +} + +// resolve resolves the given link. +// +// If successful, a reference is dropped on node and one is acquired on the +// caller's behalf for the returned dirent. +// +// If not successful, a reference is _also_ dropped on the node and an error +// returned. This is for convenience in using resolve directly as a return +// value. +func (mns *MountNamespace) resolve(ctx context.Context, root, node *Dirent, remainingTraversals *uint) (*Dirent, error) { + // Resolve the path. + target, err := node.Inode.Getlink(ctx) + + switch err { + case nil: + // Make sure we didn't exhaust the traversal budget. + if *remainingTraversals == 0 { + target.DecRef() + return nil, syscall.ELOOP + } + + node.DecRef() // Drop the original reference. + return target, nil + + case syscall.ENOLINK: + // Not a symlink. + return node, nil + + case ErrResolveViaReadlink: + defer node.DecRef() // See above. + + // First, check if we should traverse. + if *remainingTraversals == 0 { + return nil, syscall.ELOOP + } + + // Read the target path. + targetPath, err := node.Inode.Readlink(ctx) + if err != nil { + return nil, err + } + + // Find the node; we resolve relative to the current symlink's parent. + renameMu.RLock() + parent := node.parent + renameMu.RUnlock() + *remainingTraversals-- + d, err := mns.FindInode(ctx, root, parent, targetPath, remainingTraversals) + if err != nil { + return nil, err + } + + return d, err + + default: + node.DecRef() // Drop for err; see above. + + // Propagate the error. + return nil, err + } +} + +// SyncAll calls Dirent.SyncAll on the root. 
+func (mns *MountNamespace) SyncAll(ctx context.Context) { + mns.mu.Lock() + defer mns.mu.Unlock() + mns.root.SyncAll(ctx) +} diff --git a/pkg/sentry/fs/mounts_test.go b/pkg/sentry/fs/mounts_test.go new file mode 100644 index 000000000..a69b41468 --- /dev/null +++ b/pkg/sentry/fs/mounts_test.go @@ -0,0 +1,105 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs_test + +import ( + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" +) + +// Creates a new MountNamespace with filesystem: +// / (root dir) +// |-foo (dir) +// |-bar (file) +func createMountNamespace(ctx context.Context) (*fs.MountNamespace, error) { + perms := fs.FilePermsFromMode(0777) + m := fs.NewPseudoMountSource(ctx) + + barFile := fsutil.NewSimpleFileInode(ctx, fs.RootOwner, perms, 0) + fooDir := ramfs.NewDir(ctx, map[string]*fs.Inode{ + "bar": fs.NewInode(ctx, barFile, m, fs.StableAttr{Type: fs.RegularFile}), + }, fs.RootOwner, perms) + rootDir := ramfs.NewDir(ctx, map[string]*fs.Inode{ + "foo": fs.NewInode(ctx, fooDir, m, fs.StableAttr{Type: fs.Directory}), + }, fs.RootOwner, perms) + + return fs.NewMountNamespace(ctx, fs.NewInode(ctx, rootDir, m, fs.StableAttr{Type: fs.Directory})) +} + +func TestFindLink(t *testing.T) { + ctx := contexttest.Context(t) + mm, err := 
createMountNamespace(ctx) + if err != nil { + t.Fatalf("createMountNamespace failed: %v", err) + } + + root := mm.Root() + defer root.DecRef() + foo, err := root.Walk(ctx, root, "foo") + if err != nil { + t.Fatalf("Error walking to foo: %v", err) + } + + // Positive cases. + for _, tc := range []struct { + findPath string + wd *fs.Dirent + wantPath string + }{ + {".", root, "/"}, + {".", foo, "/foo"}, + {"..", foo, "/"}, + {"../../..", foo, "/"}, + {"///foo", foo, "/foo"}, + {"/foo", foo, "/foo"}, + {"/foo/bar", foo, "/foo/bar"}, + {"/foo/.///./bar", foo, "/foo/bar"}, + {"/foo///bar", foo, "/foo/bar"}, + {"/foo/../foo/bar", foo, "/foo/bar"}, + {"foo/bar", root, "/foo/bar"}, + {"foo////bar", root, "/foo/bar"}, + {"bar", foo, "/foo/bar"}, + } { + wdPath, _ := tc.wd.FullName(root) + maxTraversals := uint(0) + if d, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, &maxTraversals); err != nil { + t.Errorf("FindLink(%q, wd=%q) failed: %v", tc.findPath, wdPath, err) + } else if got, _ := d.FullName(root); got != tc.wantPath { + t.Errorf("FindLink(%q, wd=%q) got dirent %q, want %q", tc.findPath, wdPath, got, tc.wantPath) + } + } + + // Negative cases. + for _, tc := range []struct { + findPath string + wd *fs.Dirent + }{ + {"bar", root}, + {"/bar", root}, + {"/foo/../../bar", root}, + {"foo", foo}, + } { + wdPath, _ := tc.wd.FullName(root) + maxTraversals := uint(0) + if _, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, &maxTraversals); err == nil { + t.Errorf("FindLink(%q, wd=%q) did not return error", tc.findPath, wdPath) + } + } +} diff --git a/pkg/sentry/fs/offset.go b/pkg/sentry/fs/offset.go new file mode 100644 index 000000000..53b5df175 --- /dev/null +++ b/pkg/sentry/fs/offset.go @@ -0,0 +1,65 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// ReadEndOffset returns an exclusive end offset for a read operation
// so that the read does not overflow an int64 nor size.
//
// Parameters:
//   - offset: the starting offset of the read.
//   - length: the number of bytes to read.
//   - size: the size of the file.
//
// Postconditions: The returned offset is >= offset.
func ReadEndOffset(offset int64, length int64, size int64) int64 {
	// A read that begins at or beyond EOF is empty: the exclusive end
	// equals the start.
	if offset >= size {
		return offset
	}
	// Use offset+length when it neither wraps around int64 nor passes
	// EOF; otherwise clamp to size.
	if end := offset + length; end >= offset && end <= size {
		return end
	}
	return size
}
+func WriteEndOffset(offset int64, length int64) int64 { + return ReadEndOffset(offset, length, math.MaxInt64) +} diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go new file mode 100644 index 000000000..a8ae7d81d --- /dev/null +++ b/pkg/sentry/fs/overlay.go @@ -0,0 +1,320 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "strings" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +// The virtual filesystem implements an overlay configuration. For a high-level +// description, see README.md. +// +// Note on whiteouts: +// +// This implementation does not use the "Docker-style" whiteouts (symlinks with +// ".wh." prefix). Instead upper filesystem directories support a set of extended +// attributes to encode whiteouts: "trusted.overlay.whiteout.<filename>". This +// gives flexibility to persist whiteouts independently of the filesystem layout +// while additionally preventing name conflicts with files prefixed with ".wh.". +// +// Known deficiencies: +// +// - The device number of two files under the same overlay mount point may be +// different. 
This can happen if a file is found in the lower filesystem (takes +// the lower filesystem device) and another file is created in the upper +// filesystem (takes the upper filesystem device). This may appear odd but +// should not break applications. +// +// - Registered events on files (i.e. for notification of read/write readiness) +// are not copied across copy up. This is fine in the common case of files that +// do not block. For files that do block, like pipes and sockets, copy up is not +// supported. +// +// - Hardlinks in a lower filesystem are broken by copy up. For this reason, no +// attempt is made to preserve link count across copy up. +// +// - The maximum length of an extended attribute name is the same as the maximum +// length of a file path in Linux (XATTR_NAME_MAX == NAME_MAX). This means that +// whiteout attributes, if set directly on the host, are limited additionally by +// the extra whiteout prefix length (file paths must be strictly shorter than +// NAME_MAX). This is not a problem for in-memory filesystems which don't enforce +// XATTR_NAME_MAX. + +const ( + // XattrOverlayPrefix is the prefix for extended attributes that affect + // the behavior of an overlay. + XattrOverlayPrefix = "trusted.overlay." + + // XattrOverlayWhiteoutPrefix is the prefix for extended attributes + // that indicate that a whiteout exists. + XattrOverlayWhiteoutPrefix = XattrOverlayPrefix + "whiteout." +) + +// XattrOverlayWhiteout returns an extended attribute that indicates a +// whiteout exists for name. It is supported by directories that wish to +// mask the existence of name. +func XattrOverlayWhiteout(name string) string { + return XattrOverlayWhiteoutPrefix + name +} + +// isXattrOverlay returns whether the given extended attribute configures the +// overlay. +func isXattrOverlay(name string) bool { + return strings.HasPrefix(name, XattrOverlayPrefix) +} + +// NewOverlayRoot produces the root of an overlay. 
+// +// Preconditions: +// +// - upper and lower must be non-nil. +// - upper must not be an overlay. +// - lower should not expose character devices, pipes, or sockets, because +// copying up these types of files is not supported. +// - lower must not require that file objects be revalidated. +// - lower must not have dynamic file/directory content. +func NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags MountSourceFlags) (*Inode, error) { + if !IsDir(upper.StableAttr) { + return nil, fmt.Errorf("upper Inode is a %v, not a directory", upper.StableAttr.Type) + } + if !IsDir(lower.StableAttr) { + return nil, fmt.Errorf("lower Inode is a %v, not a directory", lower.StableAttr.Type) + } + if upper.overlay != nil { + return nil, fmt.Errorf("cannot nest overlay in upper file of another overlay") + } + + msrc := newOverlayMountSource(ctx, upper.MountSource, lower.MountSource, flags) + overlay, err := newOverlayEntry(ctx, upper, lower, true) + if err != nil { + msrc.DecRef() + return nil, err + } + + return newOverlayInode(ctx, overlay, msrc), nil +} + +// NewOverlayRootFile produces the root of an overlay that points to a file. +// +// Preconditions: +// +// - lower must be non-nil. +// - lower should not expose character devices, pipes, or sockets, because +// copying up these types of files is not supported. Neither it can be a dir. +// - lower must not require that file objects be revalidated. +// - lower must not have dynamic file/directory content. 
+func NewOverlayRootFile(ctx context.Context, upperMS *MountSource, lower *Inode, flags MountSourceFlags) (*Inode, error) { + if !IsRegular(lower.StableAttr) { + return nil, fmt.Errorf("lower Inode is not a regular file") + } + msrc := newOverlayMountSource(ctx, upperMS, lower.MountSource, flags) + overlay, err := newOverlayEntry(ctx, nil, lower, true) + if err != nil { + msrc.DecRef() + return nil, err + } + return newOverlayInode(ctx, overlay, msrc), nil +} + +// newOverlayInode creates a new Inode for an overlay. +func newOverlayInode(ctx context.Context, o *overlayEntry, msrc *MountSource) *Inode { + var inode *Inode + if o.upper != nil { + inode = NewInode(ctx, nil, msrc, o.upper.StableAttr) + } else { + inode = NewInode(ctx, nil, msrc, o.lower.StableAttr) + } + inode.overlay = o + return inode +} + +// overlayEntry is the overlay metadata of an Inode. It implements Mappable. +// +// +stateify savable +type overlayEntry struct { + // lowerExists is true if an Inode exists for this file in the lower + // filesystem. If lowerExists is true, then the overlay must create + // a whiteout entry when renaming and removing this entry to mask the + // lower Inode. + // + // Note that this is distinct from actually holding onto a non-nil + // lower Inode (below). The overlay does not need to keep a lower Inode + // around unless it needs to operate on it, but it always needs to know + // whether the lower Inode exists to correctly execute a rename or + // remove operation. + lowerExists bool + + // lower is an Inode from a lower filesystem. Modifications are + // never made on this Inode. + lower *Inode + + // copyMu serializes copy-up for operations above + // mm.MemoryManager.mappingMu in the lock order. + copyMu sync.RWMutex `state:"nosave"` + + // mapsMu serializes copy-up for operations between + // mm.MemoryManager.mappingMu and mm.MemoryManager.activeMu in the lock + // order. 
+ mapsMu sync.Mutex `state:"nosave"` + + // mappings tracks memory mappings of this Mappable so they can be removed + // from the lower filesystem Mappable and added to the upper filesystem + // Mappable when copy up occurs. It is strictly unnecessary after copy-up. + // + // mappings is protected by mapsMu. + mappings memmap.MappingSet + + // dataMu serializes copy-up for operations below mm.MemoryManager.activeMu + // in the lock order. + dataMu sync.RWMutex `state:"nosave"` + + // upper is an Inode from an upper filesystem. It is non-nil if + // the file exists in the upper filesystem. It becomes non-nil + // when the Inode that owns this overlayEntry is modified. + // + // upper is protected by all of copyMu, mapsMu, and dataMu. Holding any of + // these locks is sufficient to read upper; holding all three for writing + // is required to mutate it. + upper *Inode + + // dirCacheMu protects dirCache. + dirCacheMu sync.RWMutex `state:"nosave"` + + // dirCache is cache of DentAttrs from upper and lower Inodes. + dirCache *SortedDentryMap +} + +// newOverlayEntry returns a new overlayEntry. +func newOverlayEntry(ctx context.Context, upper *Inode, lower *Inode, lowerExists bool) (*overlayEntry, error) { + if upper == nil && lower == nil { + panic("invalid overlayEntry, needs at least one Inode") + } + if upper != nil && upper.overlay != nil { + panic("nested writable layers are not supported") + } + // Check for supported lower filesystem types. + if lower != nil { + switch lower.StableAttr.Type { + case RegularFile, Directory, Symlink, Socket: + default: + // We don't support copying up from character devices, + // named pipes, or anything weird (like proc files). 
+ log.Warningf("%s not supported in lower filesytem", lower.StableAttr.Type) + return nil, syserror.EINVAL + } + } + return &overlayEntry{ + lowerExists: lowerExists, + lower: lower, + upper: upper, + }, nil +} + +func (o *overlayEntry) release() { + // We drop a reference on upper and lower file system Inodes + // rather than releasing them, because in-memory filesystems + // may hold an extra reference to these Inodes so that they + // stay in memory. + if o.upper != nil { + o.upper.DecRef() + } + if o.lower != nil { + o.lower.DecRef() + } +} + +// overlayUpperMountSource gives the upper mount of an overlay mount. +// +// The caller may not use this MountSource past the lifetime of overlayMountSource and may +// not call DecRef on it. +func overlayUpperMountSource(overlayMountSource *MountSource) *MountSource { + return overlayMountSource.MountSourceOperations.(*overlayMountSourceOperations).upper +} + +// Preconditions: At least one of o.copyMu, o.mapsMu, or o.dataMu must be locked. +func (o *overlayEntry) inodeLocked() *Inode { + if o.upper != nil { + return o.upper + } + return o.lower +} + +// Preconditions: At least one of o.copyMu, o.mapsMu, or o.dataMu must be locked. +func (o *overlayEntry) isMappableLocked() bool { + return o.inodeLocked().Mappable() != nil +} + +// markDirectoryDirty marks any cached data dirty for this directory. This is +// necessary in order to ensure that this node does not retain stale state +// throughout its lifetime across multiple open directory handles. +// +// Currently this means invalidating any readdir caches. +func (o *overlayEntry) markDirectoryDirty() { + o.dirCacheMu.Lock() + o.dirCache = nil + o.dirCacheMu.Unlock() +} + +// AddMapping implements memmap.Mappable.AddMapping. 
+func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset, writable); err != nil { + return err + } + o.mappings.AddMapping(ms, ar, offset, writable) + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset, writable) + o.mappings.RemoveMapping(ms, ar, offset, writable) +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset, writable); err != nil { + return err + } + o.mappings.AddMapping(ms, dstAR, offset, writable) + return nil +} + +// Translate implements memmap.Mappable.Translate. +func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + o.dataMu.RLock() + defer o.dataMu.RUnlock() + return o.inodeLocked().Mappable().Translate(ctx, required, optional, at) +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (o *overlayEntry) InvalidateUnsavable(ctx context.Context) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + return o.inodeLocked().Mappable().InvalidateUnsavable(ctx) +} diff --git a/pkg/sentry/fs/path.go b/pkg/sentry/fs/path.go new file mode 100644 index 000000000..e4dc02dbb --- /dev/null +++ b/pkg/sentry/fs/path.go @@ -0,0 +1,119 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "path/filepath" + "strings" +) + +// TrimTrailingSlashes trims any trailing slashes. +// +// The returned boolean indicates whether any changes were made. +// +//go:nosplit +func TrimTrailingSlashes(dir string) (trimmed string, changed bool) { + // Trim the trailing slash, except for root. + for len(dir) > 1 && dir[len(dir)-1] == '/' { + dir = dir[:len(dir)-1] + changed = true + } + return dir, changed +} + +// SplitLast splits the given path into a directory and a file. +// +// The "absoluteness" of the path is preserved, but dir is always stripped of +// trailing slashes. +// +//go:nosplit +func SplitLast(path string) (dir, file string) { + path, _ = TrimTrailingSlashes(path) + if path == "" { + return ".", "." + } else if path == "/" { + return "/", "." + } + + var slash int // Last location of slash in path. + for slash = len(path) - 1; slash >= 0 && path[slash] != '/'; slash-- { + } + switch { + case slash < 0: + return ".", path + case slash == 0: + // Directory of the form "/foo", or just "/". We need to + // preserve the first slash here, since it indicates an + // absolute path. + return "/", path[1:] + default: + // Drop the trailing slash. + dir, _ = TrimTrailingSlashes(path[:slash]) + return dir, path[slash+1:] + } +} + +// SplitFirst splits the given path into a first directory and the remainder. +// +// If remainder is empty, then the path is a single element. 
+// +//go:nosplit +func SplitFirst(path string) (current, remainder string) { + path, _ = TrimTrailingSlashes(path) + if path == "" { + return ".", "" + } + + var slash int // First location of slash in path. + for slash = 0; slash < len(path) && path[slash] != '/'; slash++ { + } + switch { + case slash >= len(path): + return path, "" + case slash == 0: + // See above. + return "/", path[1:] + default: + current = path[:slash] + remainder = path[slash+1:] + // Strip redundant slashes. + for len(remainder) > 0 && remainder[0] == '/' { + remainder = remainder[1:] + } + return current, remainder + } +} + +// IsSubpath checks whether the first path is a (strict) descendent of the +// second. If it is a subpath, then true is returned along with a clean +// relative path from the second path to the first. Otherwise false is +// returned. +func IsSubpath(subpath, path string) (string, bool) { + cleanPath := filepath.Clean(path) + cleanSubpath := filepath.Clean(subpath) + + // Add a trailing slash to the path if it does not already have one. + if len(cleanPath) == 0 || cleanPath[len(cleanPath)-1] != '/' { + cleanPath += "/" + } + if cleanPath == cleanSubpath { + // Paths are equal, thus not a strict subpath. + return "", false + } + if strings.HasPrefix(cleanSubpath, cleanPath) { + return strings.TrimPrefix(cleanSubpath, cleanPath), true + } + return "", false +} diff --git a/pkg/sentry/fs/path_test.go b/pkg/sentry/fs/path_test.go new file mode 100644 index 000000000..e6f57ebba --- /dev/null +++ b/pkg/sentry/fs/path_test.go @@ -0,0 +1,289 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "testing" +) + +// TestSplitLast tests variants of path splitting. +func TestSplitLast(t *testing.T) { + cases := []struct { + path string + dir string + file string + }{ + {path: "/", dir: "/", file: "."}, + {path: "/.", dir: "/", file: "."}, + {path: "/./", dir: "/", file: "."}, + {path: "/./.", dir: "/.", file: "."}, + {path: "/././", dir: "/.", file: "."}, + {path: "/./..", dir: "/.", file: ".."}, + {path: "/./../", dir: "/.", file: ".."}, + {path: "/..", dir: "/", file: ".."}, + {path: "/../", dir: "/", file: ".."}, + {path: "/../.", dir: "/..", file: "."}, + {path: "/.././", dir: "/..", file: "."}, + {path: "/../..", dir: "/..", file: ".."}, + {path: "/../../", dir: "/..", file: ".."}, + + {path: "", dir: ".", file: "."}, + {path: ".", dir: ".", file: "."}, + {path: "./", dir: ".", file: "."}, + {path: "./.", dir: ".", file: "."}, + {path: "././", dir: ".", file: "."}, + {path: "./..", dir: ".", file: ".."}, + {path: "./../", dir: ".", file: ".."}, + {path: "..", dir: ".", file: ".."}, + {path: "../", dir: ".", file: ".."}, + {path: "../.", dir: "..", file: "."}, + {path: ".././", dir: "..", file: "."}, + {path: "../..", dir: "..", file: ".."}, + {path: "../../", dir: "..", file: ".."}, + + {path: "/foo", dir: "/", file: "foo"}, + {path: "/foo/", dir: "/", file: "foo"}, + {path: "/foo/.", dir: "/foo", file: "."}, + {path: "/foo/./", dir: "/foo", file: "."}, + {path: "/foo/./.", dir: "/foo/.", file: "."}, + {path: "/foo/./..", dir: "/foo/.", file: ".."}, + {path: "/foo/..", dir: "/foo", file: ".."}, + {path: 
"/foo/../", dir: "/foo", file: ".."}, + {path: "/foo/../.", dir: "/foo/..", file: "."}, + {path: "/foo/../..", dir: "/foo/..", file: ".."}, + + {path: "/foo/bar", dir: "/foo", file: "bar"}, + {path: "/foo/bar/", dir: "/foo", file: "bar"}, + {path: "/foo/bar/.", dir: "/foo/bar", file: "."}, + {path: "/foo/bar/./", dir: "/foo/bar", file: "."}, + {path: "/foo/bar/./.", dir: "/foo/bar/.", file: "."}, + {path: "/foo/bar/./..", dir: "/foo/bar/.", file: ".."}, + {path: "/foo/bar/..", dir: "/foo/bar", file: ".."}, + {path: "/foo/bar/../", dir: "/foo/bar", file: ".."}, + {path: "/foo/bar/../.", dir: "/foo/bar/..", file: "."}, + {path: "/foo/bar/../..", dir: "/foo/bar/..", file: ".."}, + + {path: "foo", dir: ".", file: "foo"}, + {path: "foo", dir: ".", file: "foo"}, + {path: "foo/", dir: ".", file: "foo"}, + {path: "foo/.", dir: "foo", file: "."}, + {path: "foo/./", dir: "foo", file: "."}, + {path: "foo/./.", dir: "foo/.", file: "."}, + {path: "foo/./..", dir: "foo/.", file: ".."}, + {path: "foo/..", dir: "foo", file: ".."}, + {path: "foo/../", dir: "foo", file: ".."}, + {path: "foo/../.", dir: "foo/..", file: "."}, + {path: "foo/../..", dir: "foo/..", file: ".."}, + {path: "foo/", dir: ".", file: "foo"}, + {path: "foo/.", dir: "foo", file: "."}, + + {path: "foo/bar", dir: "foo", file: "bar"}, + {path: "foo/bar/", dir: "foo", file: "bar"}, + {path: "foo/bar/.", dir: "foo/bar", file: "."}, + {path: "foo/bar/./", dir: "foo/bar", file: "."}, + {path: "foo/bar/./.", dir: "foo/bar/.", file: "."}, + {path: "foo/bar/./..", dir: "foo/bar/.", file: ".."}, + {path: "foo/bar/..", dir: "foo/bar", file: ".."}, + {path: "foo/bar/../", dir: "foo/bar", file: ".."}, + {path: "foo/bar/../.", dir: "foo/bar/..", file: "."}, + {path: "foo/bar/../..", dir: "foo/bar/..", file: ".."}, + {path: "foo/bar/", dir: "foo", file: "bar"}, + {path: "foo/bar/.", dir: "foo/bar", file: "."}, + } + + for _, c := range cases { + dir, file := SplitLast(c.path) + if dir != c.dir || file != c.file { + 
t.Errorf("SplitLast(%q) got (%q, %q), expected (%q, %q)", c.path, dir, file, c.dir, c.file) + } + } +} + +// TestSplitFirst tests variants of path splitting. +func TestSplitFirst(t *testing.T) { + cases := []struct { + path string + first string + remainder string + }{ + {path: "/", first: "/", remainder: ""}, + {path: "/.", first: "/", remainder: "."}, + {path: "///.", first: "/", remainder: "//."}, + {path: "/.///", first: "/", remainder: "."}, + {path: "/./.", first: "/", remainder: "./."}, + {path: "/././", first: "/", remainder: "./."}, + {path: "/./..", first: "/", remainder: "./.."}, + {path: "/./../", first: "/", remainder: "./.."}, + {path: "/..", first: "/", remainder: ".."}, + {path: "/../", first: "/", remainder: ".."}, + {path: "/../.", first: "/", remainder: "../."}, + {path: "/.././", first: "/", remainder: "../."}, + {path: "/../..", first: "/", remainder: "../.."}, + {path: "/../../", first: "/", remainder: "../.."}, + + {path: "", first: ".", remainder: ""}, + {path: ".", first: ".", remainder: ""}, + {path: "./", first: ".", remainder: ""}, + {path: ".///", first: ".", remainder: ""}, + {path: "./.", first: ".", remainder: "."}, + {path: "././", first: ".", remainder: "."}, + {path: "./..", first: ".", remainder: ".."}, + {path: "./../", first: ".", remainder: ".."}, + {path: "..", first: "..", remainder: ""}, + {path: "../", first: "..", remainder: ""}, + {path: "../.", first: "..", remainder: "."}, + {path: ".././", first: "..", remainder: "."}, + {path: "../..", first: "..", remainder: ".."}, + {path: "../../", first: "..", remainder: ".."}, + + {path: "/foo", first: "/", remainder: "foo"}, + {path: "/foo/", first: "/", remainder: "foo"}, + {path: "/foo///", first: "/", remainder: "foo"}, + {path: "/foo/.", first: "/", remainder: "foo/."}, + {path: "/foo/./", first: "/", remainder: "foo/."}, + {path: "/foo/./.", first: "/", remainder: "foo/./."}, + {path: "/foo/./..", first: "/", remainder: "foo/./.."}, + {path: "/foo/..", first: "/", 
remainder: "foo/.."}, + {path: "/foo/../", first: "/", remainder: "foo/.."}, + {path: "/foo/../.", first: "/", remainder: "foo/../."}, + {path: "/foo/../..", first: "/", remainder: "foo/../.."}, + + {path: "/foo/bar", first: "/", remainder: "foo/bar"}, + {path: "///foo/bar", first: "/", remainder: "//foo/bar"}, + {path: "/foo///bar", first: "/", remainder: "foo///bar"}, + {path: "/foo/bar/.", first: "/", remainder: "foo/bar/."}, + {path: "/foo/bar/./", first: "/", remainder: "foo/bar/."}, + {path: "/foo/bar/./.", first: "/", remainder: "foo/bar/./."}, + {path: "/foo/bar/./..", first: "/", remainder: "foo/bar/./.."}, + {path: "/foo/bar/..", first: "/", remainder: "foo/bar/.."}, + {path: "/foo/bar/../", first: "/", remainder: "foo/bar/.."}, + {path: "/foo/bar/../.", first: "/", remainder: "foo/bar/../."}, + {path: "/foo/bar/../..", first: "/", remainder: "foo/bar/../.."}, + + {path: "foo", first: "foo", remainder: ""}, + {path: "foo", first: "foo", remainder: ""}, + {path: "foo/", first: "foo", remainder: ""}, + {path: "foo///", first: "foo", remainder: ""}, + {path: "foo/.", first: "foo", remainder: "."}, + {path: "foo/./", first: "foo", remainder: "."}, + {path: "foo/./.", first: "foo", remainder: "./."}, + {path: "foo/./..", first: "foo", remainder: "./.."}, + {path: "foo/..", first: "foo", remainder: ".."}, + {path: "foo/../", first: "foo", remainder: ".."}, + {path: "foo/../.", first: "foo", remainder: "../."}, + {path: "foo/../..", first: "foo", remainder: "../.."}, + {path: "foo/", first: "foo", remainder: ""}, + {path: "foo/.", first: "foo", remainder: "."}, + + {path: "foo/bar", first: "foo", remainder: "bar"}, + {path: "foo///bar", first: "foo", remainder: "bar"}, + {path: "foo/bar/", first: "foo", remainder: "bar"}, + {path: "foo/bar/.", first: "foo", remainder: "bar/."}, + {path: "foo/bar/./", first: "foo", remainder: "bar/."}, + {path: "foo/bar/./.", first: "foo", remainder: "bar/./."}, + {path: "foo/bar/./..", first: "foo", remainder: "bar/./.."}, + 
{path: "foo/bar/..", first: "foo", remainder: "bar/.."}, + {path: "foo/bar/../", first: "foo", remainder: "bar/.."}, + {path: "foo/bar/../.", first: "foo", remainder: "bar/../."}, + {path: "foo/bar/../..", first: "foo", remainder: "bar/../.."}, + {path: "foo/bar/", first: "foo", remainder: "bar"}, + {path: "foo/bar/.", first: "foo", remainder: "bar/."}, + } + + for _, c := range cases { + first, remainder := SplitFirst(c.path) + if first != c.first || remainder != c.remainder { + t.Errorf("SplitFirst(%q) got (%q, %q), expected (%q, %q)", c.path, first, remainder, c.first, c.remainder) + } + } +} + +// TestIsSubpath tests the IsSubpath method. +func TestIsSubpath(t *testing.T) { + tcs := []struct { + // Two absolute paths. + pathA string + pathB string + + // Whether pathA is a subpath of pathB. + wantIsSubpath bool + + // Relative path from pathA to pathB. Only checked if + // wantIsSubpath is true. + wantRelpath string + }{ + { + pathA: "/foo/bar/baz", + pathB: "/foo", + wantIsSubpath: true, + wantRelpath: "bar/baz", + }, + { + pathA: "/foo", + pathB: "/foo/bar/baz", + wantIsSubpath: false, + }, + { + pathA: "/foo", + pathB: "/foo", + wantIsSubpath: false, + }, + { + pathA: "/foobar", + pathB: "/foo", + wantIsSubpath: false, + }, + { + pathA: "/foo", + pathB: "/foobar", + wantIsSubpath: false, + }, + { + pathA: "/foo", + pathB: "/foobar", + wantIsSubpath: false, + }, + { + pathA: "/", + pathB: "/foo", + wantIsSubpath: false, + }, + { + pathA: "/foo", + pathB: "/", + wantIsSubpath: true, + wantRelpath: "foo", + }, + { + pathA: "/foo/bar/../bar", + pathB: "/foo", + wantIsSubpath: true, + wantRelpath: "bar", + }, + { + pathA: "/foo/bar", + pathB: "/foo/../foo", + wantIsSubpath: true, + wantRelpath: "bar", + }, + } + + for _, tc := range tcs { + gotRelpath, gotIsSubpath := IsSubpath(tc.pathA, tc.pathB) + if gotRelpath != tc.wantRelpath || gotIsSubpath != tc.wantIsSubpath { + t.Errorf("IsSubpath(%q, %q) got %q %t, want %q %t", tc.pathA, tc.pathB, gotRelpath, 
gotIsSubpath, tc.wantRelpath, tc.wantIsSubpath) + } + } +} diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD new file mode 100644 index 000000000..77c2c5c0e --- /dev/null +++ b/pkg/sentry/fs/proc/BUILD @@ -0,0 +1,72 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "proc", + srcs = [ + "cgroup.go", + "cpuinfo.go", + "exec_args.go", + "fds.go", + "filesystems.go", + "fs.go", + "inode.go", + "loadavg.go", + "meminfo.go", + "mounts.go", + "net.go", + "proc.go", + "stat.go", + "sys.go", + "sys_net.go", + "sys_net_state.go", + "task.go", + "uid_gid_map.go", + "uptime.go", + "version.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/log", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/proc/device", + "//pkg/sentry/fs/proc/seqfile", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/fsbridge", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/mm", + "//pkg/sentry/socket", + "//pkg/sentry/socket/unix", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/usage", + "//pkg/sync", + "//pkg/syserror", + "//pkg/tcpip/header", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "proc_test", + size = "small", + srcs = [ + "net_test.go", + "sys_net_test.go", + ], + library = ":proc", + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/sentry/inet", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/proc/README.md b/pkg/sentry/fs/proc/README.md new file mode 100644 index 000000000..6667a0916 --- /dev/null +++ b/pkg/sentry/fs/proc/README.md @@ -0,0 +1,336 @@ +This document tracks what is implemented in procfs. Refer to +Documentation/filesystems/proc.txt in the Linux project for information about +procfs generally. + +**NOTE**: This document is not guaranteed to be up to date. 
If you find an +inconsistency, please file a bug. + +[TOC] + +## Kernel data + +The following files are implemented: + +<!-- mdformat off(don't wrap the table) --> + +| File /proc/ | Content | +| :------------------------ | :---------------------------------------------------- | +| [cpuinfo](#cpuinfo) | Info about the CPU | +| [filesystems](#filesystems) | Supported filesystems | +| [loadavg](#loadavg) | Load average of last 1, 5 & 15 minutes | +| [meminfo](#meminfo) | Overall memory info | +| [stat](#stat) | Overall kernel statistics | +| [sys](#sys) | Change parameters within the kernel | +| [uptime](#uptime) | Wall clock since boot, combined idle time of all cpus | +| [version](#version) | Kernel version | + +<!-- mdformat on --> + +### cpuinfo + +```bash +$ cat /proc/cpuinfo +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : unknown +stepping : unknown +cpu MHz : 1234.588 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx xsaveopt +bogomips : 1234.59 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +... +``` + +Notable divergences: + +Field name | Notes +:--------------- | :--------------------------------------- +model name | Always unknown +stepping | Always unknown +fpu | Always yes +fpu_exception | Always yes +wp | Always yes +bogomips | Bogus value (matches cpu MHz) +clflush size | Always 64 +cache_alignment | Always 64 +address sizes | Always 46 bits physical, 48 bits virtual +power management | Always blank + +Otherwise fields are derived from the sentry configuration. 
+
+### filesystems
+
+```bash
+$ cat /proc/filesystems
+nodev 9p
+nodev devpts
+nodev devtmpfs
+nodev proc
+nodev sysfs
+nodev tmpfs
+```
+
+### loadavg
+
+```bash
+$ cat /proc/loadavg
+0.00 0.00 0.00 0/0 0
+```
+
+Column | Notes
+:------------------------------------ | :----------
+CPU.IO utilization in last 1 minute | Always zero
+CPU.IO utilization in last 5 minutes | Always zero
+CPU.IO utilization in last 15 minutes | Always zero
+Num currently running processes | Always zero
+Total num processes | Always zero
+
+TODO(b/62345059): Populate the columns with accurate statistics.
+
+### meminfo
+
+```bash
+$ cat /proc/meminfo
+MemTotal: 2097152 kB
+MemFree: 2083540 kB
+MemAvailable: 2083540 kB
+Buffers: 0 kB
+Cached: 4428 kB
+SwapCache: 0 kB
+Active: 10812 kB
+Inactive: 2216 kB
+Active(anon): 8600 kB
+Inactive(anon): 0 kB
+Active(file): 2212 kB
+Inactive(file): 2216 kB
+Unevictable: 0 kB
+Mlocked: 0 kB
+SwapTotal: 0 kB
+SwapFree: 0 kB
+Dirty: 0 kB
+Writeback: 0 kB
+AnonPages: 8600 kB
+Mapped: 4428 kB
+Shmem: 0 kB
+
+```
+
+Notable divergences:
+
+Field name | Notes
+:---------------- | :-----------------------------------------------------
+Buffers | Always zero, no block devices
+SwapCache | Always zero, no swap
+Inactive(anon) | Always zero, see SwapCache
+Unevictable | Always zero TODO(b/31823263)
+Mlocked | Always zero TODO(b/31823263)
+SwapTotal | Always zero, no swap
+SwapFree | Always zero, no swap
+Dirty | Always zero TODO(b/31823263)
+Writeback | Always zero TODO(b/31823263)
+MemAvailable | Uses the same value as MemFree since there is no swap.
+Slab | Missing
+SReclaimable | Missing
+SUnreclaim | Missing
+KernelStack | Missing
+PageTables | Missing
+NFS_Unstable | Missing
+Bounce | Missing
+WritebackTmp | Missing
+CommitLimit | Missing
+Committed_AS | Missing
+VmallocTotal | Missing
+VmallocUsed | Missing
+VmallocChunk | Missing
+HardwareCorrupted | Missing
+AnonHugePages | Missing
+ShmemHugePages | Missing
+ShmemPmdMapped | Missing
+HugePages_Total | Missing
+HugePages_Free | Missing
+HugePages_Rsvd | Missing
+HugePages_Surp | Missing
+Hugepagesize | Missing
+DirectMap4k | Missing
+DirectMap2M | Missing
+DirectMap1G | Missing
+
+### stat
+
+```bash
+$ cat /proc/stat
+cpu 0 0 0 0 0 0 0 0 0 0
+cpu0 0 0 0 0 0 0 0 0 0 0
+cpu1 0 0 0 0 0 0 0 0 0 0
+cpu2 0 0 0 0 0 0 0 0 0 0
+cpu3 0 0 0 0 0 0 0 0 0 0
+cpu4 0 0 0 0 0 0 0 0 0 0
+cpu5 0 0 0 0 0 0 0 0 0 0
+cpu6 0 0 0 0 0 0 0 0 0 0
+cpu7 0 0 0 0 0 0 0 0 0 0
+intr 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 0
+btime 1504040968
+processes 0
+procs_running 0
+procs_blocked 0
+softirq 0 0 0 0 0 0 0 0 0 0 0
+```
+
+All fields except for `btime` are always zero.
+
+TODO(b/37226836): Populate with accurate fields.
+ +### sys + +```bash +$ ls /proc/sys +kernel vm +``` + +Directory | Notes +:-------- | :---------------------------- +abi | Missing +debug | Missing +dev | Missing +fs | Missing +kernel | Contains hostname (only) +net | Missing +user | Missing +vm | Contains mmap_min_addr (only) + +### uptime + +```bash +$ cat /proc/uptime +3204.62 0.00 +``` + +Column | Notes +:------------------------------- | :---------------------------- +Total num seconds system running | Time since procfs was mounted +Number of seconds idle | Always zero + +### version + +```bash +$ cat /proc/version +Linux version 4.4 #1 SMP Sun Jan 10 15:06:54 PST 2016 +``` + +## Process-specific data + +The following files are implemented: + +File /proc/PID | Content +:---------------------- | :--------------------------------------------------- +[auxv](#auxv) | Copy of auxiliary vector for the process +[cmdline](#cmdline) | Command line arguments +[comm](#comm) | Command name associated with the process +[environ](#environ) | Process environment +[exe](#exe) | Symlink to the process's executable +[fd](#fd) | Directory containing links to open file descriptors +[fdinfo](#fdinfo) | Information associated with open file descriptors +[gid_map](#gid_map) | Mappings for group IDs inside the user namespace +[io](#io) | IO statistics +[maps](#maps) | Memory mappings (anon, executables, library files) +[mounts](#mounts) | Mounted filesystems +[mountinfo](#mountinfo) | Information about mounts +[ns](#ns) | Directory containing info about supported namespaces +[stat](#stat) | Process statistics +[statm](#statm) | Process memory statistics +[status](#status) | Process status in human readable format +[task](#task) | Directory containing info about running threads +[uid_map](#uid_map) | Mappings for user IDs inside the user namespace + +### auxv + +TODO + +### cmdline + +TODO + +### comm + +TODO + +### environment + +TODO + +### exe + +TODO + +### fd + +TODO + +### fdinfo + +TODO + +### gid_map + +TODO + +### io + 
+Only has data for rchar, wchar, syscr, and syscw. + +TODO: add more detail. + +### maps + +TODO + +### mounts + +TODO + +### mountinfo + +TODO + +### ns + +TODO + +### stat + +Only has data for pid, comm, state, ppid, utime, stime, cutime, cstime, +num_threads, and exit_signal. + +TODO: add more detail. + +### statm + +Only has data for vss and rss. + +TODO: add more detail. + +### status + +Contains data for Name, State, Tgid, Pid, Ppid, TracerPid, FDSize, VmSize, +VmRSS, Threads, CapInh, CapPrm, CapEff, CapBnd, Seccomp. + +TODO: add more detail. + +### task + +TODO + +### uid_map + +TODO diff --git a/pkg/sentry/fs/proc/cgroup.go b/pkg/sentry/fs/proc/cgroup.go new file mode 100644 index 000000000..7c1d9e7e9 --- /dev/null +++ b/pkg/sentry/fs/proc/cgroup.go @@ -0,0 +1,45 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// LINT.IfChange + +func newCGroupInode(ctx context.Context, msrc *fs.MountSource, cgroupControllers map[string]string) *fs.Inode { + // From man 7 cgroups: "For each cgroup hierarchy of which the process + // is a member, there is one entry containing three colon-separated + // fields: hierarchy-ID:controller-list:cgroup-path" + + // The hierarchy ids must be positive integers (for cgroup v1), but the + // exact number does not matter, so long as they are unique. 
We can + // just use a counter, but since linux sorts this file in descending + // order, we must count down to perserve this behavior. + i := len(cgroupControllers) + var data string + for name, dir := range cgroupControllers { + data += fmt.Sprintf("%d:%s:%s\n", i, name, dir) + i-- + } + + return newStaticProcInode(ctx, msrc, []byte(data)) +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_files.go) diff --git a/pkg/sentry/fs/proc/cpuinfo.go b/pkg/sentry/fs/proc/cpuinfo.go new file mode 100644 index 000000000..c96533401 --- /dev/null +++ b/pkg/sentry/fs/proc/cpuinfo.go @@ -0,0 +1,41 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel" +) + +// LINT.IfChange + +func newCPUInfo(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + k := kernel.KernelFromContext(ctx) + features := k.FeatureSet() + if features == nil { + // Kernel is always initialized with a FeatureSet. 
+ panic("cpuinfo read with nil FeatureSet") + } + var buf bytes.Buffer + for i, max := uint(0), k.ApplicationCores(); i < max; i++ { + features.WriteCPUInfoTo(i, &buf) + } + return newStaticProcInode(ctx, msrc, buf.Bytes()) +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_files.go) diff --git a/pkg/sentry/fs/proc/device/BUILD b/pkg/sentry/fs/proc/device/BUILD new file mode 100644 index 000000000..52c9aa93d --- /dev/null +++ b/pkg/sentry/fs/proc/device/BUILD @@ -0,0 +1,10 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "device", + srcs = ["device.go"], + visibility = ["//pkg/sentry:internal"], + deps = ["//pkg/sentry/device"], +) diff --git a/pkg/sentry/fs/proc/device/device.go b/pkg/sentry/fs/proc/device/device.go new file mode 100644 index 000000000..bbe66e796 --- /dev/null +++ b/pkg/sentry/fs/proc/device/device.go @@ -0,0 +1,23 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package device contains the proc device to avoid dependency loops. +package device + +import ( + "gvisor.dev/gvisor/pkg/sentry/device" +) + +// ProcDevice is the kernel proc device. +var ProcDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go new file mode 100644 index 000000000..8fe626e1c --- /dev/null +++ b/pkg/sentry/fs/proc/exec_args.go @@ -0,0 +1,207 @@ +// Copyright 2018 The gVisor Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"gvisor.dev/gvisor/pkg/abi/linux"
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/sentry/fs"
+	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
+	"gvisor.dev/gvisor/pkg/sentry/kernel"
+	"gvisor.dev/gvisor/pkg/syserror"
+	"gvisor.dev/gvisor/pkg/usermem"
+	"gvisor.dev/gvisor/pkg/waiter"
+)
+
+// LINT.IfChange
+
+// execArgType enumerates the types of exec arguments that are exposed through
+// proc.
+type execArgType int
+
+const (
+	cmdlineExecArg execArgType = iota
+	environExecArg
+)
+
+// execArgInode is an inode containing the exec args (either cmdline or environ)
+// for a given task.
+//
+// +stateify savable
+type execArgInode struct {
+	fsutil.SimpleFileInode
+
+	// arg is the type of exec argument this file contains.
+	arg execArgType
+
+	// t is the Task to read the exec arg line from.
+	t *kernel.Task
+}
+
+var _ fs.InodeOperations = (*execArgInode)(nil)
+
+// newExecArgInode creates a file containing the exec args of the given type.
+func newExecArgInode(t *kernel.Task, msrc *fs.MountSource, arg execArgType) *fs.Inode { + if arg != cmdlineExecArg && arg != environExecArg { + panic(fmt.Sprintf("unknown exec arg type %v", arg)) + } + f := &execArgInode{ + SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC), + arg: arg, + t: t, + } + return newProcInode(t, f, msrc, fs.SpecialFile, t) +} + +// GetFile implements fs.InodeOperations.GetFile. +func (i *execArgInode) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &execArgFile{ + arg: i.arg, + t: i.t, + }), nil +} + +// +stateify savable +type execArgFile struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopWrite `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + // arg is the type of exec argument this file contains. + arg execArgType + + // t is the Task to read the exec arg line from. + t *kernel.Task +} + +var _ fs.FileOperations = (*execArgFile)(nil) + +// Read reads the exec arg from the process's address space.. +func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + + m, err := getTaskMM(f.t) + if err != nil { + return 0, err + } + defer m.DecUsers(ctx) + + // Figure out the bounds of the exec arg we are trying to read. 
+ var execArgStart, execArgEnd usermem.Addr + switch f.arg { + case cmdlineExecArg: + execArgStart, execArgEnd = m.ArgvStart(), m.ArgvEnd() + case environExecArg: + execArgStart, execArgEnd = m.EnvvStart(), m.EnvvEnd() + default: + panic(fmt.Sprintf("unknown exec arg type %v", f.arg)) + } + if execArgStart == 0 || execArgEnd == 0 { + // Don't attempt to read before the start/end are set up. + return 0, io.EOF + } + + start, ok := execArgStart.AddLength(uint64(offset)) + if !ok { + return 0, io.EOF + } + if start >= execArgEnd { + return 0, io.EOF + } + + length := int(execArgEnd - start) + if dstlen := dst.NumBytes(); int64(length) > dstlen { + length = int(dstlen) + } + + buf := make([]byte, length) + // N.B. Technically this should be usermem.IOOpts.IgnorePermissions = true + // until Linux 4.9 (272ddc8b3735 "proc: don't use FOLL_FORCE for reading + // cmdline and environment"). + copyN, err := m.CopyIn(ctx, start, buf, usermem.IOOpts{}) + if copyN == 0 { + // Nothing to copy. + return 0, err + } + buf = buf[:copyN] + + // On Linux, if the NUL byte at the end of the argument vector has been + // overwritten, it continues reading the environment vector as part of + // the argument vector. + + if f.arg == cmdlineExecArg && buf[copyN-1] != 0 { + // Linux will limit the return up to and including the first null character in argv + + copyN = bytes.IndexByte(buf, 0) + if copyN == -1 { + copyN = len(buf) + } + // If we found a NUL character in argv, return upto and including that character. + if copyN < len(buf) { + buf = buf[:copyN] + } else { // Otherwise return into envp. + lengthEnvv := int(m.EnvvEnd() - m.EnvvStart()) + + // Upstream limits the returned amount to one page of slop. + // https://elixir.bootlin.com/linux/v4.20/source/fs/proc/base.c#L208 + // we'll return one page total between argv and envp because of the + // above page restrictions. 
+ if lengthEnvv > usermem.PageSize-len(buf) { + lengthEnvv = usermem.PageSize - len(buf) + } + // Make a new buffer to fit the whole thing + tmp := make([]byte, length+lengthEnvv) + copyNE, err := m.CopyIn(ctx, m.EnvvStart(), tmp[copyN:], usermem.IOOpts{}) + if err != nil { + return 0, err + } + + // Linux will return envp up to and including the first NUL character, so find it. + for i, c := range tmp[copyN:] { + if c == 0 { + copyNE = i + break + } + } + + copy(tmp, buf) + buf = tmp[:copyN+copyNE] + + } + + } + + n, dstErr := dst.CopyOut(ctx, buf) + if dstErr != nil { + return int64(n), dstErr + } + return int64(n), err +} + +// LINT.ThenChange(../../fsimpl/proc/task.go) diff --git a/pkg/sentry/fs/proc/fds.go b/pkg/sentry/fs/proc/fds.go new file mode 100644 index 000000000..35972e23c --- /dev/null +++ b/pkg/sentry/fs/proc/fds.go @@ -0,0 +1,283 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "sort" + "strconv" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/syserror" +) + +// LINT.IfChange + +// walkDescriptors finds the descriptor (file-flag pair) for the fd identified +// by p, and calls the toInodeOperations callback with that descriptor. 
This is a helper +// method for implementing fs.InodeOperations.Lookup. +func walkDescriptors(t *kernel.Task, p string, toInode func(*fs.File, kernel.FDFlags) *fs.Inode) (*fs.Inode, error) { + n, err := strconv.ParseUint(p, 10, 64) + if err != nil { + // Not found. + return nil, syserror.ENOENT + } + + var file *fs.File + var fdFlags kernel.FDFlags + t.WithMuLocked(func(t *kernel.Task) { + if fdTable := t.FDTable(); fdTable != nil { + file, fdFlags = fdTable.Get(int32(n)) + } + }) + if file == nil { + return nil, syserror.ENOENT + } + return toInode(file, fdFlags), nil +} + +// readDescriptors reads fds in the task starting at offset, and calls the +// toDentAttr callback for each to get a DentAttr, which it then emits. This is +// a helper for implementing fs.InodeOperations.Readdir. +func readDescriptors(t *kernel.Task, c *fs.DirCtx, offset int64, toDentAttr func(int) fs.DentAttr) (int64, error) { + var fds []int32 + t.WithMuLocked(func(t *kernel.Task) { + if fdTable := t.FDTable(); fdTable != nil { + fds = fdTable.GetFDs() + } + }) + + // Find the appropriate starting point. + idx := sort.Search(len(fds), func(i int) bool { return fds[i] >= int32(offset) }) + if idx == len(fds) { + return offset, nil + } + fds = fds[idx:] + + // Serialize all FDs. + for _, fd := range fds { + name := strconv.FormatUint(uint64(fd), 10) + if err := c.DirEmit(name, toDentAttr(int(fd))); err != nil { + // Returned offset is the next fd to serialize. + return int64(fd), err + } + } + // We serialized them all. Next offset should be higher than last + // serialized fd. + return int64(fds[len(fds)-1] + 1), nil +} + +// fd implements fs.InodeOperations for a file in /proc/TID/fd/. +type fd struct { + ramfs.Symlink + file *fs.File +} + +var _ fs.InodeOperations = (*fd)(nil) + +// newFd returns a new fd based on an existing file. +// +// This inherits one reference to the file. 
+func newFd(t *kernel.Task, f *fs.File, msrc *fs.MountSource) *fs.Inode { + fd := &fd{ + // RootOwner overridden by taskOwnedInodeOps.UnstableAttrs(). + Symlink: *ramfs.NewSymlink(t, fs.RootOwner, ""), + file: f, + } + return newProcInode(t, fd, msrc, fs.Symlink, t) +} + +// GetFile returns the fs.File backing this fd. The dirent and flags +// arguments are ignored. +func (f *fd) GetFile(context.Context, *fs.Dirent, fs.FileFlags) (*fs.File, error) { + // Take a reference on the fs.File. + f.file.IncRef() + return f.file, nil +} + +// Readlink returns the current target. +func (f *fd) Readlink(ctx context.Context, _ *fs.Inode) (string, error) { + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + n, _ := f.file.Dirent.FullName(root) + return n, nil +} + +// Getlink implements fs.InodeOperations.Getlink. +func (f *fd) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + f.file.Dirent.IncRef() + return f.file.Dirent, nil +} + +// Truncate is ignored. +func (f *fd) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +func (f *fd) Release(ctx context.Context) { + f.Symlink.Release(ctx) + f.file.DecRef() +} + +// Close releases the reference on the file. +func (f *fd) Close() error { + f.file.DecRef() + return nil +} + +// fdDir is an InodeOperations for /proc/TID/fd. +// +// +stateify savable +type fdDir struct { + ramfs.Dir + + // We hold a reference on the task's FDTable but only keep an indirect + // task pointer to avoid Dirent loading circularity caused by the + // table's back pointers into the dirent tree. + t *kernel.Task +} + +var _ fs.InodeOperations = (*fdDir)(nil) + +// newFdDir creates a new fdDir. +func newFdDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + f := &fdDir{ + Dir: *ramfs.NewDir(t, nil, fs.RootOwner, fs.FilePermissions{User: fs.PermMask{Read: true, Execute: true}}), + t: t, + } + return newProcInode(t, f, msrc, fs.SpecialDirectory, t) +} + +// Check implements InodeOperations.Check. 
+// +// This is to match Linux, which uses a special permission handler to guarantee +// that a process can still access /proc/self/fd after it has executed +// setuid. See fs/proc/fd.c:proc_fd_permission. +func (f *fdDir) Check(ctx context.Context, inode *fs.Inode, req fs.PermMask) bool { + if fs.ContextCanAccessFile(ctx, inode, req) { + return true + } + if t := kernel.TaskFromContext(ctx); t != nil { + // Allow access if the task trying to access it is in the + // thread group corresponding to this directory. + if f.t.ThreadGroup() == t.ThreadGroup() { + return true + } + } + return false +} + +// Lookup loads an Inode in /proc/TID/fd into a Dirent. +func (f *fdDir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) { + n, err := walkDescriptors(f.t, p, func(file *fs.File, _ kernel.FDFlags) *fs.Inode { + return newFd(f.t, file, dir.MountSource) + }) + if err != nil { + return nil, err + } + return fs.NewDirent(ctx, n, p), nil +} + +// GetFile implements fs.FileOperations.GetFile. +func (f *fdDir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + fops := &fdDirFile{ + isInfoFile: false, + t: f.t, + } + return fs.NewFile(ctx, dirent, flags, fops), nil +} + +// +stateify savable +type fdDirFile struct { + fsutil.DirFileOperations `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + isInfoFile bool + + t *kernel.Task +} + +var _ fs.FileOperations = (*fdDirFile)(nil) + +// Readdir implements fs.FileOperations.Readdir. +func (f *fdDirFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) { + dirCtx := &fs.DirCtx{ + Serializer: ser, + } + typ := fs.RegularFile + if f.isInfoFile { + typ = fs.Symlink + } + return readDescriptors(f.t, dirCtx, file.Offset(), func(fd int) fs.DentAttr { + return fs.GenericDentAttr(typ, device.ProcDevice) + }) +} + +// fdInfoDir implements /proc/TID/fdinfo. It embeds an fdDir, but overrides +// Lookup and Readdir. 
+// +// +stateify savable +type fdInfoDir struct { + ramfs.Dir + + t *kernel.Task +} + +// newFdInfoDir creates a new fdInfoDir. +func newFdInfoDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + fdid := &fdInfoDir{ + Dir: *ramfs.NewDir(t, nil, fs.RootOwner, fs.FilePermsFromMode(0500)), + t: t, + } + return newProcInode(t, fdid, msrc, fs.SpecialDirectory, t) +} + +// Lookup loads an fd in /proc/TID/fdinfo into a Dirent. +func (fdid *fdInfoDir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) { + inode, err := walkDescriptors(fdid.t, p, func(file *fs.File, fdFlags kernel.FDFlags) *fs.Inode { + // TODO(b/121266871): Using a static inode here means that the + // data can be out-of-date if, for instance, the flags on the + // FD change before we read this file. We should switch to + // generating the data on Read(). Also, we should include pos, + // locks, and other data. For now we only have flags. + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + flags := file.Flags().ToLinux() | fdFlags.ToLinuxFileFlags() + file.DecRef() + contents := []byte(fmt.Sprintf("flags:\t0%o\n", flags)) + return newStaticProcInode(ctx, dir.MountSource, contents) + }) + if err != nil { + return nil, err + } + return fs.NewDirent(ctx, inode, p), nil +} + +// GetFile implements fs.FileOperations.GetFile. +func (fdid *fdInfoDir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + fops := &fdDirFile{ + isInfoFile: true, + t: fdid.t, + } + return fs.NewFile(ctx, dirent, flags, fops), nil +} + +// LINT.ThenChange(../../fsimpl/proc/task_files.go) diff --git a/pkg/sentry/fs/proc/filesystems.go b/pkg/sentry/fs/proc/filesystems.go new file mode 100644 index 000000000..0a58ac34c --- /dev/null +++ b/pkg/sentry/fs/proc/filesystems.go @@ -0,0 +1,65 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" +) + +// LINT.IfChange + +// filesystemsData backs /proc/filesystems. +// +// +stateify savable +type filesystemsData struct{} + +// NeedsUpdate returns true on the first generation. The set of registered file +// systems doesn't change so there's no need to generate SeqData more than once. +func (*filesystemsData) NeedsUpdate(generation int64) bool { + return generation == 0 +} + +// ReadSeqFileData returns data for the SeqFile reader. +// SeqData, the current generation and where in the file the handle corresponds to. +func (*filesystemsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + // We don't ever expect to see a non-nil SeqHandle. + if h != nil { + return nil, 0 + } + + // Generate the file contents. + var buf bytes.Buffer + for _, sys := range fs.GetFilesystems() { + if !sys.AllowUserList() { + continue + } + nodev := "nodev" + if sys.Flags()&fs.FilesystemRequiresDev != 0 { + nodev = "" + } + // Matches the format of fs/filesystems.c:filesystems_proc_show. + fmt.Fprintf(&buf, "%s\t%s\n", nodev, sys.Name()) + } + + // Return the SeqData and advance the generation counter. 
+ return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*filesystemsData)(nil)}}, 1 +} + +// LINT.ThenChange(../../fsimpl/proc/filesystem.go) diff --git a/pkg/sentry/fs/proc/fs.go b/pkg/sentry/fs/proc/fs.go new file mode 100644 index 000000000..daf1ba781 --- /dev/null +++ b/pkg/sentry/fs/proc/fs.go @@ -0,0 +1,85 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// LINT.IfChange + +// filesystem is a procfs. +// +// +stateify savable +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches fs/proc/root.c:proc_fs_type.name. +const FilesystemName = "proc" + +// Name is the name of the file system. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. +func (*filesystem) AllowUserMount() bool { + return true +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. +func (*filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, proc returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/proc/root.c. 
+func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns the root of a procfs that can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, cgroupsInt interface{}) (*fs.Inode, error) { + // device is always ignored. + + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Proc options parsing checks for either a gid= or hidepid= and barfs on + // anything else, see fs/proc/root.c:proc_parse_options. Since we don't know + // what to do with gid= or hidepid=, we blow up if we get any options. + if len(options) > 0 { + return nil, fmt.Errorf("unsupported mount options: %v", options) + } + + var cgroups map[string]string + if cgroupsInt != nil { + cgroups = cgroupsInt.(map[string]string) + } + + // Construct the procfs root. Since procfs files are all virtual, we + // never want them cached. + return New(ctx, fs.NewNonCachingMountSource(ctx, f, flags), cgroups) +} + +// LINT.ThenChange(../../fsimpl/proc/filesystem.go) diff --git a/pkg/sentry/fs/proc/inode.go b/pkg/sentry/fs/proc/inode.go new file mode 100644 index 000000000..d2859a4c2 --- /dev/null +++ b/pkg/sentry/fs/proc/inode.go @@ -0,0 +1,137 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/mm" + "gvisor.dev/gvisor/pkg/usermem" +) + +// LINT.IfChange + +// taskOwnedInodeOps wraps an fs.InodeOperations and overrides the UnstableAttr +// method to return either the task or root as the owner, depending on the +// task's dumpability. +// +// +stateify savable +type taskOwnedInodeOps struct { + fs.InodeOperations + + // t is the task that owns this file. + t *kernel.Task +} + +// UnstableAttr implement fs.InodeOperations.UnstableAttr. +func (i *taskOwnedInodeOps) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, err := i.InodeOperations.UnstableAttr(ctx, inode) + if err != nil { + return fs.UnstableAttr{}, err + } + + // By default, set the task owner as the file owner. + creds := i.t.Credentials() + uattr.Owner = fs.FileOwner{creds.EffectiveKUID, creds.EffectiveKGID} + + // Linux doesn't apply dumpability adjustments to world + // readable/executable directories so that applications can stat + // /proc/PID to determine the effective UID of a process. See + // fs/proc/base.c:task_dump_owner. + if fs.IsDir(inode.StableAttr) && uattr.Perms == fs.FilePermsFromMode(0555) { + return uattr, nil + } + + // If the task is not dumpable, then root (in the namespace preferred) + // owns the file. 
+ var m *mm.MemoryManager + i.t.WithMuLocked(func(t *kernel.Task) { + m = t.MemoryManager() + }) + + if m == nil { + uattr.Owner.UID = auth.RootKUID + uattr.Owner.GID = auth.RootKGID + } else if m.Dumpability() != mm.UserDumpable { + if kuid := creds.UserNamespace.MapToKUID(auth.RootUID); kuid.Ok() { + uattr.Owner.UID = kuid + } else { + uattr.Owner.UID = auth.RootKUID + } + if kgid := creds.UserNamespace.MapToKGID(auth.RootGID); kgid.Ok() { + uattr.Owner.GID = kgid + } else { + uattr.Owner.GID = auth.RootKGID + } + } + + return uattr, nil +} + +// staticFileInodeOps is an InodeOperations implementation that can be used to +// return file contents which are constant. This file is not writable and will +// always have mode 0444. +// +// +stateify savable +type staticFileInodeOps struct { + fsutil.InodeDenyWriteChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopAllocate `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeStaticFileGetter +} + +var _ fs.InodeOperations = (*staticFileInodeOps)(nil) + +// newStaticFileInode returns a procfs InodeOperations with static contents. +func newStaticProcInode(ctx context.Context, msrc *fs.MountSource, contents []byte) *fs.Inode { + iops := &staticFileInodeOps{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC), + InodeStaticFileGetter: fsutil.InodeStaticFileGetter{ + Contents: contents, + }, + } + return newProcInode(ctx, iops, msrc, fs.SpecialFile, nil) +} + +// newProcInode creates a new inode from the given inode operations. 
+func newProcInode(ctx context.Context, iops fs.InodeOperations, msrc *fs.MountSource, typ fs.InodeType, t *kernel.Task) *fs.Inode { + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: typ, + } + if t != nil { + iops = &taskOwnedInodeOps{iops, t} + } + return fs.NewInode(ctx, iops, msrc, sattr) +} + +// LINT.ThenChange(../../fsimpl/proc/tasks.go) diff --git a/pkg/sentry/fs/proc/loadavg.go b/pkg/sentry/fs/proc/loadavg.go new file mode 100644 index 000000000..139d49c34 --- /dev/null +++ b/pkg/sentry/fs/proc/loadavg.go @@ -0,0 +1,59 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" +) + +// LINT.IfChange + +// loadavgData backs /proc/loadavg. +// +// +stateify savable +type loadavgData struct{} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*loadavgData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +func (d *loadavgData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + + // TODO(b/62345059): Include real data in fields. + // Column 1-3: CPU and IO utilization of the last 1, 5, and 10 minute periods. 
+ // Column 4-5: currently running processes and the total number of processes. + // Column 6: the last process ID used. + fmt.Fprintf(&buf, "%.2f %.2f %.2f %d/%d %d\n", 0.00, 0.00, 0.00, 0, 0, 0) + + return []seqfile.SeqData{ + { + Buf: buf.Bytes(), + Handle: (*loadavgData)(nil), + }, + }, 0 +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_files.go) diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go new file mode 100644 index 000000000..91617267d --- /dev/null +++ b/pkg/sentry/fs/proc/meminfo.go @@ -0,0 +1,93 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/usermem" +) + +// LINT.IfChange + +// meminfoData backs /proc/meminfo. +// +// +stateify savable +type meminfoData struct { + // k is the owning Kernel. + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*meminfoData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (d *meminfoData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + mf := d.k.MemoryFile() + mf.UpdateUsage() + snapshot, totalUsage := usage.MemoryAccounting.Copy() + totalSize := usage.TotalMemory(mf.TotalSize(), totalUsage) + anon := snapshot.Anonymous + snapshot.Tmpfs + file := snapshot.PageCache + snapshot.Mapped + // We don't actually have active/inactive LRUs, so just make up numbers. + activeFile := (file / 2) &^ (usermem.PageSize - 1) + inactiveFile := file - activeFile + + var buf bytes.Buffer + fmt.Fprintf(&buf, "MemTotal: %8d kB\n", totalSize/1024) + memFree := totalSize - totalUsage + if memFree > totalSize { + // Underflow. + memFree = 0 + } + // We use MemFree as MemAvailable because we don't swap. + // TODO(rahat): When reclaim is implemented the value of MemAvailable + // should change. + fmt.Fprintf(&buf, "MemFree: %8d kB\n", memFree/1024) + fmt.Fprintf(&buf, "MemAvailable: %8d kB\n", memFree/1024) + fmt.Fprintf(&buf, "Buffers: 0 kB\n") // memory usage by block devices + fmt.Fprintf(&buf, "Cached: %8d kB\n", (file+snapshot.Tmpfs)/1024) + // Emulate a system with no swap, which disables inactivation of anon pages. 
+ fmt.Fprintf(&buf, "SwapCache: 0 kB\n") + fmt.Fprintf(&buf, "Active: %8d kB\n", (anon+activeFile)/1024) + fmt.Fprintf(&buf, "Inactive: %8d kB\n", inactiveFile/1024) + fmt.Fprintf(&buf, "Active(anon): %8d kB\n", anon/1024) + fmt.Fprintf(&buf, "Inactive(anon): 0 kB\n") + fmt.Fprintf(&buf, "Active(file): %8d kB\n", activeFile/1024) + fmt.Fprintf(&buf, "Inactive(file): %8d kB\n", inactiveFile/1024) + fmt.Fprintf(&buf, "Unevictable: 0 kB\n") // TODO(b/31823263) + fmt.Fprintf(&buf, "Mlocked: 0 kB\n") // TODO(b/31823263) + fmt.Fprintf(&buf, "SwapTotal: 0 kB\n") + fmt.Fprintf(&buf, "SwapFree: 0 kB\n") + fmt.Fprintf(&buf, "Dirty: 0 kB\n") + fmt.Fprintf(&buf, "Writeback: 0 kB\n") + fmt.Fprintf(&buf, "AnonPages: %8d kB\n", anon/1024) + fmt.Fprintf(&buf, "Mapped: %8d kB\n", file/1024) // doesn't count mapped tmpfs, which we don't know + fmt.Fprintf(&buf, "Shmem: %8d kB\n", snapshot.Tmpfs/1024) + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*meminfoData)(nil)}}, 0 +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_files.go) diff --git a/pkg/sentry/fs/proc/mounts.go b/pkg/sentry/fs/proc/mounts.go new file mode 100644 index 000000000..1fc9c703c --- /dev/null +++ b/pkg/sentry/fs/proc/mounts.go @@ -0,0 +1,232 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/kernel" +) + +// LINT.IfChange + +// forEachMountSource runs f for the process root mount and each mount that is a +// descendant of the root. +func forEachMount(t *kernel.Task, fn func(string, *fs.Mount)) { + var fsctx *kernel.FSContext + t.WithMuLocked(func(t *kernel.Task) { + fsctx = t.FSContext() + }) + if fsctx == nil { + // The task has been destroyed. Nothing to show here. + return + } + + // All mount points must be relative to the rootDir, and mounts outside + // will be excluded. + rootDir := fsctx.RootDirectory() + if rootDir == nil { + // The task has been destroyed. Nothing to show here. + return + } + defer rootDir.DecRef() + + mnt := t.MountNamespace().FindMount(rootDir) + if mnt == nil { + // Has it just been unmounted? + return + } + ms := t.MountNamespace().AllMountsUnder(mnt) + sort.Slice(ms, func(i, j int) bool { + return ms[i].ID < ms[j].ID + }) + for _, m := range ms { + mroot := m.Root() + if mroot == nil { + continue // No longer valid. + } + mountPath, desc := mroot.FullName(rootDir) + mroot.DecRef() + if !desc { + // MountSources that are not descendants of the chroot jail are ignored. + continue + } + fn(mountPath, m) + } +} + +// mountInfoFile is used to implement /proc/[pid]/mountinfo. +// +// +stateify savable +type mountInfoFile struct { + t *kernel.Task +} + +// NeedsUpdate implements SeqSource.NeedsUpdate. +func (mif *mountInfoFile) NeedsUpdate(_ int64) bool { + return true +} + +// ReadSeqFileData implements SeqSource.ReadSeqFileData. 
+func (mif *mountInfoFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if handle != nil { + return nil, 0 + } + + var buf bytes.Buffer + forEachMount(mif.t, func(mountPath string, m *fs.Mount) { + mroot := m.Root() + if mroot == nil { + return // No longer valid. + } + defer mroot.DecRef() + + // Format: + // 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + // (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + // (1) MountSource ID. + fmt.Fprintf(&buf, "%d ", m.ID) + + // (2) Parent ID (or this ID if there is no parent). + pID := m.ID + if !m.IsRoot() && !m.IsUndo() { + pID = m.ParentID + } + fmt.Fprintf(&buf, "%d ", pID) + + // (3) Major:Minor device ID. We don't have a superblock, so we + // just use the root inode device number. + sa := mroot.Inode.StableAttr + fmt.Fprintf(&buf, "%d:%d ", sa.DeviceFileMajor, sa.DeviceFileMinor) + + // (4) Root: the pathname of the directory in the filesystem + // which forms the root of this mount. + // + // NOTE(b/78135857): This will always be "/" until we implement + // bind mounts. + fmt.Fprintf(&buf, "/ ") + + // (5) Mount point (relative to process root). + fmt.Fprintf(&buf, "%s ", mountPath) + + // (6) Mount options. + flags := mroot.Inode.MountSource.Flags + opts := "rw" + if flags.ReadOnly { + opts = "ro" + } + if flags.NoAtime { + opts += ",noatime" + } + if flags.NoExec { + opts += ",noexec" + } + fmt.Fprintf(&buf, "%s ", opts) + + // (7) Optional fields: zero or more fields of the form "tag[:value]". + // (8) Separator: the end of the optional fields is marked by a single hyphen. + fmt.Fprintf(&buf, "- ") + + // (9) Filesystem type. + fmt.Fprintf(&buf, "%s ", mroot.Inode.MountSource.FilesystemType) + + // (10) Mount source: filesystem-specific information or "none". + fmt.Fprintf(&buf, "none ") + + // (11) Superblock options, and final newline. 
+ fmt.Fprintf(&buf, "%s\n", superBlockOpts(mountPath, mroot.Inode.MountSource)) + }) + + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*mountInfoFile)(nil)}}, 0 +} + +func superBlockOpts(mountPath string, msrc *fs.MountSource) string { + // gVisor doesn't (yet) have a concept of super block options, so we + // use the ro/rw bit from the mount flag. + opts := "rw" + if msrc.Flags.ReadOnly { + opts = "ro" + } + + // NOTE(b/147673608): If the mount is a cgroup, we also need to include + // the cgroup name in the options. For now we just read that from the + // path. + // + // TODO(gvisor.dev/issue/190): Once gVisor has full cgroup support, we + // should get this value from the cgroup itself, and not rely on the + // path. + if msrc.FilesystemType == "cgroup" { + splitPath := strings.Split(mountPath, "/") + cgroupType := splitPath[len(splitPath)-1] + opts += "," + cgroupType + } + return opts +} + +// mountsFile is used to implement /proc/[pid]/mounts. +// +// +stateify savable +type mountsFile struct { + t *kernel.Task +} + +// NeedsUpdate implements SeqSource.NeedsUpdate. +func (mf *mountsFile) NeedsUpdate(_ int64) bool { + return true +} + +// ReadSeqFileData implements SeqSource.ReadSeqFileData. +func (mf *mountsFile) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if handle != nil { + return nil, 0 + } + + var buf bytes.Buffer + forEachMount(mf.t, func(mountPath string, m *fs.Mount) { + // Format: + // <special device or remote filesystem> <mount point> <filesystem type> <mount options> <needs dump> <fsck order> + // + // We use the filesystem name as the first field, since there + // is no real block device we can point to, and we also should + // not expose anything about the remote filesystem. + // + // Only ro/rw option is supported for now. + // + // The "needs dump"and fsck flags are always 0, which is allowed. + root := m.Root() + if root == nil { + return // No longer valid. 
+ } + defer root.DecRef() + + flags := root.Inode.MountSource.Flags + opts := "rw" + if flags.ReadOnly { + opts = "ro" + } + fmt.Fprintf(&buf, "%s %s %s %s %d %d\n", "none", mountPath, root.Inode.MountSource.FilesystemType, opts, 0, 0) + }) + + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*mountsFile)(nil)}}, 0 +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_files.go) diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go new file mode 100644 index 000000000..bd18177d4 --- /dev/null +++ b/pkg/sentry/fs/proc/net.go @@ -0,0 +1,841 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + "io" + "reflect" + "time" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/inet" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/socket" + "gvisor.dev/gvisor/pkg/sentry/socket/unix" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/usermem" +) + +// LINT.IfChange + +// newNetDir creates a new proc net entry. 
+func newNetDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + k := t.Kernel() + + var contents map[string]*fs.Inode + if s := t.NetworkNamespace().Stack(); s != nil { + // TODO(gvisor.dev/issue/1833): Make sure file contents reflect the task + // network namespace. + contents = map[string]*fs.Inode{ + "dev": seqfile.NewSeqFileInode(t, &netDev{s: s}, msrc), + "snmp": seqfile.NewSeqFileInode(t, &netSnmp{s: s}, msrc), + + // The following files are simple stubs until they are + // implemented in netstack, if the file contains a + // header the stub is just the header otherwise it is + // an empty file. + "arp": newStaticProcInode(t, msrc, []byte("IP address HW type Flags HW address Mask Device\n")), + + "netlink": newStaticProcInode(t, msrc, []byte("sk Eth Pid Groups Rmem Wmem Dump Locks Drops Inode\n")), + "netstat": newStaticProcInode(t, msrc, []byte("TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPLossProbes TCPLossProbeRecovery TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPMD5Failure TCPSackShifted TCPSackMerged 
TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPRetransFail TCPRcvCoalesce TCPOFOQueue TCPOFODrop TCPOFOMerge TCPChallengeACK TCPSYNChallenge TCPFastOpenActive TCPFastOpenActiveFail TCPFastOpenPassive TCPFastOpenPassiveFail TCPFastOpenListenOverflow TCPFastOpenCookieReqd TCPSpuriousRtxHostQueues BusyPollRxPackets TCPAutoCorking TCPFromZeroWindowAdv TCPToZeroWindowAdv TCPWantZeroWindowAdv TCPSynRetrans TCPOrigDataSent TCPHystartTrainDetect TCPHystartTrainCwnd TCPHystartDelayDetect TCPHystartDelayCwnd TCPACKSkippedSynRecv TCPACKSkippedPAWS TCPACKSkippedSeq TCPACKSkippedFinWait2 TCPACKSkippedTimeWait TCPACKSkippedChallenge TCPWinProbe TCPKeepAlive TCPMTUPFail TCPMTUPSuccess\n")), + "packet": newStaticProcInode(t, msrc, []byte("sk RefCnt Type Proto Iface R Rmem User Inode\n")), + "protocols": newStaticProcInode(t, msrc, []byte("protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n")), + // Linux sets psched values to: nsec per usec, psched + // tick in ns, 1000000, high res timer ticks per sec + // (ClockGetres returns 1ns resolution). 
+ "psched": newStaticProcInode(t, msrc, []byte(fmt.Sprintf("%08x %08x %08x %08x\n", uint64(time.Microsecond/time.Nanosecond), 64, 1000000, uint64(time.Second/time.Nanosecond)))), + "ptype": newStaticProcInode(t, msrc, []byte("Type Device Function\n")), + "route": seqfile.NewSeqFileInode(t, &netRoute{s: s}, msrc), + "tcp": seqfile.NewSeqFileInode(t, &netTCP{k: k}, msrc), + "udp": seqfile.NewSeqFileInode(t, &netUDP{k: k}, msrc), + "unix": seqfile.NewSeqFileInode(t, &netUnix{k: k}, msrc), + } + + if s.SupportsIPv6() { + contents["if_inet6"] = seqfile.NewSeqFileInode(t, &ifinet6{s: s}, msrc) + contents["ipv6_route"] = newStaticProcInode(t, msrc, []byte("")) + contents["tcp6"] = seqfile.NewSeqFileInode(t, &netTCP6{k: k}, msrc) + contents["udp6"] = newStaticProcInode(t, msrc, []byte(" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n")) + } + } + d := ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(t, d, msrc, fs.SpecialDirectory, t) +} + +// ifinet6 implements seqfile.SeqSource for /proc/net/if_inet6. +// +// +stateify savable +type ifinet6 struct { + s inet.Stack +} + +func (n *ifinet6) contents() []string { + var lines []string + nics := n.s.Interfaces() + for id, naddrs := range n.s.InterfaceAddrs() { + nic, ok := nics[id] + if !ok { + // NIC was added after NICNames was called. We'll just + // ignore it. + continue + } + + for _, a := range naddrs { + // IPv6 only. + if a.Family != linux.AF_INET6 { + continue + } + + // Fields: + // IPv6 address displayed in 32 hexadecimal chars without colons + // Netlink device number (interface index) in hexadecimal (use nic id) + // Prefix length in hexadecimal + // Scope value (use 0) + // Interface flags + // Device name + lines = append(lines, fmt.Sprintf("%032x %02x %02x %02x %02x %8s\n", a.Addr, id, a.PrefixLen, 0, a.Flags, nic.Name)) + } + } + return lines +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. 
+func (*ifinet6) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +func (n *ifinet6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var data []seqfile.SeqData + for _, l := range n.contents() { + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*ifinet6)(nil)}) + } + + return data, 0 +} + +// netDev implements seqfile.SeqSource for /proc/net/dev. +// +// +stateify savable +type netDev struct { + s inet.Stack +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (n *netDev) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. See Linux's +// net/core/net-procfs.c:dev_seq_show. +func (n *netDev) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + interfaces := n.s.Interfaces() + contents := make([]string, 2, 2+len(interfaces)) + // Add the table header. From net/core/net-procfs.c:dev_seq_show. + contents[0] = "Inter-| Receive | Transmit\n" + contents[1] = " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n" + + for _, i := range interfaces { + // Implements the same format as + // net/core/net-procfs.c:dev_seq_printf_stats. 
+ var stats inet.StatDev + if err := n.s.Statistics(&stats, i.Name); err != nil { + log.Warningf("Failed to retrieve interface statistics for %v: %v", i.Name, err) + continue + } + l := fmt.Sprintf( + "%6s: %7d %7d %4d %4d %4d %5d %10d %9d %8d %7d %4d %4d %4d %5d %7d %10d\n", + i.Name, + // Received + stats[0], // bytes + stats[1], // packets + stats[2], // errors + stats[3], // dropped + stats[4], // fifo + stats[5], // frame + stats[6], // compressed + stats[7], // multicast + // Transmitted + stats[8], // bytes + stats[9], // packets + stats[10], // errors + stats[11], // dropped + stats[12], // fifo + stats[13], // frame + stats[14], // compressed + stats[15]) // multicast + contents = append(contents, l) + } + + var data []seqfile.SeqData + for _, l := range contents { + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netDev)(nil)}) + } + + return data, 0 +} + +// netSnmp implements seqfile.SeqSource for /proc/net/snmp. +// +// +stateify savable +type netSnmp struct { + s inet.Stack +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. 
+func (n *netSnmp) NeedsUpdate(generation int64) bool { + return true +} + +type snmpLine struct { + prefix string + header string +} + +var snmp = []snmpLine{ + { + prefix: "Ip", + header: "Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates", + }, + { + prefix: "Icmp", + header: "InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps", + }, + { + prefix: "IcmpMsg", + }, + { + prefix: "Tcp", + header: "RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors", + }, + { + prefix: "Udp", + header: "InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti", + }, + { + prefix: "UdpLite", + header: "InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti", + }, +} + +func toSlice(a interface{}) []uint64 { + v := reflect.Indirect(reflect.ValueOf(a)) + return v.Slice(0, v.Len()).Interface().([]uint64) +} + +func sprintSlice(s []uint64) string { + if len(s) == 0 { + return "" + } + r := fmt.Sprint(s) + return r[1 : len(r)-1] // Remove "[]" introduced by fmt of slice. +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. See Linux's +// net/core/net-procfs.c:dev_seq_show. 
+func (n *netSnmp) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + contents := make([]string, 0, len(snmp)*2) + types := []interface{}{ + &inet.StatSNMPIP{}, + &inet.StatSNMPICMP{}, + nil, // TODO(gvisor.dev/issue/628): Support IcmpMsg stats. + &inet.StatSNMPTCP{}, + &inet.StatSNMPUDP{}, + &inet.StatSNMPUDPLite{}, + } + for i, stat := range types { + line := snmp[i] + if stat == nil { + contents = append( + contents, + fmt.Sprintf("%s:\n", line.prefix), + fmt.Sprintf("%s:\n", line.prefix), + ) + continue + } + if err := n.s.Statistics(stat, line.prefix); err != nil { + if err == syserror.EOPNOTSUPP { + log.Infof("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err) + } else { + log.Warningf("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err) + } + } + var values string + if line.prefix == "Tcp" { + tcp := stat.(*inet.StatSNMPTCP) + // "Tcp" needs special processing because MaxConn is signed. RFC 2012. + values = fmt.Sprintf("%s %d %s", sprintSlice(tcp[:3]), int64(tcp[3]), sprintSlice(tcp[4:])) + } else { + values = sprintSlice(toSlice(stat)) + } + contents = append( + contents, + fmt.Sprintf("%s: %s\n", line.prefix, line.header), + fmt.Sprintf("%s: %s\n", line.prefix, values), + ) + } + + data := make([]seqfile.SeqData, 0, len(snmp)*2) + for _, l := range contents { + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netSnmp)(nil)}) + } + + return data, 0 +} + +// netRoute implements seqfile.SeqSource for /proc/net/route. +// +// +stateify savable +type netRoute struct { + s inet.Stack +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (n *netRoute) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +// See Linux's net/ipv4/fib_trie.c:fib_route_seq_show. 
+func (n *netRoute) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + interfaces := n.s.Interfaces() + contents := []string{"Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT"} + for _, rt := range n.s.RouteTable() { + // /proc/net/route only includes ipv4 routes. + if rt.Family != linux.AF_INET { + continue + } + + // /proc/net/route does not include broadcast or multicast routes. + if rt.Type == linux.RTN_BROADCAST || rt.Type == linux.RTN_MULTICAST { + continue + } + + iface, ok := interfaces[rt.OutputInterface] + if !ok || iface.Name == "lo" { + continue + } + + var ( + gw uint32 + prefix uint32 + flags = linux.RTF_UP + ) + if len(rt.GatewayAddr) == header.IPv4AddressSize { + flags |= linux.RTF_GATEWAY + gw = usermem.ByteOrder.Uint32(rt.GatewayAddr) + } + if len(rt.DstAddr) == header.IPv4AddressSize { + prefix = usermem.ByteOrder.Uint32(rt.DstAddr) + } + l := fmt.Sprintf( + "%s\t%08X\t%08X\t%04X\t%d\t%d\t%d\t%08X\t%d\t%d\t%d", + iface.Name, + prefix, + gw, + flags, + 0, // RefCnt. + 0, // Use. + 0, // Metric. + (uint32(1)<<rt.DstLen)-1, + 0, // MTU. + 0, // Window. + 0, // RTT. + ) + contents = append(contents, l) + } + + var data []seqfile.SeqData + for _, l := range contents { + l = fmt.Sprintf("%-127s\n", l) + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netRoute)(nil)}) + } + + return data, 0 +} + +// netUnix implements seqfile.SeqSource for /proc/net/unix. +// +// +stateify savable +type netUnix struct { + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*netUnix) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return []seqfile.SeqData{}, 0 + } + + var buf bytes.Buffer + for _, se := range n.k.ListSockets() { + s := se.Sock.Get() + if s == nil { + log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID) + continue + } + sfile := s.(*fs.File) + if family, _, _ := sfile.FileOperations.(socket.Socket).Type(); family != linux.AF_UNIX { + s.DecRef() + // Not a unix socket. + continue + } + sops := sfile.FileOperations.(*unix.SocketOperations) + + addr, err := sops.Endpoint().GetLocalAddress() + if err != nil { + log.Warningf("Failed to retrieve socket name from %+v: %v", sfile, err) + addr.Addr = "<unknown>" + } + + sockFlags := 0 + if ce, ok := sops.Endpoint().(transport.ConnectingEndpoint); ok { + if ce.Listening() { + // For unix domain sockets, linux reports a single flag + // value if the socket is listening, of __SO_ACCEPTCON. + sockFlags = linux.SO_ACCEPTCON + } + } + + // In the socket entry below, the value for the 'Num' field requires + // some consideration. Linux prints the address to the struct + // unix_sock representing a socket in the kernel, but may redact the + // value for unprivileged users depending on the kptr_restrict + // sysctl. + // + // One use for this field is to allow a privileged user to + // introspect into the kernel memory to determine information about + // a socket not available through procfs, such as the socket's peer. + // + // On gvisor, returning a pointer to our internal structures would + // be pointless, as it wouldn't match the memory layout for struct + // unix_sock, making introspection difficult. We could populate a + // struct unix_sock with the appropriate data, but even that + // requires consideration for which kernel version to emulate, as + // the definition of this struct changes over time. + // + // For now, we always redact this pointer. 
+ fmt.Fprintf(&buf, "%#016p: %08X %08X %08X %04X %02X %5d", + (*unix.SocketOperations)(nil), // Num, pointer to kernel socket struct. + sfile.ReadRefs()-1, // RefCount, don't count our own ref. + 0, // Protocol, always 0 for UDS. + sockFlags, // Flags. + sops.Endpoint().Type(), // Type. + sops.State(), // State. + sfile.InodeID(), // Inode. + ) + + // Path + if len(addr.Addr) != 0 { + if addr.Addr[0] == 0 { + // Abstract path. + fmt.Fprintf(&buf, " @%s", string(addr.Addr[1:])) + } else { + fmt.Fprintf(&buf, " %s", string(addr.Addr)) + } + } + fmt.Fprintf(&buf, "\n") + + s.DecRef() + } + + data := []seqfile.SeqData{ + { + Buf: []byte("Num RefCount Protocol Flags Type St Inode Path\n"), + Handle: n, + }, + { + Buf: buf.Bytes(), + Handle: n, + }, + } + return data, 0 +} + +func networkToHost16(n uint16) uint16 { + // n is in network byte order, so is big-endian. The most-significant byte + // should be stored in the lower address. + // + // We manually inline binary.BigEndian.Uint16() because Go does not support + // non-primitive consts, so binary.BigEndian is a (mutable) var, so calls to + // binary.BigEndian.Uint16() require a read of binary.BigEndian and an + // interface method call, defeating inlining. + buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)} + return usermem.ByteOrder.Uint16(buf[:]) +} + +func writeInetAddr(w io.Writer, family int, i linux.SockAddr) { + switch family { + case linux.AF_INET: + var a linux.SockAddrInet + if i != nil { + a = *i.(*linux.SockAddrInet) + } + + // linux.SockAddrInet.Port is stored in the network byte order and is + // printed like a number in host byte order. Note that all numbers in host + // byte order are printed with the most-significant byte first when + // formatted with %X. See get_tcp4_sock() and udp4_format_sock() in Linux. + port := networkToHost16(a.Port) + + // linux.SockAddrInet.Addr is stored as a byte slice in big-endian order + // (i.e. most-significant byte in index 0). 
Linux represents this as a + // __be32 which is a typedef for an unsigned int, and is printed with + // %X. This means that for a little-endian machine, Linux prints the + // least-significant byte of the address first. To emulate this, we first + // invert the byte order for the address using usermem.ByteOrder.Uint32, + // which makes it have the equivalent encoding to a __be32 on a little + // endian machine. Note that this operation is a no-op on a big endian + // machine. Then similar to Linux, we format it with %X, which will print + // the most-significant byte of the __be32 address first, which is now + // actually the least-significant byte of the original address in + // linux.SockAddrInet.Addr on little endian machines, due to the conversion. + addr := usermem.ByteOrder.Uint32(a.Addr[:]) + + fmt.Fprintf(w, "%08X:%04X ", addr, port) + case linux.AF_INET6: + var a linux.SockAddrInet6 + if i != nil { + a = *i.(*linux.SockAddrInet6) + } + + port := networkToHost16(a.Port) + addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4]) + addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8]) + addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12]) + addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16]) + fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port) + } +} + +func commonReadSeqFileDataTCP(ctx context.Context, n seqfile.SeqHandle, k *kernel.Kernel, h seqfile.SeqHandle, fa int, header []byte) ([]seqfile.SeqData, int64) { + // t may be nil here if our caller is not part of a task goroutine. This can + // happen for example if we're here for "sentryctl cat". When t is nil, + // degrade gracefully and retrieve what we can. 
+ t := kernel.TaskFromContext(ctx) + + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + for _, se := range k.ListSockets() { + s := se.Sock.Get() + if s == nil { + log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID) + continue + } + sfile := s.(*fs.File) + sops, ok := sfile.FileOperations.(socket.Socket) + if !ok { + panic(fmt.Sprintf("Found non-socket file in socket table: %+v", sfile)) + } + if family, stype, _ := sops.Type(); !(family == fa && stype == linux.SOCK_STREAM) { + s.DecRef() + // Not tcp4 sockets. + continue + } + + // Linux's documentation for the fields below can be found at + // https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt. + // For Linux's implementation, see net/ipv4/tcp_ipv4.c:get_tcp4_sock(). + // Note that the header doesn't contain labels for all the fields. + + // Field: sl; entry number. + fmt.Fprintf(&buf, "%4d: ", se.ID) + + // Field: local_adddress. + var localAddr linux.SockAddr + if t != nil { + if local, _, err := sops.GetSockName(t); err == nil { + localAddr = local + } + } + writeInetAddr(&buf, fa, localAddr) + + // Field: rem_address. + var remoteAddr linux.SockAddr + if t != nil { + if remote, _, err := sops.GetPeerName(t); err == nil { + remoteAddr = remote + } + } + writeInetAddr(&buf, fa, remoteAddr) + + // Field: state; socket state. + fmt.Fprintf(&buf, "%02X ", sops.State()) + + // Field: tx_queue, rx_queue; number of packets in the transmit and + // receive queue. Unimplemented. + fmt.Fprintf(&buf, "%08X:%08X ", 0, 0) + + // Field: tr, tm->when; timer active state and number of jiffies + // until timer expires. Unimplemented. + fmt.Fprintf(&buf, "%02X:%08X ", 0, 0) + + // Field: retrnsmt; number of unrecovered RTO timeouts. + // Unimplemented. + fmt.Fprintf(&buf, "%08X ", 0) + + // Field: uid. 
+ uattr, err := sfile.Dirent.Inode.UnstableAttr(ctx) + if err != nil { + log.Warningf("Failed to retrieve unstable attr for socket file: %v", err) + fmt.Fprintf(&buf, "%5d ", 0) + } else { + creds := auth.CredentialsFromContext(ctx) + fmt.Fprintf(&buf, "%5d ", uint32(uattr.Owner.UID.In(creds.UserNamespace).OrOverflow())) + } + + // Field: timeout; number of unanswered 0-window probes. + // Unimplemented. + fmt.Fprintf(&buf, "%8d ", 0) + + // Field: inode. + fmt.Fprintf(&buf, "%8d ", sfile.InodeID()) + + // Field: refcount. Don't count the ref we obtain while deferencing + // the weakref to this socket. + fmt.Fprintf(&buf, "%d ", sfile.ReadRefs()-1) + + // Field: Socket struct address. Redacted due to the same reason as + // the 'Num' field in /proc/net/unix, see netUnix.ReadSeqFileData. + fmt.Fprintf(&buf, "%#016p ", (*socket.Socket)(nil)) + + // Field: retransmit timeout. Unimplemented. + fmt.Fprintf(&buf, "%d ", 0) + + // Field: predicted tick of soft clock (delayed ACK control data). + // Unimplemented. + fmt.Fprintf(&buf, "%d ", 0) + + // Field: (ack.quick<<1)|ack.pingpong, Unimplemented. + fmt.Fprintf(&buf, "%d ", 0) + + // Field: sending congestion window, Unimplemented. + fmt.Fprintf(&buf, "%d ", 0) + + // Field: Slow start size threshold, -1 if threshold >= 0xFFFF. + // Unimplemented, report as large threshold. + fmt.Fprintf(&buf, "%d", -1) + + fmt.Fprintf(&buf, "\n") + + s.DecRef() + } + + data := []seqfile.SeqData{ + { + Buf: header, + Handle: n, + }, + { + Buf: buf.Bytes(), + Handle: n, + }, + } + return data, 0 +} + +// netTCP implements seqfile.SeqSource for /proc/net/tcp. +// +// +stateify savable +type netTCP struct { + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*netTCP) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
func (n *netTCP) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	// Fixed column header; the per-socket rows are produced by the shared
	// TCP helper with the IPv4 address family.
	header := []byte(" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode \n")
	return commonReadSeqFileDataTCP(ctx, n, n.k, h, linux.AF_INET, header)
}

// netTCP6 implements seqfile.SeqSource for /proc/net/tcp6.
//
// +stateify savable
type netTCP6 struct {
	k *kernel.Kernel
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netTCP6) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netTCP6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	header := []byte(" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n")
	return commonReadSeqFileDataTCP(ctx, n, n.k, h, linux.AF_INET6, header)
}

// netUDP implements seqfile.SeqSource for /proc/net/udp.
//
// +stateify savable
type netUDP struct {
	k *kernel.Kernel
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netUDP) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netUDP) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	// t may be nil here if our caller is not part of a task goroutine. This can
	// happen for example if we're here for "sentryctl cat". When t is nil,
	// degrade gracefully and retrieve what we can.
	t := kernel.TaskFromContext(ctx)

	if h != nil {
		return nil, 0
	}

	var buf bytes.Buffer
	for _, se := range n.k.ListSockets() {
		s := se.Sock.Get()
		if s == nil {
			log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID)
			continue
		}
		sfile := s.(*fs.File)
		sops, ok := sfile.FileOperations.(socket.Socket)
		if !ok {
			panic(fmt.Sprintf("Found non-socket file in socket table: %+v", sfile))
		}
		if family, stype, _ := sops.Type(); family != linux.AF_INET || stype != linux.SOCK_DGRAM {
			s.DecRef()
			// Not udp4 socket.
			continue
		}

		// For Linux's implementation, see net/ipv4/udp.c:udp4_format_sock().

		// Field: sl; entry number.
		fmt.Fprintf(&buf, "%5d: ", se.ID)

		// Field: local_address.
		var localAddr linux.SockAddrInet
		if t != nil {
			if local, _, err := sops.GetSockName(t); err == nil {
				localAddr = *local.(*linux.SockAddrInet)
			}
		}
		writeInetAddr(&buf, linux.AF_INET, &localAddr)

		// Field: rem_address.
		var remoteAddr linux.SockAddrInet
		if t != nil {
			if remote, _, err := sops.GetPeerName(t); err == nil {
				remoteAddr = *remote.(*linux.SockAddrInet)
			}
		}
		writeInetAddr(&buf, linux.AF_INET, &remoteAddr)

		// Field: state; socket state.
		fmt.Fprintf(&buf, "%02X ", sops.State())

		// Field: tx_queue, rx_queue; number of packets in the transmit and
		// receive queue. Unimplemented.
		fmt.Fprintf(&buf, "%08X:%08X ", 0, 0)

		// Field: tr, tm->when. Always 0 for UDP.
		fmt.Fprintf(&buf, "%02X:%08X ", 0, 0)

		// Field: retrnsmt. Always 0 for UDP.
		fmt.Fprintf(&buf, "%08X ", 0)

		// Field: uid.
		uattr, err := sfile.Dirent.Inode.UnstableAttr(ctx)
		if err != nil {
			log.Warningf("Failed to retrieve unstable attr for socket file: %v", err)
			fmt.Fprintf(&buf, "%5d ", 0)
		} else {
			creds := auth.CredentialsFromContext(ctx)
			fmt.Fprintf(&buf, "%5d ", uint32(uattr.Owner.UID.In(creds.UserNamespace).OrOverflow()))
		}

		// Field: timeout. Always 0 for UDP.
		fmt.Fprintf(&buf, "%8d ", 0)

		// Field: inode.
		fmt.Fprintf(&buf, "%8d ", sfile.InodeID())

		// Field: ref; reference count on the socket inode. Don't count the ref
		// we obtain while dereferencing the weakref to this socket.
		fmt.Fprintf(&buf, "%d ", sfile.ReadRefs()-1)

		// Field: Socket struct address. Redacted due to the same reason as
		// the 'Num' field in /proc/net/unix, see netUnix.ReadSeqFileData.
		fmt.Fprintf(&buf, "%#016p ", (*socket.Socket)(nil))

		// Field: drops; number of dropped packets. Unimplemented.
		fmt.Fprintf(&buf, "%d", 0)

		fmt.Fprintf(&buf, "\n")

		s.DecRef()
	}

	// Two records: the fixed header row, then all accumulated socket rows.
	data := []seqfile.SeqData{
		{
			Buf:    []byte(" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops \n"),
			Handle: n,
		},
		{
			Buf:    buf.Bytes(),
			Handle: n,
		},
	}
	return data, 0
}

// LINT.ThenChange(../../fsimpl/proc/task_net.go)
diff --git a/pkg/sentry/fs/proc/net_test.go b/pkg/sentry/fs/proc/net_test.go
new file mode 100644
index 000000000..f18681405
--- /dev/null
+++ b/pkg/sentry/fs/proc/net_test.go
@@ -0,0 +1,74 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package proc + +import ( + "reflect" + "testing" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/sentry/inet" +) + +func newIPv6TestStack() *inet.TestStack { + s := inet.NewTestStack() + s.SupportsIPv6Flag = true + return s +} + +func TestIfinet6NoAddresses(t *testing.T) { + n := &ifinet6{s: newIPv6TestStack()} + if got := n.contents(); got != nil { + t.Errorf("Got n.contents() = %v, want = %v", got, nil) + } +} + +func TestIfinet6(t *testing.T) { + s := newIPv6TestStack() + s.InterfacesMap[1] = inet.Interface{Name: "eth0"} + s.InterfaceAddrsMap[1] = []inet.InterfaceAddr{ + { + Family: linux.AF_INET6, + PrefixLen: 128, + Addr: []byte("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"), + }, + } + s.InterfacesMap[2] = inet.Interface{Name: "eth1"} + s.InterfaceAddrsMap[2] = []inet.InterfaceAddr{ + { + Family: linux.AF_INET6, + PrefixLen: 128, + Addr: []byte("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"), + }, + } + want := map[string]struct{}{ + "000102030405060708090a0b0c0d0e0f 01 80 00 00 eth0\n": {}, + "101112131415161718191a1b1c1d1e1f 02 80 00 00 eth1\n": {}, + } + + n := &ifinet6{s: s} + contents := n.contents() + if len(contents) != len(want) { + t.Errorf("Got len(n.contents()) = %d, want = %d", len(contents), len(want)) + } + got := map[string]struct{}{} + for _, l := range contents { + got[l] = struct{}{} + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("Got n.contents() = %v, want = %v", got, want) + } +} diff --git a/pkg/sentry/fs/proc/proc.go b/pkg/sentry/fs/proc/proc.go new file mode 100644 index 000000000..c659224a7 --- /dev/null +++ b/pkg/sentry/fs/proc/proc.go @@ -0,0 +1,248 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package proc implements a partial in-memory file system for procfs.
package proc

import (
	"fmt"
	"sort"
	"strconv"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
	"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
	"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/syserror"
)

// LINT.IfChange

// proc is a root proc node.
//
// +stateify savable
type proc struct {
	ramfs.Dir

	// k is the Kernel containing this proc node.
	k *kernel.Kernel

	// pidns is the PID namespace of the task that mounted the proc filesystem
	// that this node represents.
	pidns *kernel.PIDNamespace

	// cgroupControllers is a map of controller name to directory in the
	// cgroup hierarchy. These controllers are immutable and will be listed
	// in /proc/pid/cgroup if not nil.
	cgroupControllers map[string]string
}

// New returns the root node of a partial simple procfs. It fails if the
// context carries no Kernel or no PID namespace.
func New(ctx context.Context, msrc *fs.MountSource, cgroupControllers map[string]string) (*fs.Inode, error) {
	k := kernel.KernelFromContext(ctx)
	if k == nil {
		return nil, fmt.Errorf("procfs requires a kernel")
	}
	pidns := kernel.PIDNamespaceFromContext(ctx)
	if pidns == nil {
		return nil, fmt.Errorf("procfs requires a PID namespace")
	}

	// Note that these are just the static members. There are dynamic
	// members populated in Readdir and Lookup below.
	contents := map[string]*fs.Inode{
		"cpuinfo":     newCPUInfo(ctx, msrc),
		"filesystems": seqfile.NewSeqFileInode(ctx, &filesystemsData{}, msrc),
		"loadavg":     seqfile.NewSeqFileInode(ctx, &loadavgData{}, msrc),
		"meminfo":     seqfile.NewSeqFileInode(ctx, &meminfoData{k}, msrc),
		"mounts":      newProcInode(ctx, ramfs.NewSymlink(ctx, fs.RootOwner, "self/mounts"), msrc, fs.Symlink, nil),
		"net":         newProcInode(ctx, ramfs.NewSymlink(ctx, fs.RootOwner, "self/net"), msrc, fs.Symlink, nil),
		"self":        newSelf(ctx, pidns, msrc),
		"stat":        seqfile.NewSeqFileInode(ctx, &statData{k}, msrc),
		"thread-self": newThreadSelf(ctx, pidns, msrc),
		"uptime":      newUptime(ctx, msrc),
		"version":     seqfile.NewSeqFileInode(ctx, &versionData{k}, msrc),
	}

	// Construct the proc InodeOperations.
	p := &proc{
		Dir:               *ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)),
		k:                 k,
		pidns:             pidns,
		cgroupControllers: cgroupControllers,
	}

	// Add more contents that need proc to be initialized.
	p.AddChild(ctx, "sys", p.newSysDir(ctx, msrc))

	return newProcInode(ctx, p, msrc, fs.SpecialDirectory, nil), nil
}

// self is a magical link.
//
// +stateify savable
type self struct {
	ramfs.Symlink

	// pidns resolves the reading task to its thread group ID.
	pidns *kernel.PIDNamespace
}

// newSelf returns a new "self" node.
func newSelf(ctx context.Context, pidns *kernel.PIDNamespace, msrc *fs.MountSource) *fs.Inode {
	s := &self{
		Symlink: *ramfs.NewSymlink(ctx, fs.RootOwner, ""),
		pidns:   pidns,
	}
	return newProcInode(ctx, s, msrc, fs.Symlink, nil)
}

// newThreadSelf returns a new "threadSelf" node.
func newThreadSelf(ctx context.Context, pidns *kernel.PIDNamespace, msrc *fs.MountSource) *fs.Inode {
	s := &threadSelf{
		Symlink: *ramfs.NewSymlink(ctx, fs.RootOwner, ""),
		pidns:   pidns,
	}
	return newProcInode(ctx, s, msrc, fs.Symlink, nil)
}

// Readlink implements fs.InodeOperations.Readlink. The target is the TGID of
// the calling task in this node's PID namespace.
func (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
	if t := kernel.TaskFromContext(ctx); t != nil {
		tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
		if tgid == 0 {
			return "", syserror.ENOENT
		}
		return strconv.FormatUint(uint64(tgid), 10), nil
	}

	// Who is reading this link?
	return "", syserror.EINVAL
}

// threadSelf is more magical than "self" link.
//
// +stateify savable
type threadSelf struct {
	ramfs.Symlink

	pidns *kernel.PIDNamespace
}

// Readlink implements fs.InodeOperations.Readlink. The target is
// "<tgid>/task/<tid>" for the calling task.
func (s *threadSelf) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
	if t := kernel.TaskFromContext(ctx); t != nil {
		tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
		tid := s.pidns.IDOfTask(t)
		if tid == 0 || tgid == 0 {
			return "", syserror.ENOENT
		}
		return fmt.Sprintf("%d/task/%d", tgid, tid), nil
	}

	// Who is reading this link?
	return "", syserror.EINVAL
}

// Lookup loads an Inode at name into a Dirent. Static children are tried
// first; a numeric name then falls back to a task directory lookup.
func (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {
	dirent, walkErr := p.Dir.Lookup(ctx, dir, name)
	if walkErr == nil {
		return dirent, nil
	}

	// Try to lookup a corresponding task.
	tid, err := strconv.ParseUint(name, 10, 64)
	if err != nil {
		// Ignore the parse error and return the original.
		return nil, walkErr
	}

	// Grab the other task.
	otherTask := p.pidns.TaskWithID(kernel.ThreadID(tid))
	if otherTask == nil {
		// Per above.
		return nil, walkErr
	}

	// Wrap it in a taskDir.
	td := p.newTaskDir(otherTask, dir.MountSource, true)
	return fs.NewDirent(ctx, td, name), nil
}

// GetFile implements fs.InodeOperations.GetFile.
func (p *proc) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &rootProcFile{iops: p}), nil
}

// rootProcFile implements fs.FileOperations for the proc directory.
+// +// +stateify savable +type rootProcFile struct { + fsutil.DirFileOperations `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + iops *proc +} + +var _ fs.FileOperations = (*rootProcFile)(nil) + +// Readdir implements fs.FileOperations.Readdir. +func (rpf *rootProcFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) { + offset := file.Offset() + dirCtx := &fs.DirCtx{ + Serializer: ser, + } + + // Get normal directory contents from ramfs dir. + names, m := rpf.iops.Dir.Children() + + // Add dot and dotdot. + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + dot, dotdot := file.Dirent.GetDotAttrs(root) + names = append(names, ".", "..") + m["."] = dot + m[".."] = dotdot + + // Collect tasks. + // Per linux we only include it in directory listings if it's the leader. + // But for whatever crazy reason, you can still walk to the given node. + for _, tg := range rpf.iops.pidns.ThreadGroups() { + if leader := tg.Leader(); leader != nil { + name := strconv.FormatUint(uint64(rpf.iops.pidns.IDOfThreadGroup(tg)), 10) + m[name] = fs.GenericDentAttr(fs.SpecialDirectory, device.ProcDevice) + names = append(names, name) + } + } + + if offset >= int64(len(m)) { + return offset, nil + } + sort.Strings(names) + names = names[offset:] + for _, name := range names { + if err := dirCtx.DirEmit(name, m[name]); err != nil { + return offset, err + } + offset++ + } + return offset, nil +} + +// LINT.ThenChange(../../fsimpl/proc/tasks.go) diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD new file mode 100644 index 000000000..21338d912 --- /dev/null +++ b/pkg/sentry/fs/proc/seqfile/BUILD @@ -0,0 +1,35 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "seqfile", + srcs = ["seqfile.go"], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/sentry/fs", + 
        "//pkg/sentry/fs/fsutil",
        "//pkg/sentry/fs/proc/device",
        "//pkg/sentry/kernel/time",
        "//pkg/sync",
        "//pkg/syserror",
        "//pkg/usermem",
        "//pkg/waiter",
    ],
)

go_test(
    name = "seqfile_test",
    size = "small",
    srcs = ["seqfile_test.go"],
    library = ":seqfile",
    deps = [
        "//pkg/context",
        "//pkg/sentry/contexttest",
        "//pkg/sentry/fs",
        "//pkg/sentry/fs/ramfs",
        "//pkg/usermem",
    ],
)
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
new file mode 100644
index 000000000..6121f0e95
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -0,0 +1,283 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package seqfile provides dynamic ordered files.
package seqfile

import (
	"io"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
	ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/syserror"
	"gvisor.dev/gvisor/pkg/usermem"
	"gvisor.dev/gvisor/pkg/waiter"
)

// SeqHandle is a helper handle to seek in the file.
type SeqHandle interface{}

// SeqData holds the data for one unit in the file.
//
// +stateify savable
type SeqData struct {
	// Buf is the data to be returned to the user.
	Buf []byte

	// Handle is a seek handle used to find the next valid unit in
	// ReadSeqFileData.
	Handle SeqHandle
}

// SeqSource is a data source for a SeqFile file.
type SeqSource interface {
	// NeedsUpdate returns true if the consumer of SeqData should call
	// ReadSeqFileData again. Generation is the generation returned by
	// ReadSeqFileData, or 0.
	NeedsUpdate(generation int64) bool

	// Returns a slice of SeqData ordered by unit and the current
	// generation. The first entry in the slice is greater than the handle.
	// If handle is nil then all known records are returned. Generation
	// must always be greater than 0.
	ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64)
}

// SeqGenerationCounter is a counter to keep track if the SeqSource should be
// updated. SeqGenerationCounter is not thread-safe and should be protected
// with a mutex.
type SeqGenerationCounter struct {
	// generation is the generation that the SeqData is at.
	generation int64
}

// SetGeneration sets the generation to the new value, be careful to not set it
// to a value less than current.
func (s *SeqGenerationCounter) SetGeneration(generation int64) {
	s.generation = generation
}

// Update increments the current generation.
func (s *SeqGenerationCounter) Update() {
	s.generation++
}

// Generation returns the current generation counter.
func (s *SeqGenerationCounter) Generation() int64 {
	return s.generation
}

// IsCurrent returns whether the given generation is current or not.
func (s *SeqGenerationCounter) IsCurrent(generation int64) bool {
	return s.Generation() == generation
}

// SeqFile is used to provide dynamic files that can be ordered by record.
//
// +stateify savable
type SeqFile struct {
	fsutil.InodeGenericChecker `state:"nosave"`
	fsutil.InodeNoopRelease    `state:"nosave"`
	fsutil.InodeNoopWriteOut   `state:"nosave"`
	fsutil.InodeNotAllocatable `state:"nosave"`
	fsutil.InodeNotDirectory   `state:"nosave"`
	fsutil.InodeNotMappable    `state:"nosave"`
	fsutil.InodeNotSocket      `state:"nosave"`
	fsutil.InodeNotSymlink     `state:"nosave"`
	fsutil.InodeNotTruncatable `state:"nosave"`
	fsutil.InodeVirtual        `state:"nosave"`

	fsutil.InodeSimpleExtendedAttributes
	fsutil.InodeSimpleAttributes

	// mu protects the fields below.
	mu sync.Mutex `state:"nosave"`

	SeqSource

	// source is the cached records read so far from SeqSource.
	source []SeqData
	// generation is the SeqSource generation source corresponds to.
	generation int64
	// lastRead is the file offset at the end of the previous Read; used to
	// detect backwards seeks.
	lastRead int64
}

var _ fs.InodeOperations = (*SeqFile)(nil)

// NewSeqFile returns a seqfile suitable for use by external consumers.
func NewSeqFile(ctx context.Context, source SeqSource) *SeqFile {
	return &SeqFile{
		InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		SeqSource:             source,
	}
}

// NewSeqFileInode returns an Inode with SeqFile InodeOperations.
func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource) *fs.Inode {
	iops := NewSeqFile(ctx, source)
	sattr := fs.StableAttr{
		DeviceID:  device.ProcDevice.DeviceID(),
		InodeID:   device.ProcDevice.NextIno(),
		BlockSize: usermem.PageSize,
		Type:      fs.SpecialFile,
	}
	return fs.NewInode(ctx, iops, msrc, sattr)
}

// UnstableAttr returns unstable attributes of the SeqFile, with the
// modification time refreshed to now.
func (s *SeqFile) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
	uattr, err := s.InodeSimpleAttributes.UnstableAttr(ctx, inode)
	if err != nil {
		return fs.UnstableAttr{}, err
	}
	uattr.ModificationTime = ktime.NowFromContext(ctx)
	return uattr, nil
}

// GetFile implements fs.InodeOperations.GetFile.
func (s *SeqFile) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &seqFileOperations{seqFile: s}), nil
}

// findIndexAndOffset finds the unit that corresponds to a certain offset.
// Returns the unit and the offset within the unit. If there are not enough
// units len(data) and leftover offset is returned.
func findIndexAndOffset(data []SeqData, offset int64) (int, int64) {
	for i, buf := range data {
		l := int64(len(buf.Buf))
		if offset < l {
			return i, offset
		}
		offset -= l
	}
	return len(data), offset
}

// updateSourceLocked truncates the cached records to the first `record`
// entries and refetches everything after them from the SeqSource.
//
// updateSourceLocked requires that s.mu is held.
func (s *SeqFile) updateSourceLocked(ctx context.Context, record int) {
	var h SeqHandle
	if record == 0 {
		h = nil
	} else {
		h = s.source[record-1].Handle
	}
	// Save what we have previously read.
	s.source = s.source[:record]
	var newSource []SeqData
	newSource, s.generation = s.SeqSource.ReadSeqFileData(ctx, h)
	s.source = append(s.source, newSource...)
}

// seqFileOperations implements fs.FileOperations.
//
// +stateify savable
type seqFileOperations struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	seqFile *SeqFile
}

var _ fs.FileOperations = (*seqFileOperations)(nil)

// Write implements fs.FileOperations.Write. SeqFiles are read-only.
func (*seqFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
	return 0, syserror.EACCES
}

// Read implements fs.FileOperations.Read.
func (sfo *seqFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	sfo.seqFile.mu.Lock()
	defer sfo.seqFile.mu.Unlock()

	sfo.seqFile.NotifyAccess(ctx)
	// Remember where this read started so a later backwards seek can be
	// detected. Note: records the starting offset, not the final one.
	defer func() { sfo.seqFile.lastRead = offset }()

	updated := false

	// Try to find where we should start reading this file.
	i, recordOffset := findIndexAndOffset(sfo.seqFile.source, offset)
	if i == len(sfo.seqFile.source) {
		// Ok, we're at EOF. Let's first check to see if there might be
		// more data available to us. If there is more data, add it to
		// the end and try reading again.
		if !sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) {
			return 0, io.EOF
		}
		oldLen := len(sfo.seqFile.source)
		sfo.seqFile.updateSourceLocked(ctx, len(sfo.seqFile.source))
		updated = true
		// We know that we had consumed everything up until this point
		// so we search in the new slice instead of starting over.
		i, recordOffset = findIndexAndOffset(sfo.seqFile.source[oldLen:], recordOffset)
		i += oldLen
		// i is at most the length of the slice which is
		// len(sfo.seqFile.source) - oldLen. So at most i will be equal to
		// len(sfo.seqFile.source).
		if i == len(sfo.seqFile.source) {
			return 0, io.EOF
		}
	}

	var done int64
	// We're reading parts of a record, finish reading the current object
	// before continuing on to the next. We don't refresh our data source
	// before this record is completed.
	if recordOffset != 0 {
		n, err := dst.CopyOut(ctx, sfo.seqFile.source[i].Buf[recordOffset:])
		done += int64(n)
		dst = dst.DropFirst(n)
		if dst.NumBytes() == 0 || err != nil {
			return done, err
		}
		i++
	}

	// Next/New unit, update the source file if necessary. Make an extra
	// check to see if we've seeked backwards and if so always update our
	// data source.
	if !updated && (sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) || sfo.seqFile.lastRead > offset) {
		sfo.seqFile.updateSourceLocked(ctx, i)
		// recordOffset is 0 here and we won't update records behind the
		// current one so recordOffset is still 0 even though source
		// just got updated. Just read the next record.
	}

	// Finish by reading all the available data.
	for _, buf := range sfo.seqFile.source[i:] {
		n, err := dst.CopyOut(ctx, buf.Buf)
		done += int64(n)
		dst = dst.DropFirst(n)
		if dst.NumBytes() == 0 || err != nil {
			return done, err
		}
	}

	// If the file shrank (entries not yet read were removed above)
	// while we tried to read we can end up with nothing read.
	if done == 0 && dst.NumBytes() != 0 {
		return 0, io.EOF
	}
	return done, nil
}
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile_test.go b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
new file mode 100644
index 000000000..98e394569
--- /dev/null
+++ b/pkg/sentry/fs/proc/seqfile/seqfile_test.go
@@ -0,0 +1,279 @@
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package seqfile

import (
	"bytes"
	"fmt"
	"io"
	"testing"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sentry/contexttest"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
	"gvisor.dev/gvisor/pkg/usermem"
)

// seqTest is a SeqSource test double backed by an in-memory record slice.
type seqTest struct {
	// actual is the set of records the source currently exposes.
	actual []SeqData
	// update is returned verbatim from NeedsUpdate.
	update bool
}

// Init fills the source with ten 10-byte records; record i contains ten
// bytes of value i.
func (s *seqTest) Init() {
	var sq []SeqData
	// Create some SeqData.
	for i := 0; i < 10; i++ {
		var b []byte
		for j := 0; j < 10; j++ {
			b = append(b, byte(i))
		}
		sq = append(sq, SeqData{
			Buf:    b,
			Handle: &testHandle{i: i},
		})
	}
	s.actual = sq
}

// NeedsUpdate reports whether we need to update the data we've previously read.
func (s *seqTest) NeedsUpdate(int64) bool {
	return s.update
}

// ReadSeqFileData returns a slice of SeqData which contains elements
// greater than the handle.
func (s *seqTest) ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64) {
	if handle == nil {
		return s.actual, 0
	}
	h := *handle.(*testHandle)
	var ret []SeqData
	for _, b := range s.actual {
		// We want the next one.
		h2 := *b.Handle.(*testHandle)
		if h2.i > h.i {
			ret = append(ret, b)
		}
	}
	return ret, 0
}

// Flatten a slice of slices into one slice.
func flatten(buf ...[]byte) []byte {
	var flat []byte
	for _, b := range buf {
		flat = append(flat, b...)
	}
	return flat
}

// testHandle identifies a record by its index in the source.
type testHandle struct {
	i int
}

// testTable describes one Preadv case: read readBufferSize bytes at offset
// and expect expectedData / expectedError.
type testTable struct {
	offset         int64
	readBufferSize int
	expectedData   []byte
	expectedError  error
}

// runTableTests opens the dirent fresh for each case and checks the read
// result against the table's expectations.
func runTableTests(ctx context.Context, table []testTable, dirent *fs.Dirent) error {
	for _, tt := range table {
		file, err := dirent.Inode.InodeOperations.GetFile(ctx, dirent, fs.FileFlags{Read: true})
		if err != nil {
			return fmt.Errorf("GetFile returned error: %v", err)
		}

		data := make([]byte, tt.readBufferSize)
		resultLen, err := file.Preadv(ctx, usermem.BytesIOSequence(data), tt.offset)
		if err != tt.expectedError {
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
		}
		expectedLen := int64(len(tt.expectedData))
		if resultLen != expectedLen {
			// We make this just an error so we fall through and print the data below.
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (size) => %v expected %v", tt.readBufferSize, tt.offset, resultLen, expectedLen)
		}
		if !bytes.Equal(data[:expectedLen], tt.expectedData) {
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (data) => %v expected %v", tt.readBufferSize, tt.offset, data[:expectedLen], tt.expectedData)
		}
	}
	return nil
}

func TestSeqFile(t *testing.T) {
	testSource := &seqTest{}
	testSource.Init()

	// Create a file that can be R/W.
	ctx := contexttest.Context(t)
	m := fs.NewPseudoMountSource(ctx)
	contents := map[string]*fs.Inode{
		"foo": NewSeqFileInode(ctx, testSource, m),
	}
	root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))

	// How about opening it?
	inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
	dirent2, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo for n2: %v", err)
	}
	n2 := dirent2.Inode.InodeOperations
	file2, err := n2.GetFile(ctx, dirent2, fs.FileFlags{Read: true, Write: true})
	if err != nil {
		t.Fatalf("GetFile returned error: %v", err)
	}

	// Writing? SeqFiles are read-only, so this must fail.
	if _, err := file2.Writev(ctx, usermem.BytesIOSequence([]byte("test"))); err == nil {
		t.Fatalf("managed to write to n2: %v", err)
	}

	// How about reading?
	dirent3, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo: %v", err)
	}
	n3 := dirent3.Inode.InodeOperations
	if n2 != n3 {
		t.Error("got n2 != n3, want same")
	}

	testSource.update = true

	table := []testTable{
		// Read past the end.
		{100, 4, []byte{}, io.EOF},
		{110, 4, []byte{}, io.EOF},
		{200, 4, []byte{}, io.EOF},
		// Read a truncated first line.
		{0, 4, testSource.actual[0].Buf[:4], nil},
		// Read the whole first line.
		{0, 10, testSource.actual[0].Buf, nil},
		// Read the whole first line + 5 bytes of second line.
		{0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil},
		// First 4 bytes of the second line.
		{10, 4, testSource.actual[1].Buf[:4], nil},
		// Read the two first lines.
		{0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil},
		// Read three lines.
		{0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil},
		// Read everything, but use a bigger buffer than necessary.
		{0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil},
		// Read the last 3 bytes.
		{97, 10, testSource.actual[9].Buf[7:], nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err)
	}

	// Disable updates and do it again.
	testSource.update = false
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
	}
}

// Test that we behave correctly when the file is updated.
func TestSeqFileFileUpdated(t *testing.T) {
	testSource := &seqTest{}
	testSource.Init()
	testSource.update = true

	// Create a file that can be R/W.
	ctx := contexttest.Context(t)
	m := fs.NewPseudoMountSource(ctx)
	contents := map[string]*fs.Inode{
		"foo": NewSeqFileInode(ctx, testSource, m),
	}
	root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))

	// How about opening it?
	inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
	dirent2, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo for dirent2: %v", err)
	}

	table := []testTable{
		{0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed: %v", err)
	}
	// Delete the first entry.
	cut := testSource.actual[0].Buf
	testSource.actual = testSource.actual[1:]

	table = []testTable{
		// Try reading buffer 0 with an offset. This will not delete the old data.
		{1, 5, cut[1:6], nil},
		// Reset our file by reading at offset 0.
		{0, 10, testSource.actual[0].Buf, nil},
		{16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
		// Read the same data a second time.
		{16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
		// Read the following two lines.
		{30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after removing first entry: %v", err)
	}

	// Add a new duplicate line in the middle (6666...)
	after := testSource.actual[5:]
	testSource.actual = testSource.actual[:4]
	// Note the list must be sorted.
	testSource.actual = append(testSource.actual, after[0])
	testSource.actual = append(testSource.actual, after...)

	table = []testTable{
		{50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after adding middle entry: %v", err)
	}
	// This will be used in a later test.
	oldTestData := testSource.actual

	// Delete everything.
	testSource.actual = testSource.actual[:0]
	table = []testTable{
		{20, 20, []byte{}, io.EOF},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after removing all entries: %v", err)
	}
	// Restore some of the data.
	testSource.actual = oldTestData[:1]
	table = []testTable{
		{6, 20, testSource.actual[0].Buf[6:], nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after adding first entry back: %v", err)
	}

	// Re-extend the data
	testSource.actual = oldTestData
	table = []testTable{
		{30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after extending testSource: %v", err)
	}
}
diff --git a/pkg/sentry/fs/proc/stat.go b/pkg/sentry/fs/proc/stat.go
new file mode 100644
index 000000000..d4fbd76ac
--- /dev/null
+++ b/pkg/sentry/fs/proc/stat.go
@@ -0,0 +1,146 @@
// Copyright 2018 The gVisor Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/kernel" +) + +// LINT.IfChange + +// statData backs /proc/stat. +// +// +stateify savable +type statData struct { + // k is the owning Kernel. + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*statData) NeedsUpdate(generation int64) bool { + return true +} + +// cpuStats contains the breakdown of CPU time for /proc/stat. +type cpuStats struct { + // user is time spent in userspace tasks with non-positive niceness. + user uint64 + + // nice is time spent in userspace tasks with positive niceness. + nice uint64 + + // system is time spent in non-interrupt kernel context. + system uint64 + + // idle is time spent idle. + idle uint64 + + // ioWait is time spent waiting for IO. + ioWait uint64 + + // irq is time spent in interrupt context. + irq uint64 + + // softirq is time spent in software interrupt context. + softirq uint64 + + // steal is involuntary wait time. + steal uint64 + + // guest is time spent in guests with non-positive niceness. + guest uint64 + + // guestNice is time spent in guests with positive niceness. + guestNice uint64 +} + +// String implements fmt.Stringer. 
+func (c cpuStats) String() string { + return fmt.Sprintf("%d %d %d %d %d %d %d %d %d %d", c.user, c.nice, c.system, c.idle, c.ioWait, c.irq, c.softirq, c.steal, c.guest, c.guestNice) +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +func (s *statData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + + // TODO(b/37226836): We currently export only zero CPU stats. We could + // at least provide some aggregate stats. + var cpu cpuStats + fmt.Fprintf(&buf, "cpu %s\n", cpu) + + for c, max := uint(0), s.k.ApplicationCores(); c < max; c++ { + fmt.Fprintf(&buf, "cpu%d %s\n", c, cpu) + } + + // The total number of interrupts is dependent on the CPUs and PCI + // devices on the system. See arch_probe_nr_irqs. + // + // Since we don't report real interrupt stats, just choose an arbitrary + // value from a representative VM. + const numInterrupts = 256 + + // The Kernel doesn't handle real interrupts, so report all zeroes. + // TODO(b/37226836): We could count page faults as #PF. + fmt.Fprintf(&buf, "intr 0") // total + for i := 0; i < numInterrupts; i++ { + fmt.Fprintf(&buf, " 0") + } + fmt.Fprintf(&buf, "\n") + + // Total number of context switches. + // TODO(b/37226836): Count this. + fmt.Fprintf(&buf, "ctxt 0\n") + + // CLOCK_REALTIME timestamp from boot, in seconds. + fmt.Fprintf(&buf, "btime %d\n", s.k.Timekeeper().BootTime().Seconds()) + + // Total number of clones. + // TODO(b/37226836): Count this. + fmt.Fprintf(&buf, "processes 0\n") + + // Number of runnable tasks. + // TODO(b/37226836): Count this. + fmt.Fprintf(&buf, "procs_running 0\n") + + // Number of tasks waiting on IO. + // TODO(b/37226836): Count this. + fmt.Fprintf(&buf, "procs_blocked 0\n") + + // Number of each softirq handled. 
+ fmt.Fprintf(&buf, "softirq 0") // total + for i := 0; i < linux.NumSoftIRQ; i++ { + fmt.Fprintf(&buf, " 0") + } + fmt.Fprintf(&buf, "\n") + + return []seqfile.SeqData{ + { + Buf: buf.Bytes(), + Handle: (*statData)(nil), + }, + }, 0 +} + +// LINT.ThenChange(../../fsimpl/proc/task_files.go) diff --git a/pkg/sentry/fs/proc/sys.go b/pkg/sentry/fs/proc/sys.go new file mode 100644 index 000000000..f8aad2dbd --- /dev/null +++ b/pkg/sentry/fs/proc/sys.go @@ -0,0 +1,159 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "io" + "strconv" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// mmapMinAddrData backs /proc/sys/vm/mmap_min_addr. +// +// +stateify savable +type mmapMinAddrData struct { + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*mmapMinAddrData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (d *mmapMinAddrData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + return []seqfile.SeqData{ + { + Buf: []byte(fmt.Sprintf("%d\n", d.k.Platform.MinUserAddress())), + Handle: (*mmapMinAddrData)(nil), + }, + }, 0 +} + +// +stateify savable +type overcommitMemory struct{} + +func (*overcommitMemory) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource. +func (*overcommitMemory) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + return []seqfile.SeqData{ + { + Buf: []byte("0\n"), + Handle: (*overcommitMemory)(nil), + }, + }, 0 +} + +func (p *proc) newKernelDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + h := hostname{ + SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC), + } + + children := map[string]*fs.Inode{ + "hostname": newProcInode(ctx, &h, msrc, fs.SpecialFile, nil), + "shmall": newStaticProcInode(ctx, msrc, []byte(strconv.FormatUint(linux.SHMALL, 10))), + "shmmax": newStaticProcInode(ctx, msrc, []byte(strconv.FormatUint(linux.SHMMAX, 10))), + "shmmni": newStaticProcInode(ctx, msrc, []byte(strconv.FormatUint(linux.SHMMNI, 10))), + } + + d := ramfs.NewDir(ctx, children, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newVMDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + children := map[string]*fs.Inode{ + "mmap_min_addr": seqfile.NewSeqFileInode(ctx, &mmapMinAddrData{p.k}, msrc), + "overcommit_memory": seqfile.NewSeqFileInode(ctx, &overcommitMemory{}, msrc), + } + d := ramfs.NewDir(ctx, children, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newSysDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + 
children := map[string]*fs.Inode{ + "kernel": p.newKernelDir(ctx, msrc), + "net": p.newSysNetDir(ctx, msrc), + "vm": p.newVMDir(ctx, msrc), + } + + d := ramfs.NewDir(ctx, children, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +// hostname is the inode for a file containing the system hostname. +// +// +stateify savable +type hostname struct { + fsutil.SimpleFileInode +} + +// GetFile implements fs.InodeOperations.GetFile. +func (h *hostname) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, d, flags, &hostnameFile{}), nil +} + +var _ fs.InodeOperations = (*hostname)(nil) + +// +stateify savable +type hostnameFile struct { + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSeek `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoWrite `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +// Read implements fs.FileOperations.Read. +func (hf *hostnameFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + utsns := kernel.UTSNamespaceFromContext(ctx) + contents := []byte(utsns.HostName() + "\n") + if offset >= int64(len(contents)) { + return 0, io.EOF + } + n, err := dst.CopyOut(ctx, contents[offset:]) + return int64(n), err + +} + +var _ fs.FileOperations = (*hostnameFile)(nil) + +// LINT.ThenChange(../../fsimpl/proc/tasks_sys.go) diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go new file mode 100644 index 000000000..702fdd392 --- /dev/null +++ b/pkg/sentry/fs/proc/sys_net.go @@ -0,0 +1,372 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "io" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/inet" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +type tcpMemDir int + +const ( + tcpRMem tcpMemDir = iota + tcpWMem +) + +// tcpMemInode is used to read/write the size of netstack tcp buffers. +// +// TODO(b/121381035): If we have multiple proc mounts, concurrent writes can +// leave netstack and the proc files in an inconsistent state. Since we set the +// buffer size from these proc files on restore, we may also race and end up in +// an inconsistent state on restore. +// +// +stateify savable +type tcpMemInode struct { + fsutil.SimpleFileInode + dir tcpMemDir + s inet.Stack `state:"wait"` + + // size stores the tcp buffer size during save, and sets the buffer + // size in netstack in restore. We must save/restore this here, since + // netstack itself is stateless. + size inet.TCPBufferSize + + // mu protects against concurrent reads/writes to files based on this + // inode. 
+ mu sync.Mutex `state:"nosave"` +} + +var _ fs.InodeOperations = (*tcpMemInode)(nil) + +func newTCPMemInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack, dir tcpMemDir) *fs.Inode { + tm := &tcpMemInode{ + SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0644), linux.PROC_SUPER_MAGIC), + s: s, + dir: dir, + } + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(ctx, tm, msrc, sattr) +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*tcpMemInode) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// GetFile implements fs.InodeOperations.GetFile. +func (m *tcpMemInode) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + return fs.NewFile(ctx, dirent, flags, &tcpMemFile{tcpMemInode: m}), nil +} + +// +stateify savable +type tcpMemFile struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + tcpMemInode *tcpMemInode +} + +var _ fs.FileOperations = (*tcpMemFile)(nil) + +// Read implements fs.FileOperations.Read. 
+func (f *tcpMemFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset != 0 { + return 0, io.EOF + } + f.tcpMemInode.mu.Lock() + defer f.tcpMemInode.mu.Unlock() + + size, err := readSize(f.tcpMemInode.dir, f.tcpMemInode.s) + if err != nil { + return 0, err + } + s := fmt.Sprintf("%d\t%d\t%d\n", size.Min, size.Default, size.Max) + n, err := dst.CopyOut(ctx, []byte(s)) + return int64(n), err +} + +// Write implements fs.FileOperations.Write. +func (f *tcpMemFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + if src.NumBytes() == 0 { + return 0, nil + } + f.tcpMemInode.mu.Lock() + defer f.tcpMemInode.mu.Unlock() + + src = src.TakeFirst(usermem.PageSize - 1) + size, err := readSize(f.tcpMemInode.dir, f.tcpMemInode.s) + if err != nil { + return 0, err + } + buf := []int32{int32(size.Min), int32(size.Default), int32(size.Max)} + n, cperr := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, buf, src.Opts) + newSize := inet.TCPBufferSize{ + Min: int(buf[0]), + Default: int(buf[1]), + Max: int(buf[2]), + } + if err := writeSize(f.tcpMemInode.dir, f.tcpMemInode.s, newSize); err != nil { + return n, err + } + return n, cperr +} + +func readSize(dirType tcpMemDir, s inet.Stack) (inet.TCPBufferSize, error) { + switch dirType { + case tcpRMem: + return s.TCPReceiveBufferSize() + case tcpWMem: + return s.TCPSendBufferSize() + default: + panic(fmt.Sprintf("unknown tcpMemFile type: %v", dirType)) + } +} + +func writeSize(dirType tcpMemDir, s inet.Stack, size inet.TCPBufferSize) error { + switch dirType { + case tcpRMem: + return s.SetTCPReceiveBufferSize(size) + case tcpWMem: + return s.SetTCPSendBufferSize(size) + default: + panic(fmt.Sprintf("unknown tcpMemFile type: %v", dirType)) + } +} + +// +stateify savable +type tcpSack struct { + fsutil.SimpleFileInode + + stack inet.Stack `state:"wait"` + enabled *bool +} + +func newTCPSackInode(ctx context.Context, msrc 
*fs.MountSource, s inet.Stack) *fs.Inode { + ts := &tcpSack{ + SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0644), linux.PROC_SUPER_MAGIC), + stack: s, + } + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(ctx, ts, msrc, sattr) +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*tcpSack) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// GetFile implements fs.InodeOperations.GetFile. +func (s *tcpSack) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + flags.Pwrite = true + return fs.NewFile(ctx, dirent, flags, &tcpSackFile{ + tcpSack: s, + stack: s.stack, + }), nil +} + +// +stateify savable +type tcpSackFile struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + tcpSack *tcpSack + + stack inet.Stack `state:"wait"` +} + +// Read implements fs.FileOperations.Read. +func (f *tcpSackFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset != 0 { + return 0, io.EOF + } + + if f.tcpSack.enabled == nil { + sack, err := f.stack.TCPSACKEnabled() + if err != nil { + return 0, err + } + f.tcpSack.enabled = &sack + } + + val := "0\n" + if *f.tcpSack.enabled { + // Technically, this is not quite compatible with Linux. Linux + // stores these as an integer, so if you write "2" into + // tcp_sack, you should get 2 back. Tough luck. 
+ val = "1\n" + } + n, err := dst.CopyOut(ctx, []byte(val)) + return int64(n), err +} + + // Write implements fs.FileOperations.Write. +func (f *tcpSackFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + if src.NumBytes() == 0 { + return 0, nil + } + src = src.TakeFirst(usermem.PageSize - 1) + + var v int32 + n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) + if err != nil { + return n, err + } + if f.tcpSack.enabled == nil { + f.tcpSack.enabled = new(bool) + } + *f.tcpSack.enabled = v != 0 + return n, f.tcpSack.stack.SetTCPSACKEnabled(*f.tcpSack.enabled) +} + +func (p *proc) newSysNetCore(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode { + // The following files are simple stubs until they are implemented in + // netstack, most of these files are configuration related. We use the + // value closest to the actual netstack behavior or an empty file, + // all of these files will have mode 0444 (read-only for all users). + contents := map[string]*fs.Inode{ + "default_qdisc": newStaticProcInode(ctx, msrc, []byte("pfifo_fast")), + "message_burst": newStaticProcInode(ctx, msrc, []byte("10")), + "message_cost": newStaticProcInode(ctx, msrc, []byte("5")), + "optmem_max": newStaticProcInode(ctx, msrc, []byte("0")), + "rmem_default": newStaticProcInode(ctx, msrc, []byte("212992")), + "rmem_max": newStaticProcInode(ctx, msrc, []byte("212992")), + "somaxconn": newStaticProcInode(ctx, msrc, []byte("128")), + "wmem_default": newStaticProcInode(ctx, msrc, []byte("212992")), + "wmem_max": newStaticProcInode(ctx, msrc, []byte("212992")), + } + + d := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode { + contents := map[string]*fs.Inode{ + // Add tcp_sack. 
+ "tcp_sack": newTCPSackInode(ctx, msrc, s), + + // The following files are simple stubs until they are + // implemented in netstack, most of these files are + // configuration related. We use the value closest to the + // actual netstack behavior or an empty file, all of these + // files will have mode 0444 (read-only for all users). + "ip_local_port_range": newStaticProcInode(ctx, msrc, []byte("16000 65535")), + "ip_local_reserved_ports": newStaticProcInode(ctx, msrc, []byte("")), + "ipfrag_time": newStaticProcInode(ctx, msrc, []byte("30")), + "ip_nonlocal_bind": newStaticProcInode(ctx, msrc, []byte("0")), + "ip_no_pmtu_disc": newStaticProcInode(ctx, msrc, []byte("1")), + + // tcp_allowed_congestion_control tells the user what they are + // able to do as an unprivileged process so we leave it empty. + "tcp_allowed_congestion_control": newStaticProcInode(ctx, msrc, []byte("")), + "tcp_available_congestion_control": newStaticProcInode(ctx, msrc, []byte("reno")), + "tcp_congestion_control": newStaticProcInode(ctx, msrc, []byte("reno")), + + // Many of the following stub files are features netstack + // doesn't support. The unsupported features return "0" to + // indicate they are disabled. 
+ "tcp_base_mss": newStaticProcInode(ctx, msrc, []byte("1280")), + "tcp_dsack": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_early_retrans": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_fack": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_fastopen": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_fastopen_key": newStaticProcInode(ctx, msrc, []byte("")), + "tcp_invalid_ratelimit": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_keepalive_intvl": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_keepalive_probes": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_keepalive_time": newStaticProcInode(ctx, msrc, []byte("7200")), + "tcp_mtu_probing": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_no_metrics_save": newStaticProcInode(ctx, msrc, []byte("1")), + "tcp_probe_interval": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_probe_threshold": newStaticProcInode(ctx, msrc, []byte("0")), + "tcp_retries1": newStaticProcInode(ctx, msrc, []byte("3")), + "tcp_retries2": newStaticProcInode(ctx, msrc, []byte("15")), + "tcp_rfc1337": newStaticProcInode(ctx, msrc, []byte("1")), + "tcp_slow_start_after_idle": newStaticProcInode(ctx, msrc, []byte("1")), + "tcp_synack_retries": newStaticProcInode(ctx, msrc, []byte("5")), + "tcp_syn_retries": newStaticProcInode(ctx, msrc, []byte("3")), + "tcp_timestamps": newStaticProcInode(ctx, msrc, []byte("1")), + } + + // Add tcp_rmem. + if _, err := s.TCPReceiveBufferSize(); err == nil { + contents["tcp_rmem"] = newTCPMemInode(ctx, msrc, s, tcpRMem) + } + + // Add tcp_wmem. 
+ if _, err := s.TCPSendBufferSize(); err == nil { + contents["tcp_wmem"] = newTCPMemInode(ctx, msrc, s, tcpWMem) + } + + d := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newSysNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + var contents map[string]*fs.Inode + // TODO(gvisor.dev/issue/1833): Support for using the network stack in the + // network namespace of the calling process. + if s := p.k.RootNetworkNamespace().Stack(); s != nil { + contents = map[string]*fs.Inode{ + "ipv4": p.newSysNetIPv4Dir(ctx, msrc, s), + "core": p.newSysNetCore(ctx, msrc, s), + } + } + d := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil) +} + +// LINT.ThenChange(../../fsimpl/proc/tasks_sys.go) diff --git a/pkg/sentry/fs/proc/sys_net_state.go b/pkg/sentry/fs/proc/sys_net_state.go new file mode 100644 index 000000000..6eba709c6 --- /dev/null +++ b/pkg/sentry/fs/proc/sys_net_state.go @@ -0,0 +1,42 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import "fmt" + +// beforeSave is invoked by stateify. 
+func (t *tcpMemInode) beforeSave() { + size, err := readSize(t.dir, t.s) + if err != nil { + panic(fmt.Sprintf("failed to read TCP send / receive buffer sizes: %v", err)) + } + t.size = size +} + +// afterLoad is invoked by stateify. +func (t *tcpMemInode) afterLoad() { + if err := writeSize(t.dir, t.s, t.size); err != nil { + panic(fmt.Sprintf("failed to write previous TCP send / receive buffer sizes [%v]: %v", t.size, err)) + } +} + +// afterLoad is invoked by stateify. +func (s *tcpSack) afterLoad() { + if s.enabled != nil { + if err := s.stack.SetTCPSACKEnabled(*s.enabled); err != nil { + panic(fmt.Sprintf("failed to set previous TCP sack configuration [%v]: %v", *s.enabled, err)) + } + } +} diff --git a/pkg/sentry/fs/proc/sys_net_test.go b/pkg/sentry/fs/proc/sys_net_test.go new file mode 100644 index 000000000..355e83d47 --- /dev/null +++ b/pkg/sentry/fs/proc/sys_net_test.go @@ -0,0 +1,125 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/inet" + "gvisor.dev/gvisor/pkg/usermem" +) + +func TestQuerySendBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + s.TCPSendBufSize = inet.TCPBufferSize{100, 200, 300} + tmi := &tcpMemInode{s: s, dir: tcpWMem} + tmf := &tcpMemFile{tcpMemInode: tmi} + + buf := make([]byte, 100) + dst := usermem.BytesIOSequence(buf) + n, err := tmf.Read(ctx, nil, dst, 0) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if got, want := string(buf[:n]), "100\t200\t300\n"; got != want { + t.Fatalf("Bad string: got %v, want %v", got, want) + } +} + +func TestQueryRecvBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + s.TCPRecvBufSize = inet.TCPBufferSize{100, 200, 300} + tmi := &tcpMemInode{s: s, dir: tcpRMem} + tmf := &tcpMemFile{tcpMemInode: tmi} + + buf := make([]byte, 100) + dst := usermem.BytesIOSequence(buf) + n, err := tmf.Read(ctx, nil, dst, 0) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + + if got, want := string(buf[:n]), "100\t200\t300\n"; got != want { + t.Fatalf("Bad string: got %v, want %v", got, want) + } +} + +var cases = []struct { + str string + initial inet.TCPBufferSize + final inet.TCPBufferSize +}{ + { + str: "", + initial: inet.TCPBufferSize{1, 2, 3}, + final: inet.TCPBufferSize{1, 2, 3}, + }, + { + str: "100\n", + initial: inet.TCPBufferSize{1, 100, 200}, + final: inet.TCPBufferSize{100, 100, 200}, + }, + { + str: "100 200 300\n", + initial: inet.TCPBufferSize{1, 2, 3}, + final: inet.TCPBufferSize{100, 200, 300}, + }, +} + +func TestConfigureSendBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + for _, c := range cases { + s.TCPSendBufSize = c.initial + tmi := &tcpMemInode{s: s, dir: tcpWMem} + tmf := &tcpMemFile{tcpMemInode: tmi} + + // Write the values. 
+ src := usermem.BytesIOSequence([]byte(c.str)) + if n, err := tmf.Write(ctx, nil, src, 0); n != int64(len(c.str)) || err != nil { + t.Errorf("Write, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str)) + } + + // Read the values from the stack and check them. + if s.TCPSendBufSize != c.final { + t.Errorf("TCPSendBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPSendBufSize, c.final) + } + } +} + +func TestConfigureRecvBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + for _, c := range cases { + s.TCPRecvBufSize = c.initial + tmi := &tcpMemInode{s: s, dir: tcpRMem} + tmf := &tcpMemFile{tcpMemInode: tmi} + + // Write the values. + src := usermem.BytesIOSequence([]byte(c.str)) + if n, err := tmf.Write(ctx, nil, src, 0); n != int64(len(c.str)) || err != nil { + t.Errorf("Write, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str)) + } + + // Read the values from the stack and check them. + if s.TCPRecvBufSize != c.final { + t.Errorf("TCPRecvBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPRecvBufSize, c.final) + } + } +} diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go new file mode 100644 index 000000000..4bbe90198 --- /dev/null +++ b/pkg/sentry/fs/proc/task.go @@ -0,0 +1,914 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "bytes" + "fmt" + "io" + "sort" + "strconv" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/device" + "gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/fsbridge" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/limits" + "gvisor.dev/gvisor/pkg/sentry/mm" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// getTaskMM returns t's MemoryManager. If getTaskMM succeeds, the MemoryManager's +// users count is incremented, and must be decremented by the caller when it is +// no longer in use. +func getTaskMM(t *kernel.Task) (*mm.MemoryManager, error) { + if t.ExitState() == kernel.TaskExitDead { + return nil, syserror.ESRCH + } + var m *mm.MemoryManager + t.WithMuLocked(func(t *kernel.Task) { + m = t.MemoryManager() + }) + if m == nil || !m.IncUsers() { + return nil, io.EOF + } + return m, nil +} + +func checkTaskState(t *kernel.Task) error { + switch t.ExitState() { + case kernel.TaskExitZombie: + return syserror.EACCES + case kernel.TaskExitDead: + return syserror.ESRCH + } + return nil +} + +// taskDir represents a task-level directory. +// +// +stateify savable +type taskDir struct { + ramfs.Dir + + t *kernel.Task +} + +var _ fs.InodeOperations = (*taskDir)(nil) + +// newTaskDir creates a new proc task entry. 
+func (p *proc) newTaskDir(t *kernel.Task, msrc *fs.MountSource, isThreadGroup bool) *fs.Inode { + contents := map[string]*fs.Inode{ + "auxv": newAuxvec(t, msrc), + "cmdline": newExecArgInode(t, msrc, cmdlineExecArg), + "comm": newComm(t, msrc), + "environ": newExecArgInode(t, msrc, environExecArg), + "exe": newExe(t, msrc), + "fd": newFdDir(t, msrc), + "fdinfo": newFdInfoDir(t, msrc), + "gid_map": newGIDMap(t, msrc), + "io": newIO(t, msrc, isThreadGroup), + "maps": newMaps(t, msrc), + "mountinfo": seqfile.NewSeqFileInode(t, &mountInfoFile{t: t}, msrc), + "mounts": seqfile.NewSeqFileInode(t, &mountsFile{t: t}, msrc), + "net": newNetDir(t, msrc), + "ns": newNamespaceDir(t, msrc), + "oom_score": newOOMScore(t, msrc), + "oom_score_adj": newOOMScoreAdj(t, msrc), + "smaps": newSmaps(t, msrc), + "stat": newTaskStat(t, msrc, isThreadGroup, p.pidns), + "statm": newStatm(t, msrc), + "status": newStatus(t, msrc, p.pidns), + "uid_map": newUIDMap(t, msrc), + } + if isThreadGroup { + contents["task"] = p.newSubtasks(t, msrc) + } + if len(p.cgroupControllers) > 0 { + contents["cgroup"] = newCGroupInode(t, msrc, p.cgroupControllers) + } + + // N.B. taskOwnedInodeOps enforces dumpability-based ownership. + d := &taskDir{ + Dir: *ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0555)), + t: t, + } + return newProcInode(t, d, msrc, fs.SpecialDirectory, t) +} + +// subtasks represents a /proc/TID/task directory. +// +// +stateify savable +type subtasks struct { + ramfs.Dir + + t *kernel.Task + p *proc +} + +var _ fs.InodeOperations = (*subtasks)(nil) + +func (p *proc) newSubtasks(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + s := &subtasks{ + Dir: *ramfs.NewDir(t, nil, fs.RootOwner, fs.FilePermsFromMode(0555)), + t: t, + p: p, + } + return newProcInode(t, s, msrc, fs.SpecialDirectory, t) +} + +// UnstableAttr returns unstable attributes of the subtasks. 
func (s *subtasks) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
	uattr, err := s.Dir.UnstableAttr(ctx, inode)
	if err != nil {
		return fs.UnstableAttr{}, err
	}
	// We can't rely on ramfs' implementation because the task directories are
	// generated dynamically.
	uattr.Links = uint64(2 + s.t.ThreadGroup().Count())
	return uattr, nil
}

// GetFile implements fs.InodeOperations.GetFile.
func (s *subtasks) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &subtasksFile{t: s.t, pidns: s.p.pidns}), nil
}

// subtasksFile implements fs.FileOperations for a /proc/TID/task directory.
//
// +stateify savable
type subtasksFile struct {
	fsutil.DirFileOperations        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`

	t     *kernel.Task
	pidns *kernel.PIDNamespace
}

// Readdir implements fs.FileOperations.Readdir.
func (f *subtasksFile) Readdir(ctx context.Context, file *fs.File, ser fs.DentrySerializer) (int64, error) {
	dirCtx := fs.DirCtx{
		Serializer: ser,
	}

	// Note that unlike most Readdir implementations, the offset here is
	// not an index into the subtasks, but rather the TID of the next
	// subtask to emit.
	offset := file.Offset()

	tasks := f.t.ThreadGroup().MemberIDs(f.pidns)
	if len(tasks) == 0 {
		// No tasks visible in this PID namespace: the directory is gone.
		return offset, syserror.ENOENT
	}

	if offset == 0 {
		// Serialize "." and "..".
		root := fs.RootFromContext(ctx)
		if root != nil {
			defer root.DecRef()
		}
		dot, dotdot := file.Dirent.GetDotAttrs(root)
		if err := dirCtx.DirEmit(".", dot); err != nil {
			return offset, err
		}
		if err := dirCtx.DirEmit("..", dotdot); err != nil {
			return offset, err
		}
	}

	// Serialize tasks.
	taskInts := make([]int, 0, len(tasks))
	for _, tid := range tasks {
		taskInts = append(taskInts, int(tid))
	}

	sort.Sort(sort.IntSlice(taskInts))
	// Find the task to start at.
	idx := sort.SearchInts(taskInts, int(offset))
	if idx == len(taskInts) {
		return offset, nil
	}
	taskInts = taskInts[idx:]

	var tid int
	for _, tid = range taskInts {
		name := strconv.FormatUint(uint64(tid), 10)
		attr := fs.GenericDentAttr(fs.SpecialDirectory, device.ProcDevice)
		if err := dirCtx.DirEmit(name, attr); err != nil {
			// Returned offset is next tid to serialize.
			return int64(tid), err
		}
	}
	// We serialized them all. Next offset should be higher than last
	// serialized tid.
	return int64(tid) + 1, nil
}

var _ fs.FileOperations = (*subtasksFile)(nil)

// Lookup loads an Inode in a task's subtask directory into a Dirent.
// Names that are not valid TIDs, unknown TIDs, or TIDs belonging to a
// different thread group all resolve to ENOENT.
func (s *subtasks) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) {
	tid, err := strconv.ParseUint(p, 10, 32)
	if err != nil {
		return nil, syserror.ENOENT
	}

	task := s.p.pidns.TaskWithID(kernel.ThreadID(tid))
	if task == nil {
		return nil, syserror.ENOENT
	}
	if task.ThreadGroup() != s.t.ThreadGroup() {
		return nil, syserror.ENOENT
	}

	// Per-thread directories never expose a nested "task" dir
	// (isThreadGroup=false).
	td := s.p.newTaskDir(task, dir.MountSource, false)
	return fs.NewDirent(ctx, td, p), nil
}

// exe is an fs.InodeOperations symlink for the /proc/PID/exe file.
//
// +stateify savable
type exe struct {
	ramfs.Symlink

	t *kernel.Task
}

// newExe returns a new exe symlink inode; the link target is computed lazily
// in Readlink rather than stored in the ramfs.Symlink.
func newExe(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	exeSymlink := &exe{
		Symlink: *ramfs.NewSymlink(t, fs.RootOwner, ""),
		t:       t,
	}
	return newProcInode(t, exeSymlink, msrc, fs.Symlink, t)
}

// executable returns the task's executable with a reference held by the
// caller (released via DecRef). Errors mirror the task's state: EACCES for no
// MemoryManager, ESRCH for a destroyed/absent executable.
func (e *exe) executable() (file fsbridge.File, err error) {
	if err := checkTaskState(e.t); err != nil {
		return nil, err
	}
	e.t.WithMuLocked(func(t *kernel.Task) {
		mm := t.MemoryManager()
		if mm == nil {
			err = syserror.EACCES
			return
		}

		// The MemoryManager may be destroyed, in which case
		// MemoryManager.destroy will simply set the executable to nil
		// (with locks held).
		file = mm.Executable()
		if file == nil {
			err = syserror.ESRCH
		}
	})
	return
}

// Readlink implements fs.InodeOperations.
func (e *exe) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
	if !kernel.ContextCanTrace(ctx, e.t, false) {
		return "", syserror.EACCES
	}

	// Pull out the executable for /proc/TID/exe.
	exec, err := e.executable()
	if err != nil {
		return "", err
	}
	defer exec.DecRef()

	return exec.PathnameWithDeleted(ctx), nil
}

// namespaceSymlink represents a symlink in the namespacefs, such as the files
// in /proc/<pid>/ns.
//
// +stateify savable
type namespaceSymlink struct {
	ramfs.Symlink

	t *kernel.Task
}

func newNamespaceSymlink(t *kernel.Task, msrc *fs.MountSource, name string) *fs.Inode {
	// TODO(rahat): Namespace symlinks should contain the namespace name and the
	// inode number for the namespace instance, so for example user:[123456]. We
	// currently fake the inode number by sticking the symlink inode in its
	// place.
	target := fmt.Sprintf("%s:[%d]", name, device.ProcDevice.NextIno())
	n := &namespaceSymlink{
		Symlink: *ramfs.NewSymlink(t, fs.RootOwner, target),
		t:       t,
	}
	return newProcInode(t, n, msrc, fs.Symlink, t)
}

// Readlink reads the symlink value.
func (n *namespaceSymlink) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
	if err := checkTaskState(n.t); err != nil {
		return "", err
	}
	return n.Symlink.Readlink(ctx, inode)
}

// Getlink implements fs.InodeOperations.Getlink.
func (n *namespaceSymlink) Getlink(ctx context.Context, inode *fs.Inode) (*fs.Dirent, error) {
	if !kernel.ContextCanTrace(ctx, n.t, false) {
		return nil, syserror.EACCES
	}
	if err := checkTaskState(n.t); err != nil {
		return nil, err
	}

	// Create a new regular file to fake the namespace file.
	iops := fsutil.NewNoReadWriteFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0777), linux.PROC_SUPER_MAGIC)
	return fs.NewDirent(ctx, newProcInode(ctx, iops, inode.MountSource, fs.RegularFile, nil), n.Symlink.Target), nil
}

// newNamespaceDir returns the /proc/[pid]/ns directory containing the
// net, pid and user namespace symlinks.
func newNamespaceDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	contents := map[string]*fs.Inode{
		"net":  newNamespaceSymlink(t, msrc, "net"),
		"pid":  newNamespaceSymlink(t, msrc, "pid"),
		"user": newNamespaceSymlink(t, msrc, "user"),
	}
	d := ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0511))
	return newProcInode(t, d, msrc, fs.SpecialDirectory, t)
}

// mapsData implements seqfile.SeqSource for /proc/[pid]/maps.
//
// +stateify savable
type mapsData struct {
	t *kernel.Task
}

func newMaps(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &mapsData{t}), msrc, fs.SpecialFile, t)
}

// mm returns the task's MemoryManager without taking a reference, or nil if
// the task has none.
func (md *mapsData) mm() *mm.MemoryManager {
	var tmm *mm.MemoryManager
	md.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			// No additional reference is taken on mm here. This is safe
			// because MemoryManager.destroy is required to leave the
			// MemoryManager in a state where it's still usable as a SeqSource.
			tmm = mm
		}
	})
	return tmm
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (md *mapsData) NeedsUpdate(generation int64) bool {
	if mm := md.mm(); mm != nil {
		return mm.NeedsUpdate(generation)
	}
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (md *mapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if mm := md.mm(); mm != nil {
		return mm.ReadMapsSeqFileData(ctx, h)
	}
	// No MemoryManager: present an empty file.
	return []seqfile.SeqData{}, 0
}

// smapsData implements seqfile.SeqSource for /proc/[pid]/smaps.
//
// +stateify savable
type smapsData struct {
	t *kernel.Task
}

func newSmaps(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &smapsData{t}), msrc, fs.SpecialFile, t)
}

// mm returns the task's MemoryManager without taking a reference, or nil if
// the task has none.
func (sd *smapsData) mm() *mm.MemoryManager {
	var tmm *mm.MemoryManager
	sd.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			// No additional reference is taken on mm here. This is safe
			// because MemoryManager.destroy is required to leave the
			// MemoryManager in a state where it's still usable as a SeqSource.
			tmm = mm
		}
	})
	return tmm
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (sd *smapsData) NeedsUpdate(generation int64) bool {
	if mm := sd.mm(); mm != nil {
		return mm.NeedsUpdate(generation)
	}
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (sd *smapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if mm := sd.mm(); mm != nil {
		return mm.ReadSmapsSeqFileData(ctx, h)
	}
	// No MemoryManager: present an empty file.
	return []seqfile.SeqData{}, 0
}

// taskStatData backs /proc/[pid]/stat and /proc/[pid]/task/[tid]/stat.
//
// +stateify savable
type taskStatData struct {
	t *kernel.Task

	// If tgstats is true, accumulate fault stats (not implemented) and CPU
	// time across all tasks in t's thread group.
	tgstats bool

	// pidns is the PID namespace associated with the proc filesystem that
	// includes the file using this statData.
	pidns *kernel.PIDNamespace
}

func newTaskStat(t *kernel.Task, msrc *fs.MountSource, showSubtasks bool, pidns *kernel.PIDNamespace) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &taskStatData{t, showSubtasks /* tgstats */, pidns}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate returns whether the generation is old or not.
func (s *taskStatData) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData returns data for the SeqFile reader.
// SeqData, the current generation and where in the file the handle corresponds to.
//
// The output is the space-separated field list described in proc(5) for
// /proc/[pid]/stat; fields the sentry does not track are emitted as 0.
func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		return nil, 0
	}

	var buf bytes.Buffer

	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfTask(s.t))
	fmt.Fprintf(&buf, "(%s) ", s.t.Name())
	fmt.Fprintf(&buf, "%c ", s.t.StateStatus()[0])
	ppid := kernel.ThreadID(0)
	if parent := s.t.Parent(); parent != nil {
		ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup())
	}
	fmt.Fprintf(&buf, "%d ", ppid)
	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfProcessGroup(s.t.ThreadGroup().ProcessGroup()))
	fmt.Fprintf(&buf, "%d ", s.pidns.IDOfSession(s.t.ThreadGroup().Session()))
	fmt.Fprintf(&buf, "0 0 " /* tty_nr tpgid */)
	fmt.Fprintf(&buf, "0 " /* flags */)
	fmt.Fprintf(&buf, "0 0 0 0 " /* minflt cminflt majflt cmajflt */)
	var cputime usage.CPUStats
	if s.tgstats {
		cputime = s.t.ThreadGroup().CPUStats()
	} else {
		cputime = s.t.CPUStats()
	}
	fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime))
	cputime = s.t.ThreadGroup().JoinedChildCPUStats()
	fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime))
	fmt.Fprintf(&buf, "%d %d ", s.t.Priority(), s.t.Niceness())
	fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Count())

	// itrealvalue. Since kernel 2.6.17, this field is no longer
	// maintained, and is hard coded as 0.
	fmt.Fprintf(&buf, "0 ")

	// Start time is relative to boot time, expressed in clock ticks.
	fmt.Fprintf(&buf, "%d ", linux.ClockTFromDuration(s.t.StartTime().Sub(s.t.Kernel().Timekeeper().BootTime())))

	var vss, rss uint64
	s.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
		}
	})
	// vsize is in bytes; rss is in pages.
	fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize)

	// rsslim.
	fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Limits().Get(limits.Rss).Cur)

	fmt.Fprintf(&buf, "0 0 0 0 0 " /* startcode endcode startstack kstkesp kstkeip */)
	fmt.Fprintf(&buf, "0 0 0 0 0 " /* signal blocked sigignore sigcatch wchan */)
	fmt.Fprintf(&buf, "0 0 " /* nswap cnswap */)
	terminationSignal := linux.Signal(0)
	if s.t == s.t.ThreadGroup().Leader() {
		terminationSignal = s.t.ThreadGroup().TerminationSignal()
	}
	fmt.Fprintf(&buf, "%d ", terminationSignal)
	fmt.Fprintf(&buf, "0 0 0 " /* processor rt_priority policy */)
	fmt.Fprintf(&buf, "0 0 0 " /* delayacct_blkio_ticks guest_time cguest_time */)
	fmt.Fprintf(&buf, "0 0 0 0 0 0 0 " /* start_data end_data start_brk arg_start arg_end env_start env_end */)
	fmt.Fprintf(&buf, "0\n" /* exit_code */)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*taskStatData)(nil)}}, 0
}

// statmData implements seqfile.SeqSource for /proc/[pid]/statm.
//
// +stateify savable
type statmData struct {
	t *kernel.Task
}

func newStatm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &statmData{t}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (s *statmData) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		return nil, 0
	}

	var vss, rss uint64
	s.t.WithMuLocked(func(t *kernel.Task) {
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
		}
	})

	// statm reports sizes in pages: size resident shared text lib data dt.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/usermem.PageSize, rss/usermem.PageSize)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statmData)(nil)}}, 0
}

// statusData implements seqfile.SeqSource for /proc/[pid]/status.
//
// +stateify savable
type statusData struct {
	t     *kernel.Task
	pidns *kernel.PIDNamespace
}

func newStatus(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace) *fs.Inode {
	return newProcInode(t, seqfile.NewSeqFile(t, &statusData{t, pidns}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (s *statusData) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
//
// Emits the "Key:\tValue" lines of /proc/[pid]/status; only the fields the
// sentry tracks are included.
func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		return nil, 0
	}

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Name:\t%s\n", s.t.Name())
	fmt.Fprintf(&buf, "State:\t%s\n", s.t.StateStatus())
	fmt.Fprintf(&buf, "Tgid:\t%d\n", s.pidns.IDOfThreadGroup(s.t.ThreadGroup()))
	fmt.Fprintf(&buf, "Pid:\t%d\n", s.pidns.IDOfTask(s.t))
	ppid := kernel.ThreadID(0)
	if parent := s.t.Parent(); parent != nil {
		ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup())
	}
	fmt.Fprintf(&buf, "PPid:\t%d\n", ppid)
	tpid := kernel.ThreadID(0)
	if tracer := s.t.Tracer(); tracer != nil {
		tpid = s.pidns.IDOfTask(tracer)
	}
	fmt.Fprintf(&buf, "TracerPid:\t%d\n", tpid)
	var fds int
	var vss, rss, data uint64
	// FD table and MemoryManager may only be read under the task's mutex.
	s.t.WithMuLocked(func(t *kernel.Task) {
		if fdTable := t.FDTable(); fdTable != nil {
			fds = fdTable.Size()
		}
		if mm := t.MemoryManager(); mm != nil {
			vss = mm.VirtualMemorySize()
			rss = mm.ResidentSetSize()
			data = mm.VirtualDataSize()
		}
	})
	fmt.Fprintf(&buf, "FDSize:\t%d\n", fds)
	// Vm* values are reported in kB (>>10 converts bytes to kB).
	fmt.Fprintf(&buf, "VmSize:\t%d kB\n", vss>>10)
	fmt.Fprintf(&buf, "VmRSS:\t%d kB\n", rss>>10)
	fmt.Fprintf(&buf, "VmData:\t%d kB\n", data>>10)
	fmt.Fprintf(&buf, "Threads:\t%d\n", s.t.ThreadGroup().Count())
	creds := s.t.Credentials()
	fmt.Fprintf(&buf, "CapInh:\t%016x\n", creds.InheritableCaps)
	fmt.Fprintf(&buf, "CapPrm:\t%016x\n", creds.PermittedCaps)
	fmt.Fprintf(&buf, "CapEff:\t%016x\n", creds.EffectiveCaps)
	fmt.Fprintf(&buf, "CapBnd:\t%016x\n", creds.BoundingCaps)
	fmt.Fprintf(&buf, "Seccomp:\t%d\n", s.t.SeccompMode())
	// We unconditionally report a single NUMA node. See
	// pkg/sentry/syscalls/linux/sys_mempolicy.go.
	fmt.Fprintf(&buf, "Mems_allowed:\t1\n")
	fmt.Fprintf(&buf, "Mems_allowed_list:\t0\n")
	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statusData)(nil)}}, 0
}

// ioUsage is the /proc/<pid>/io and /proc/<pid>/task/<tid>/io data provider.
type ioUsage interface {
	// IOUsage returns the io usage data.
	IOUsage() *usage.IO
}

// ioData implements seqfile.SeqSource for /proc/[pid]/io.
//
// +stateify savable
type ioData struct {
	ioUsage
}

// newIO returns an io file. For a thread-group directory the stats are
// aggregated over the whole group; otherwise they are the single task's.
func newIO(t *kernel.Task, msrc *fs.MountSource, isThreadGroup bool) *fs.Inode {
	if isThreadGroup {
		return newProcInode(t, seqfile.NewSeqFile(t, &ioData{t.ThreadGroup()}), msrc, fs.SpecialFile, t)
	}
	return newProcInode(t, seqfile.NewSeqFile(t, &ioData{t}), msrc, fs.SpecialFile, t)
}

// NeedsUpdate returns whether the generation is old or not.
func (i *ioData) NeedsUpdate(generation int64) bool {
	return true
}

// ReadSeqFileData returns data for the SeqFile reader.
// SeqData, the current generation and where in the file the handle corresponds to.
func (i *ioData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
	if h != nil {
		return nil, 0
	}

	io := usage.IO{}
	io.Accumulate(i.IOUsage())

	var buf bytes.Buffer
	fmt.Fprintf(&buf, "rchar: %d\n", io.CharsRead)
	fmt.Fprintf(&buf, "wchar: %d\n", io.CharsWritten)
	fmt.Fprintf(&buf, "syscr: %d\n", io.ReadSyscalls)
	fmt.Fprintf(&buf, "syscw: %d\n", io.WriteSyscalls)
	fmt.Fprintf(&buf, "read_bytes: %d\n", io.BytesRead)
	fmt.Fprintf(&buf, "write_bytes: %d\n", io.BytesWritten)
	fmt.Fprintf(&buf, "cancelled_write_bytes: %d\n", io.BytesWriteCancelled)

	return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*ioData)(nil)}}, 0
}

// comm is a file containing the command name for a task.
//
// On Linux, /proc/[pid]/comm is writable, and writing to the comm file changes
// the thread name. We don't implement this yet as there are no known users of
// this feature.
//
// +stateify savable
type comm struct {
	fsutil.SimpleFileInode

	t *kernel.Task
}

// newComm returns a new comm file.
func newComm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	c := &comm{
		SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		t:               t,
	}
	return newProcInode(t, c, msrc, fs.SpecialFile, t)
}

// Check implements fs.InodeOperations.Check.
func (c *comm) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
	// This file can always be read or written by members of the same
	// thread group. See fs/proc/base.c:proc_tid_comm_permission.
	//
	// N.B. This check is currently a no-op as we don't yet support writing
	// and this file is world-readable anyways.
	t := kernel.TaskFromContext(ctx)
	if t != nil && t.ThreadGroup() == c.t.ThreadGroup() && !p.Execute {
		return true
	}

	return fs.ContextCanAccessFile(ctx, inode, p)
}

// GetFile implements fs.InodeOperations.GetFile.
func (c *comm) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &commFile{t: c.t}), nil
}

// commFile implements fs.FileOperations (read-only) for /proc/[pid]/comm.
//
// +stateify savable
type commFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoWrite              `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	t *kernel.Task
}

var _ fs.FileOperations = (*commFile)(nil)

// Read implements fs.FileOperations.Read.
func (f *commFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if offset < 0 {
		return 0, syserror.EINVAL
	}

	// The file contents are the task name followed by a newline.
	buf := []byte(f.t.Name() + "\n")
	if offset >= int64(len(buf)) {
		return 0, io.EOF
	}

	n, err := dst.CopyOut(ctx, buf[offset:])
	return int64(n), err
}

// auxvec is a file containing the auxiliary vector for a task.
//
// +stateify savable
type auxvec struct {
	fsutil.SimpleFileInode

	t *kernel.Task
}

// newAuxvec returns a new auxvec file.
func newAuxvec(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	a := &auxvec{
		SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		t:               t,
	}
	return newProcInode(t, a, msrc, fs.SpecialFile, t)
}

// GetFile implements fs.InodeOperations.GetFile.
func (a *auxvec) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &auxvecFile{t: a.t}), nil
}

// auxvecFile implements fs.FileOperations (read-only) for /proc/[pid]/auxv.
//
// +stateify savable
type auxvecFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoWrite              `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	t *kernel.Task
}

// Read implements fs.FileOperations.Read.
//
// The auxv is serialized as consecutive (key, value) pairs of 8 bytes each,
// terminated by an AT_NULL (all-zero) pair.
func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if offset < 0 {
		return 0, syserror.EINVAL
	}

	m, err := getTaskMM(f.t)
	if err != nil {
		return 0, err
	}
	defer m.DecUsers(ctx)
	auxv := m.Auxv()

	// Space for buffer with AT_NULL (0) terminator at the end.
	size := (len(auxv) + 1) * 16
	if offset >= int64(size) {
		return 0, io.EOF
	}

	buf := make([]byte, size)
	for i, e := range auxv {
		usermem.ByteOrder.PutUint64(buf[16*i:], e.Key)
		usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value))
	}

	n, err := dst.CopyOut(ctx, buf[offset:])
	return int64(n), err
}

// newOOMScore returns an oom_score file. It is a stub that always returns 0.
// TODO(gvisor.dev/issue/1967)
func newOOMScore(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	return newStaticProcInode(t, msrc, []byte("0\n"))
}

// oomScoreAdj is a file containing the oom_score adjustment for a task.
//
// +stateify savable
type oomScoreAdj struct {
	fsutil.SimpleFileInode

	t *kernel.Task
}

// oomScoreAdjFile implements fs.FileOperations (read/write) for
// /proc/[pid]/oom_score_adj. Note: no FileNoWrite embed — this file is
// writable, unlike most proc task files here.
//
// +stateify savable
type oomScoreAdjFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	t *kernel.Task
}

// newOOMScoreAdj returns an oom_score_adj file.
func newOOMScoreAdj(t *kernel.Task, msrc *fs.MountSource) *fs.Inode {
	i := &oomScoreAdj{
		SimpleFileInode: *fsutil.NewSimpleFileInode(t, fs.RootOwner, fs.FilePermsFromMode(0644), linux.PROC_SUPER_MAGIC),
		t:               t,
	}
	return newProcInode(t, i, msrc, fs.SpecialFile, t)
}

// Truncate implements fs.InodeOperations.Truncate. Truncate is called when
// O_TRUNC is specified for any kind of existing Dirent but is not called via
// (f)truncate for proc files.
func (*oomScoreAdj) Truncate(context.Context, *fs.Inode, int64) error {
	return nil
}

// GetFile implements fs.InodeOperations.GetFile.
func (o *oomScoreAdj) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &oomScoreAdjFile{t: o.t}), nil
}

// Read implements fs.FileOperations.Read.
func (f *oomScoreAdjFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if f.t.ExitState() == kernel.TaskExitDead {
		return 0, syserror.ESRCH
	}
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%d\n", f.t.OOMScoreAdj())
	if offset >= int64(buf.Len()) {
		return 0, io.EOF
	}
	n, err := dst.CopyOut(ctx, buf.Bytes()[offset:])
	return int64(n), err
}

// Write implements fs.FileOperations.Write.
func (f *oomScoreAdjFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit input size so as not to impact performance if input size is large.
	src = src.TakeFirst(usermem.PageSize - 1)

	var v int32
	n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
	if err != nil {
		return 0, err
	}

	if f.t.ExitState() == kernel.TaskExitDead {
		return 0, syserror.ESRCH
	}
	if err := f.t.SetOOMScoreAdj(v); err != nil {
		return 0, err
	}

	return n, nil
}

// LINT.ThenChange(../../fsimpl/proc/task.go|../../fsimpl/proc/task_files.go)
diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go
new file mode 100644
index 000000000..8d9517b95
--- /dev/null
+++ b/pkg/sentry/fs/proc/uid_gid_map.go
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + "io" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// idMapInodeOperations implements fs.InodeOperations for +// /proc/[pid]/{uid,gid}_map. +// +// +stateify savable +type idMapInodeOperations struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeSimpleExtendedAttributes + + t *kernel.Task + gids bool +} + +var _ fs.InodeOperations = (*idMapInodeOperations)(nil) + +// newUIDMap returns a new uid_map file. +func newUIDMap(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newIDMap(t, msrc, false /* gids */) +} + +// newGIDMap returns a new gid_map file. 
+func newGIDMap(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newIDMap(t, msrc, true /* gids */) +} + +func newIDMap(t *kernel.Task, msrc *fs.MountSource, gids bool) *fs.Inode { + return newProcInode(t, &idMapInodeOperations{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(t, fs.RootOwner, fs.FilePermsFromMode(0644), linux.PROC_SUPER_MAGIC), + t: t, + gids: gids, + }, msrc, fs.SpecialFile, t) +} + +// GetFile implements fs.InodeOperations.GetFile. +func (imio *idMapInodeOperations) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &idMapFileOperations{ + iops: imio, + }), nil +} + +// +stateify savable +type idMapFileOperations struct { + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + iops *idMapInodeOperations +} + +var _ fs.FileOperations = (*idMapFileOperations)(nil) + +// "There is an (arbitrary) limit on the number of lines in the file. As at +// Linux 3.18, the limit is five lines." - user_namespaces(7) +const maxIDMapLines = 5 + +// Read implements fs.FileOperations.Read. 
+func (imfo *idMapFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + var entries []auth.IDMapEntry + if imfo.iops.gids { + entries = imfo.iops.t.UserNamespace().GIDMap() + } else { + entries = imfo.iops.t.UserNamespace().UIDMap() + } + var buf bytes.Buffer + for _, e := range entries { + fmt.Fprintf(&buf, "%10d %10d %10d\n", e.FirstID, e.FirstParentID, e.Length) + } + if offset >= int64(buf.Len()) { + return 0, io.EOF + } + n, err := dst.CopyOut(ctx, buf.Bytes()[offset:]) + return int64(n), err +} + +// Write implements fs.FileOperations.Write. +func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + // "In addition, the number of bytes written to the file must be less than + // the system page size, and the write must be performed at the start of + // the file ..." - user_namespaces(7) + srclen := src.NumBytes() + if srclen >= usermem.PageSize || offset != 0 { + return 0, syserror.EINVAL + } + b := make([]byte, srclen) + if _, err := src.CopyIn(ctx, b); err != nil { + return 0, err + } + + // Truncate from the first NULL byte. + var nul int64 + nul = int64(bytes.IndexByte(b, 0)) + if nul == -1 { + nul = srclen + } + b = b[:nul] + // Remove the last \n. 
+ if nul >= 1 && b[nul-1] == '\n' { + b = b[:nul-1] + } + lines := bytes.SplitN(b, []byte("\n"), maxIDMapLines+1) + if len(lines) > maxIDMapLines { + return 0, syserror.EINVAL + } + + entries := make([]auth.IDMapEntry, len(lines)) + for i, l := range lines { + var e auth.IDMapEntry + _, err := fmt.Sscan(string(l), &e.FirstID, &e.FirstParentID, &e.Length) + if err != nil { + return 0, syserror.EINVAL + } + entries[i] = e + } + var err error + if imfo.iops.gids { + err = imfo.iops.t.UserNamespace().SetGIDMap(ctx, entries) + } else { + err = imfo.iops.t.UserNamespace().SetUIDMap(ctx, entries) + } + if err != nil { + return 0, err + } + + // On success, Linux's kernel/user_namespace.c:map_write() always returns + // count, even if fewer bytes were used. + return int64(srclen), nil +} + +// LINT.ThenChange(../../fsimpl/proc/task_files.go) diff --git a/pkg/sentry/fs/proc/uptime.go b/pkg/sentry/fs/proc/uptime.go new file mode 100644 index 000000000..c0f6fb802 --- /dev/null +++ b/pkg/sentry/fs/proc/uptime.go @@ -0,0 +1,91 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package proc

import (
	"fmt"
	"io"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
	ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
	"gvisor.dev/gvisor/pkg/syserror"
	"gvisor.dev/gvisor/pkg/usermem"
	"gvisor.dev/gvisor/pkg/waiter"
)

// LINT.IfChange

// uptime is a file containing the system uptime.
//
// +stateify savable
type uptime struct {
	fsutil.SimpleFileInode

	// The "start time" of the sandbox.
	startTime ktime.Time
}

// newUptime returns a new uptime file.
func newUptime(ctx context.Context, msrc *fs.MountSource) *fs.Inode {
	u := &uptime{
		SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
		startTime:       ktime.NowFromContext(ctx),
	}
	return newProcInode(ctx, u, msrc, fs.SpecialFile, nil)
}

// GetFile implements fs.InodeOperations.GetFile.
func (u *uptime) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	return fs.NewFile(ctx, dirent, flags, &uptimeFile{startTime: u.startTime}), nil
}

// uptimeFile implements fs.FileOperations (read-only) for /proc/uptime.
//
// +stateify savable
type uptimeFile struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoWrite              `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	startTime ktime.Time
}

// Read implements fs.FileOperations.Read.
func (f *uptimeFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	if offset < 0 {
		return 0, syserror.EINVAL
	}

	now := ktime.NowFromContext(ctx)
	// Pretend that we've spent zero time sleeping (second number).
	s := []byte(fmt.Sprintf("%.2f 0.00\n", now.Sub(f.startTime).Seconds()))
	if offset >= int64(len(s)) {
		return 0, io.EOF
	}

	n, err := dst.CopyOut(ctx, s[offset:])
	return int64(n), err
}

// LINT.ThenChange(../../fsimpl/proc/tasks_files.go)
diff --git a/pkg/sentry/fs/proc/version.go b/pkg/sentry/fs/proc/version.go
new file mode 100644
index 000000000..35e258ff6
--- /dev/null
+++ b/pkg/sentry/fs/proc/version.go
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"fmt"
+
+	"gvisor.dev/gvisor/pkg/context"
+	"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
+	"gvisor.dev/gvisor/pkg/sentry/kernel"
+)
+
+// LINT.IfChange
+
+// versionData backs /proc/version.
+//
+// +stateify savable
+type versionData struct {
+	// k is the owning Kernel.
+	k *kernel.Kernel
+}
+
+// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
+func (*versionData) NeedsUpdate(generation int64) bool {
+	return true
+}
+
+// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
+func (v *versionData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
+	if h != nil {
+		return nil, 0
+	}
+
+	init := v.k.GlobalInit()
+	if init == nil {
+		// Attempted to read before the init Task is created. This can
+		// only occur during startup, which should never need to read
+		// this file.
+		panic("Attempted to read version before initial Task is available")
+	}
+
+	// /proc/version takes the form:
+	//
+	// "SYSNAME version RELEASE (COMPILE_USER@COMPILE_HOST)
+	// (COMPILER_VERSION) VERSION"
+	//
+	// where:
+	//	- SYSNAME, RELEASE, and VERSION are the same as returned by
+	//	  sys_utsname
+	//	- COMPILE_USER is the user that built the kernel
+	//	- COMPILE_HOST is the hostname of the machine on which the kernel
+	//	  was built
+	//	- COMPILER_VERSION is the version reported by the building compiler
+	//
+	// Since we don't really want to expose build information to
+	// applications, those fields are omitted.
+	//
+	// FIXME(mpratt): Using Version from the init task SyscallTable
+	// disregards the different version a task may have (e.g., in a uts
+	// namespace).
+	ver := init.Leader().SyscallTable().Version
+	return []seqfile.SeqData{
+		{
+			Buf:    []byte(fmt.Sprintf("%s version %s %s\n", ver.Sysname, ver.Release, ver.Version)),
+			Handle: (*versionData)(nil),
+		},
+	}, 0
+}
+
+// LINT.ThenChange(../../fsimpl/proc/task_files.go)
diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD
new file mode 100644
index 000000000..8ca823fb3
--- /dev/null
+++ b/pkg/sentry/fs/ramfs/BUILD
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+    name = "ramfs",
+    srcs = [
+        "dir.go",
+        "socket.go",
+        "symlink.go",
+        "tree.go",
+    ],
+    visibility = ["//pkg/sentry:internal"],
+    deps = [
+        "//pkg/abi/linux",
+        "//pkg/context",
+        "//pkg/sentry/fs",
+        "//pkg/sentry/fs/anon",
+        "//pkg/sentry/fs/fsutil",
+        "//pkg/sentry/socket/unix/transport",
+        "//pkg/sync",
+        "//pkg/syserror",
+        "//pkg/usermem",
+        "//pkg/waiter",
+    ],
+)
+
+go_test(
+    name = "ramfs_test",
+    size = "small",
+    srcs = ["tree_test.go"],
+    library = ":ramfs",
+    deps = [
+        "//pkg/sentry/contexttest",
+        "//pkg/sentry/fs",
+    ],
+)
diff --git a/pkg/sentry/fs/ramfs/dir.go b/pkg/sentry/fs/ramfs/dir.go
new file mode 100644
index
000000000..bfa304552 --- /dev/null +++ b/pkg/sentry/fs/ramfs/dir.go @@ -0,0 +1,548 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ramfs provides the fundamentals for a simple in-memory filesystem. +package ramfs + +import ( + "fmt" + "syscall" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" +) + +// CreateOps represents operations to create different file types. +type CreateOps struct { + // NewDir creates a new directory. + NewDir func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) + + // NewFile creates a new file. + NewFile func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) + + // NewSymlink creates a new symlink with permissions 0777. + NewSymlink func(ctx context.Context, dir *fs.Inode, target string) (*fs.Inode, error) + + // NewBoundEndpoint creates a new socket. + NewBoundEndpoint func(ctx context.Context, dir *fs.Inode, ep transport.BoundEndpoint, perms fs.FilePermissions) (*fs.Inode, error) + + // NewFifo creates a new fifo. + NewFifo func(ctx context.Context, dir *fs.Inode, perm fs.FilePermissions) (*fs.Inode, error) +} + +// Dir represents a single directory in the filesystem. 
+// +// +stateify savable +type Dir struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeIsDirAllocate `state:"nosave"` + fsutil.InodeIsDirTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeSimpleExtendedAttributes + + // CreateOps may be provided. + // + // These may only be modified during initialization (while the application + // is not running). No sychronization is performed when accessing these + // operations during syscalls. + *CreateOps `state:"nosave"` + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // children are inodes that are in this directory. A reference is held + // on each inode while it is in the map. + children map[string]*fs.Inode + + // dentryMap is a sortedDentryMap containing entries for all children. + // Its entries are kept up-to-date with d.children. + dentryMap *fs.SortedDentryMap +} + +var _ fs.InodeOperations = (*Dir)(nil) + +// NewDir returns a new Dir with the given contents and attributes. A reference +// on each fs.Inode in the `contents` map will be donated to this Dir. +func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwner, perms fs.FilePermissions) *Dir { + d := &Dir{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, perms, linux.RAMFS_MAGIC), + } + + if contents == nil { + contents = make(map[string]*fs.Inode) + } + d.children = contents + + // Build the entries map ourselves, rather than calling addChildLocked, + // because it will be faster. 
+ entries := make(map[string]fs.DentAttr, len(contents)) + for name, inode := range contents { + entries[name] = fs.DentAttr{ + Type: inode.StableAttr.Type, + InodeID: inode.StableAttr.InodeID, + } + } + d.dentryMap = fs.NewSortedDentryMap(entries) + + // Directories have an extra link, corresponding to '.'. + d.AddLink() + + return d +} + +// addChildLocked add the child inode, inheriting its reference. +func (d *Dir) addChildLocked(ctx context.Context, name string, inode *fs.Inode) { + d.children[name] = inode + d.dentryMap.Add(name, fs.DentAttr{ + Type: inode.StableAttr.Type, + InodeID: inode.StableAttr.InodeID, + }) + + // If the child is a directory, increment this dir's link count, + // corresponding to '..' from the subdirectory. + if fs.IsDir(inode.StableAttr) { + d.AddLink() + // ctime updated below. + } + + // Given we're now adding this inode to the directory we must also + // increase its link count. Similarly we decrement it in removeChildLocked. + // + // Changing link count updates ctime. + inode.AddLink() + inode.InodeOperations.NotifyStatusChange(ctx) + + // We've change the directory. This always updates our mtime and ctime. + d.NotifyModificationAndStatusChange(ctx) +} + +// AddChild adds a child to this dir, inheriting its reference. +func (d *Dir) AddChild(ctx context.Context, name string, inode *fs.Inode) { + d.mu.Lock() + defer d.mu.Unlock() + d.addChildLocked(ctx, name, inode) +} + +// FindChild returns (child, true) if the directory contains name. +func (d *Dir) FindChild(name string) (*fs.Inode, bool) { + d.mu.Lock() + defer d.mu.Unlock() + child, ok := d.children[name] + return child, ok +} + +// Children returns the names and DentAttrs of all children. It can be used to +// implement Readdir for types that embed ramfs.Dir. +func (d *Dir) Children() ([]string, map[string]fs.DentAttr) { + d.mu.Lock() + defer d.mu.Unlock() + + // Return a copy to prevent callers from modifying our children. 
+ names, entries := d.dentryMap.GetAll() + namesCopy := make([]string, len(names)) + copy(namesCopy, names) + + entriesCopy := make(map[string]fs.DentAttr) + for k, v := range entries { + entriesCopy[k] = v + } + + return namesCopy, entriesCopy +} + +// removeChildLocked attempts to remove an entry from this directory. It +// returns the removed fs.Inode along with its reference, which callers are +// responsible for decrementing. +func (d *Dir) removeChildLocked(ctx context.Context, name string) (*fs.Inode, error) { + inode, ok := d.children[name] + if !ok { + return nil, syserror.EACCES + } + + delete(d.children, name) + d.dentryMap.Remove(name) + d.NotifyModification(ctx) + + // If the child was a subdirectory, then we must decrement this dir's + // link count which was the child's ".." directory entry. + if fs.IsDir(inode.StableAttr) { + d.DropLink() + // ctime changed below. + } + + // Given we're now removing this inode to the directory we must also + // decrease its link count. Similarly it is increased in addChildLocked. + // + // Changing link count updates ctime. + inode.DropLink() + inode.InodeOperations.NotifyStatusChange(ctx) + + // We've change the directory. This always updates our mtime and ctime. + d.NotifyModificationAndStatusChange(ctx) + + return inode, nil +} + +// Remove removes the named non-directory. +func (d *Dir) Remove(ctx context.Context, _ *fs.Inode, name string) error { + if len(name) > linux.NAME_MAX { + return syserror.ENAMETOOLONG + } + + d.mu.Lock() + defer d.mu.Unlock() + inode, err := d.removeChildLocked(ctx, name) + if err != nil { + return err + } + + // Remove our reference on the inode. + inode.DecRef() + return nil +} + +// RemoveDirectory removes the named directory. +func (d *Dir) RemoveDirectory(ctx context.Context, _ *fs.Inode, name string) error { + if len(name) > linux.NAME_MAX { + return syserror.ENAMETOOLONG + } + + d.mu.Lock() + defer d.mu.Unlock() + + // Get the child and make sure it is not empty. 
+ childInode, err := d.walkLocked(ctx, name) + if err != nil { + return err + } + if ok, err := hasChildren(ctx, childInode); err != nil { + return err + } else if ok { + return syserror.ENOTEMPTY + } + + // Child was empty. Proceed with removal. + inode, err := d.removeChildLocked(ctx, name) + if err != nil { + return err + } + + // Remove our reference on the inode. + inode.DecRef() + + return nil +} + +// Lookup loads an inode at p into a Dirent. It returns the fs.Dirent along +// with a reference. +func (d *Dir) Lookup(ctx context.Context, _ *fs.Inode, p string) (*fs.Dirent, error) { + if len(p) > linux.NAME_MAX { + return nil, syserror.ENAMETOOLONG + } + + d.mu.Lock() + defer d.mu.Unlock() + + inode, err := d.walkLocked(ctx, p) + if err != nil { + return nil, err + } + + // Take a reference on the inode before returning it. This reference + // is owned by the dirent we are about to create. + inode.IncRef() + return fs.NewDirent(ctx, inode, p), nil +} + +// walkLocked must be called with d.mu held. +func (d *Dir) walkLocked(ctx context.Context, p string) (*fs.Inode, error) { + // Lookup a child node. + if inode, ok := d.children[p]; ok { + return inode, nil + } + + // fs.InodeOperations.Lookup returns syserror.ENOENT if p + // does not exist. + return nil, syserror.ENOENT +} + +// createInodeOperationsCommon creates a new child node at this dir by calling +// makeInodeOperations. It is the common logic for creating a new child. +func (d *Dir) createInodeOperationsCommon(ctx context.Context, name string, makeInodeOperations func() (*fs.Inode, error)) (*fs.Inode, error) { + if len(name) > linux.NAME_MAX { + return nil, syserror.ENAMETOOLONG + } + + d.mu.Lock() + defer d.mu.Unlock() + + inode, err := makeInodeOperations() + if err != nil { + return nil, err + } + + d.addChildLocked(ctx, name, inode) + + return inode, nil +} + +// Create creates a new Inode with the given name and returns its File. 
+func (d *Dir) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perms fs.FilePermissions) (*fs.File, error) { + if d.CreateOps == nil || d.CreateOps.NewFile == nil { + return nil, syserror.EACCES + } + + inode, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewFile(ctx, dir, perms) + }) + if err != nil { + return nil, err + } + + // Take an extra ref on inode, which will be owned by the dirent. + inode.IncRef() + + // Create the Dirent and corresponding file. + created := fs.NewDirent(ctx, inode, name) + defer created.DecRef() + return created.Inode.GetFile(ctx, created, flags) +} + +// CreateLink returns a new link. +func (d *Dir) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error { + if d.CreateOps == nil || d.CreateOps.NewSymlink == nil { + return syserror.EACCES + } + _, err := d.createInodeOperationsCommon(ctx, newname, func() (*fs.Inode, error) { + return d.NewSymlink(ctx, dir, oldname) + }) + return err +} + +// CreateHardLink creates a new hard link. +func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error { + if len(name) > linux.NAME_MAX { + return syserror.ENAMETOOLONG + } + + d.mu.Lock() + defer d.mu.Unlock() + + // Take an extra reference on the inode and add it to our children. + target.IncRef() + + // The link count will be incremented in addChildLocked. + d.addChildLocked(ctx, name, target) + + return nil +} + +// CreateDirectory returns a new subdirectory. +func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + if d.CreateOps == nil || d.CreateOps.NewDir == nil { + return syserror.EACCES + } + _, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewDir(ctx, dir, perms) + }) + return err +} + +// Bind implements fs.InodeOperations.Bind. 
+func (d *Dir) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport.BoundEndpoint, perms fs.FilePermissions) (*fs.Dirent, error) { + if d.CreateOps == nil || d.CreateOps.NewBoundEndpoint == nil { + return nil, syserror.EACCES + } + inode, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewBoundEndpoint(ctx, dir, ep, perms) + }) + if err == syscall.EEXIST { + return nil, syscall.EADDRINUSE + } + if err != nil { + return nil, err + } + // Take another ref on inode which will be donated to the new dirent. + inode.IncRef() + return fs.NewDirent(ctx, inode, name), nil +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (d *Dir) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + if d.CreateOps == nil || d.CreateOps.NewFifo == nil { + return syserror.EACCES + } + _, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewFifo(ctx, dir, perms) + }) + return err +} + +// GetFile implements fs.InodeOperations.GetFile. +func (d *Dir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + return fs.NewFile(ctx, dirent, flags, &dirFileOperations{dir: d}), nil +} + +// Rename implements fs.InodeOperations.Rename. +func (*Dir) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return Rename(ctx, oldParent.InodeOperations, oldName, newParent.InodeOperations, newName, replacement) +} + +// Release implements fs.InodeOperation.Release. +func (d *Dir) Release(_ context.Context) { + // Drop references on all children. + d.mu.Lock() + for _, i := range d.children { + i.DecRef() + } + d.mu.Unlock() +} + +// dirFileOperations implements fs.FileOperations for a ramfs directory. 
+// +// +stateify savable +type dirFileOperations struct { + fsutil.DirFileOperations `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + // dirCursor contains the name of the last directory entry that was + // serialized. + dirCursor string + + // dir is the ramfs dir that this file corresponds to. + dir *Dir +} + +var _ fs.FileOperations = (*dirFileOperations)(nil) + +// Seek implements fs.FileOperations.Seek. +func (dfo *dirFileOperations) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return fsutil.SeekWithDirCursor(ctx, file, whence, offset, &dfo.dirCursor) +} + +// IterateDir implements DirIterator.IterateDir. +func (dfo *dirFileOperations) IterateDir(ctx context.Context, d *fs.Dirent, dirCtx *fs.DirCtx, offset int) (int, error) { + dfo.dir.mu.Lock() + defer dfo.dir.mu.Unlock() + + n, err := fs.GenericReaddir(dirCtx, dfo.dir.dentryMap) + return offset + n, err +} + +// Readdir implements FileOperations.Readdir. +func (dfo *dirFileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &dfo.dirCursor, + } + dfo.dir.InodeSimpleAttributes.NotifyAccess(ctx) + return fs.DirentReaddir(ctx, file.Dirent, dfo, root, dirCtx, file.Offset()) +} + +// hasChildren is a helper method that determines whether an arbitrary inode +// (not necessarily ramfs) has any children. +func hasChildren(ctx context.Context, inode *fs.Inode) (bool, error) { + // Take an extra ref on inode which will be given to the dirent and + // dropped when that dirent is destroyed. 
+ inode.IncRef() + d := fs.NewTransientDirent(inode) + defer d.DecRef() + + file, err := inode.GetFile(ctx, d, fs.FileFlags{Read: true}) + if err != nil { + return false, err + } + defer file.DecRef() + + ser := &fs.CollectEntriesSerializer{} + if err := file.Readdir(ctx, ser); err != nil { + return false, err + } + // We will always write "." and "..", so ignore those two. + if ser.Written() > 2 { + return true, nil + } + return false, nil +} + +// Rename renames from a *ramfs.Dir to another *ramfs.Dir. +func Rename(ctx context.Context, oldParent fs.InodeOperations, oldName string, newParent fs.InodeOperations, newName string, replacement bool) error { + op, ok := oldParent.(*Dir) + if !ok { + return syserror.EXDEV + } + np, ok := newParent.(*Dir) + if !ok { + return syserror.EXDEV + } + if len(newName) > linux.NAME_MAX { + return syserror.ENAMETOOLONG + } + + np.mu.Lock() + defer np.mu.Unlock() + + // Is this is an overwriting rename? + if replacement { + replaced, ok := np.children[newName] + if !ok { + panic(fmt.Sprintf("Dirent claims rename is replacement, but %q is missing from %+v", newName, np)) + } + + // Non-empty directories cannot be replaced. + if fs.IsDir(replaced.StableAttr) { + if ok, err := hasChildren(ctx, replaced); err != nil { + return err + } else if ok { + return syserror.ENOTEMPTY + } + } + + // Remove the replaced child and drop our reference on it. + inode, err := np.removeChildLocked(ctx, newName) + if err != nil { + return err + } + inode.DecRef() + } + + // Be careful, we may have already grabbed this mutex above. + if op != np { + op.mu.Lock() + defer op.mu.Unlock() + } + + // Do the swap. + n := op.children[oldName] + op.removeChildLocked(ctx, oldName) + np.addChildLocked(ctx, newName, n) + + return nil +} diff --git a/pkg/sentry/fs/ramfs/socket.go b/pkg/sentry/fs/ramfs/socket.go new file mode 100644 index 000000000..29ff004f2 --- /dev/null +++ b/pkg/sentry/fs/ramfs/socket.go @@ -0,0 +1,85 @@ +// Copyright 2018 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/waiter" +) + +// Socket represents a socket. +// +// +stateify savable +type Socket struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeSimpleExtendedAttributes + + // ep is the bound endpoint. + ep transport.BoundEndpoint +} + +var _ fs.InodeOperations = (*Socket)(nil) + +// NewSocket returns a new Socket. +func NewSocket(ctx context.Context, ep transport.BoundEndpoint, owner fs.FileOwner, perms fs.FilePermissions) *Socket { + return &Socket{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, perms, linux.SOCKFS_MAGIC), + ep: ep, + } +} + +// BoundEndpoint returns the socket data. +func (s *Socket) BoundEndpoint(*fs.Inode, string) transport.BoundEndpoint { + // ramfs only supports stored sentry internal sockets. 
Only gofer sockets + // care about the path argument. + return s.ep +} + +// GetFile implements fs.FileOperations.GetFile. +func (s *Socket) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &socketFileOperations{}), nil +} + +// +stateify savable +type socketFileOperations struct { + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoRead `state:"nosave"` + fsutil.FileNoSeek `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoWrite `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +var _ fs.FileOperations = (*socketFileOperations)(nil) diff --git a/pkg/sentry/fs/ramfs/symlink.go b/pkg/sentry/fs/ramfs/symlink.go new file mode 100644 index 000000000..d988349aa --- /dev/null +++ b/pkg/sentry/fs/ramfs/symlink.go @@ -0,0 +1,106 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/waiter" +) + +// Symlink represents a symlink. 
+// +// +stateify savable +type Symlink struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeSimpleExtendedAttributes + + // Target is the symlink target. + Target string +} + +var _ fs.InodeOperations = (*Symlink)(nil) + +// NewSymlink returns a new Symlink. +func NewSymlink(ctx context.Context, owner fs.FileOwner, target string) *Symlink { + // A symlink is assumed to always have permissions 0777. + return &Symlink{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, owner, fs.FilePermsFromMode(0777), linux.RAMFS_MAGIC), + Target: target, + } +} + +// UnstableAttr returns all attributes of this ramfs symlink. +func (s *Symlink) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, err := s.InodeSimpleAttributes.UnstableAttr(ctx, inode) + if err != nil { + return fs.UnstableAttr{}, err + } + uattr.Size = int64(len(s.Target)) + uattr.Usage = uattr.Size + return uattr, nil +} + +// SetPermissions on a symlink is always rejected. +func (s *Symlink) SetPermissions(context.Context, *fs.Inode, fs.FilePermissions) bool { + return false +} + +// Readlink reads the symlink value. +func (s *Symlink) Readlink(ctx context.Context, _ *fs.Inode) (string, error) { + s.NotifyAccess(ctx) + return s.Target, nil +} + +// Getlink returns ErrResolveViaReadlink, falling back to walking to the result +// of Readlink(). +func (*Symlink) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + return nil, fs.ErrResolveViaReadlink +} + +// GetFile implements fs.FileOperations.GetFile. 
+func (s *Symlink) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &symlinkFileOperations{}), nil +} + +// +stateify savable +type symlinkFileOperations struct { + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoRead `state:"nosave"` + fsutil.FileNoSeek `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoWrite `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +var _ fs.FileOperations = (*symlinkFileOperations)(nil) diff --git a/pkg/sentry/fs/ramfs/tree.go b/pkg/sentry/fs/ramfs/tree.go new file mode 100644 index 000000000..dfc9d3453 --- /dev/null +++ b/pkg/sentry/fs/ramfs/tree.go @@ -0,0 +1,77 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "fmt" + "path" + "strings" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/anon" + "gvisor.dev/gvisor/pkg/usermem" +) + +// MakeDirectoryTree constructs a ramfs tree of all directories containing +// subdirs. Each element of subdir must be a clean path, and cannot be empty or +// "/". 
+// +// All directories in the created tree will have full (read-write-execute) +// permissions, but note that file creation inside the directories is not +// actually supported because ramfs.Dir.CreateOpts == nil. However, these +// directory trees are normally "underlayed" under another filesystem (possibly +// the root), and file creation inside these directories in the overlay will be +// possible if the upper is writeable. +func MakeDirectoryTree(ctx context.Context, msrc *fs.MountSource, subdirs []string) (*fs.Inode, error) { + root := emptyDir(ctx, msrc) + for _, subdir := range subdirs { + if path.Clean(subdir) != subdir { + return nil, fmt.Errorf("cannot add subdir at an unclean path: %q", subdir) + } + if subdir == "" || subdir == "/" { + return nil, fmt.Errorf("cannot add subdir at %q", subdir) + } + makeSubdir(ctx, msrc, root.InodeOperations.(*Dir), subdir) + } + return root, nil +} + +// makeSubdir installs into root each component of subdir. The final component is +// a *ramfs.Dir. +func makeSubdir(ctx context.Context, msrc *fs.MountSource, root *Dir, subdir string) { + for _, c := range strings.Split(subdir, "/") { + if len(c) == 0 { + continue + } + child, ok := root.FindChild(c) + if !ok { + child = emptyDir(ctx, msrc) + root.AddChild(ctx, c, child) + } + root = child.InodeOperations.(*Dir) + } +} + +// emptyDir returns an empty *ramfs.Dir with all permissions granted. 
+func emptyDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + dir := NewDir(ctx, make(map[string]*fs.Inode), fs.RootOwner, fs.FilePermsFromMode(0777)) + return fs.NewInode(ctx, dir, msrc, fs.StableAttr{ + DeviceID: anon.PseudoDevice.DeviceID(), + InodeID: anon.PseudoDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} diff --git a/pkg/sentry/fs/ramfs/tree_test.go b/pkg/sentry/fs/ramfs/tree_test.go new file mode 100644 index 000000000..a6ed8b2c5 --- /dev/null +++ b/pkg/sentry/fs/ramfs/tree_test.go @@ -0,0 +1,80 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "testing" + + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +func TestMakeDirectoryTree(t *testing.T) { + + for _, test := range []struct { + name string + subdirs []string + }{ + { + name: "abs paths", + subdirs: []string{ + "/tmp", + "/tmp/a/b", + "/tmp/a/c/d", + "/tmp/c", + "/proc", + "/dev/a/b", + "/tmp", + }, + }, + { + name: "rel paths", + subdirs: []string{ + "tmp", + "tmp/a/b", + "tmp/a/c/d", + "tmp/c", + "proc", + "dev/a/b", + "tmp", + }, + }, + } { + ctx := contexttest.Context(t) + mount := fs.NewPseudoMountSource(ctx) + tree, err := MakeDirectoryTree(ctx, mount, test.subdirs) + if err != nil { + t.Errorf("%s: failed to make ramfs tree, got error %v, want nil", test.name, err) + continue + } + + // Expect to be able to find each of the paths. 
+ mm, err := fs.NewMountNamespace(ctx, tree)
+ if err != nil {
+ t.Errorf("%s: failed to create mount manager: %v", test.name, err)
+ continue
+ }
+ root := mm.Root()
+ defer mm.DecRef()
+
+ for _, p := range test.subdirs {
+ maxTraversals := uint(0)
+ if _, err := mm.FindInode(ctx, root, nil, p, &maxTraversals); err != nil {
+ t.Errorf("%s: failed to find node %s: %v", test.name, p, err)
+ break
+ }
+ }
+ }
+}
diff --git a/pkg/sentry/fs/restore.go b/pkg/sentry/fs/restore.go
new file mode 100644
index 000000000..64c6a6ae9
--- /dev/null
+++ b/pkg/sentry/fs/restore.go
@@ -0,0 +1,78 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// RestoreEnvironment is the restore environment for file systems. It consists
+// of things that change across save and restore and therefore cannot be saved
+// in the object graph.
+type RestoreEnvironment struct {
+ // MountSources maps Filesystem.Name() to mount arguments.
+ MountSources map[string][]MountArgs
+
+ // ValidateFileSize indicates file size should not change across S/R.
+ ValidateFileSize bool
+
+ // ValidateFileTimestamp indicates file modification timestamp should
+ // not change across S/R.
+ ValidateFileTimestamp bool
+}
+
+// MountArgs holds arguments to Mount.
+type MountArgs struct {
+ // Dev corresponds to the devname argument of Mount.
+ Dev string
+
+ // Flags corresponds to the flags argument of Mount. 
+ Flags MountSourceFlags + + // DataString corresponds to the data argument of Mount. + DataString string + + // DataObj corresponds to the data interface argument of Mount. + DataObj interface{} +} + +// restoreEnv holds the fs package global RestoreEnvironment. +var restoreEnv = struct { + mu sync.Mutex + env RestoreEnvironment + set bool +}{} + +// SetRestoreEnvironment sets the RestoreEnvironment. Must be called before +// state.Load and only once. +func SetRestoreEnvironment(r RestoreEnvironment) { + restoreEnv.mu.Lock() + defer restoreEnv.mu.Unlock() + if restoreEnv.set { + panic("RestoreEnvironment may only be set once") + } + restoreEnv.env = r + restoreEnv.set = true +} + +// CurrentRestoreEnvironment returns the current, read-only RestoreEnvironment. +// If no RestoreEnvironment was ever set, returns (_, false). +func CurrentRestoreEnvironment() (RestoreEnvironment, bool) { + restoreEnv.mu.Lock() + defer restoreEnv.mu.Unlock() + e := restoreEnv.env + set := restoreEnv.set + return e, set +} diff --git a/pkg/sentry/fs/save.go b/pkg/sentry/fs/save.go new file mode 100644 index 000000000..fe5c76b44 --- /dev/null +++ b/pkg/sentry/fs/save.go @@ -0,0 +1,77 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "syscall" + + "gvisor.dev/gvisor/pkg/log" +) + +// SaveInodeMappings saves a mapping of path -> inode ID for every +// user-reachable Dirent. 
+// +// The entire kernel must be frozen to call this, and filesystem state must not +// change between SaveInodeMappings and state.Save, otherwise the saved state +// of any MountSource may be incoherent. +func SaveInodeMappings() { + mountsSeen := make(map[*MountSource]struct{}) + for dirent := range allDirents.dirents { + if _, ok := mountsSeen[dirent.Inode.MountSource]; !ok { + dirent.Inode.MountSource.ResetInodeMappings() + mountsSeen[dirent.Inode.MountSource] = struct{}{} + } + } + + for dirent := range allDirents.dirents { + if dirent.Inode != nil { + // We cannot trust the root provided in the mount due + // to the overlay. We can trust the overlay to delegate + // SaveInodeMappings to the right underlying + // filesystems, though. + root := dirent + for !root.mounted && root.parent != nil { + root = root.parent + } + + // Add the mapping. + n, reachable := dirent.FullName(root) + if !reachable { + // Something has gone seriously wrong if we can't reach our root. + panic(fmt.Sprintf("Unreachable root on dirent file %s", n)) + } + dirent.Inode.MountSource.SaveInodeMapping(dirent.Inode, n) + } + } +} + +// SaveFileFsyncError converts an fs.File.Fsync error to an error that +// indicates that the fs.File was not synced sufficiently to be saved. +func SaveFileFsyncError(err error) error { + switch err { + case nil: + // We succeeded, everything is great. + return nil + case syscall.EBADF, syscall.EINVAL, syscall.EROFS, syscall.ENOSYS, syscall.EPERM: + // These errors mean that the underlying node might not be syncable, + // which we expect to be reported as such even from the gofer. + log.Infof("failed to sync during save: %v", err) + return nil + default: + // We failed in some way that indicates potential data loss. 
+ return fmt.Errorf("failed to sync: %v, data loss may occur", err) + } +} diff --git a/pkg/sentry/fs/seek.go b/pkg/sentry/fs/seek.go new file mode 100644 index 000000000..0f43918ad --- /dev/null +++ b/pkg/sentry/fs/seek.go @@ -0,0 +1,43 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// SeekWhence determines seek direction. +type SeekWhence int + +const ( + // SeekSet sets the absolute offset. + SeekSet SeekWhence = iota + + // SeekCurrent sets relative to the current position. + SeekCurrent + + // SeekEnd sets relative to the end of the file. + SeekEnd +) + +// String returns a human readable string for whence. +func (s SeekWhence) String() string { + switch s { + case SeekSet: + return "Set" + case SeekCurrent: + return "Current" + case SeekEnd: + return "End" + default: + return "Unknown" + } +} diff --git a/pkg/sentry/fs/splice.go b/pkg/sentry/fs/splice.go new file mode 100644 index 000000000..33da82868 --- /dev/null +++ b/pkg/sentry/fs/splice.go @@ -0,0 +1,181 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+ "io"
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/syserror"
+)
+
+// Splice moves data to this file, directly from another.
+//
+// Offsets are updated only if DstOffset and SrcOffset are set.
+func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64, error) {
+ // Verify basic file flag permissions.
+ if !dst.Flags().Write || !src.Flags().Read {
+ return 0, syserror.EBADF
+ }
+
+ // Check whether or not the objects being spliced are stream-oriented
+ // (i.e. pipes or sockets). For all stream-oriented files and files
+ // where a specific offset is not requested, we acquire the file mutex.
+ // This has two important side effects. First, it provides the standard
+ // protection against concurrent writes that would mutate the offset.
+ // Second, it prevents Splice deadlocks. Only internal anonymous files
+ // implement the ReadFrom and WriteTo methods directly, and since such
+ // anonymous files are referred to by a unique fs.File object, we know
+ // that the file mutex takes strict precedence over internal locks.
+ // Since we enforce lock ordering here, we can't deadlock by using
+ // a file in two different splice operations simultaneously. 
+ srcPipe := !IsRegular(src.Dirent.Inode.StableAttr)
+ dstPipe := !IsRegular(dst.Dirent.Inode.StableAttr)
+ dstAppend := !dstPipe && dst.Flags().Append
+ srcLock := srcPipe || !opts.SrcOffset
+ dstLock := dstPipe || !opts.DstOffset || dstAppend
+
+ switch {
+ case srcLock && dstLock:
+ switch {
+ case dst.UniqueID < src.UniqueID:
+ // Acquire dst first.
+ if !dst.mu.Lock(ctx) {
+ return 0, syserror.ErrInterrupted
+ }
+ if !src.mu.Lock(ctx) {
+ dst.mu.Unlock()
+ return 0, syserror.ErrInterrupted
+ }
+ case dst.UniqueID > src.UniqueID:
+ // Acquire src first.
+ if !src.mu.Lock(ctx) {
+ return 0, syserror.ErrInterrupted
+ }
+ if !dst.mu.Lock(ctx) {
+ src.mu.Unlock()
+ return 0, syserror.ErrInterrupted
+ }
+ case dst.UniqueID == src.UniqueID:
+ // Acquire only one lock; it's the same file. This is a
+ // bit of an edge case, but presumably it's possible.
+ if !dst.mu.Lock(ctx) {
+ return 0, syserror.ErrInterrupted
+ }
+ srcLock = false // Only need one unlock.
+ }
+ // Use both offsets (locked).
+ opts.DstStart = dst.offset
+ opts.SrcStart = src.offset
+ case dstLock:
+ // Acquire only dst.
+ if !dst.mu.Lock(ctx) {
+ return 0, syserror.ErrInterrupted
+ }
+ opts.DstStart = dst.offset // Safe: locked.
+ case srcLock:
+ // Acquire only src.
+ if !src.mu.Lock(ctx) {
+ return 0, syserror.ErrInterrupted
+ }
+ opts.SrcStart = src.offset // Safe: locked.
+ }
+
+ var err error
+ if dstAppend {
+ unlock := dst.Dirent.Inode.lockAppendMu(dst.Flags().Append)
+ defer unlock()
+
+ // Figure out the appropriate offset to use.
+ err = dst.offsetForAppend(ctx, &opts.DstStart)
+ }
+ if err == nil && !dstPipe {
+ // Enforce file limits.
+ limit, ok := dst.checkLimit(ctx, opts.DstStart)
+ switch {
+ case ok && limit == 0:
+ err = syserror.ErrExceedsFileSizeLimit
+ case ok && limit < opts.Length:
+ opts.Length = limit // Cap the write. 
+ } + } + if err != nil { + if dstLock { + dst.mu.Unlock() + } + if srcLock { + src.mu.Unlock() + } + return 0, err + } + + // Construct readers and writers for the splice. This is used to + // provide a safer locking path for the WriteTo/ReadFrom operations + // (since they will otherwise go through public interface methods which + // conflict with locking done above), and simplifies the fallback path. + w := &lockedWriter{ + Ctx: ctx, + File: dst, + Offset: opts.DstStart, + } + r := &lockedReader{ + Ctx: ctx, + File: src, + Offset: opts.SrcStart, + } + + // Attempt to do a WriteTo; this is likely the most efficient. + n, err := src.FileOperations.WriteTo(ctx, src, w, opts.Length, opts.Dup) + if n == 0 && err == syserror.ENOSYS && !opts.Dup { + // Attempt as a ReadFrom. If a WriteTo, a ReadFrom may also be + // more efficient than a copy if buffers are cached or readily + // available. (It's unlikely that they can actually be donated). + n, err = dst.FileOperations.ReadFrom(ctx, dst, r, opts.Length) + } + + // Support one last fallback option, but only if at least one of + // the source and destination are regular files. This is because + // if we block at some point, we could lose data. If the source is + // not a pipe then reading is not destructive; if the destination + // is a regular file, then it is guaranteed not to block writing. + if n == 0 && err == syserror.ENOSYS && !opts.Dup && (!dstPipe || !srcPipe) { + // Fallback to an in-kernel copy. + n, err = io.Copy(w, &io.LimitedReader{ + R: r, + N: opts.Length, + }) + } + + // Update offsets, if required. + if n > 0 { + if !dstPipe && !opts.DstOffset { + atomic.StoreInt64(&dst.offset, dst.offset+n) + } + if !srcPipe && !opts.SrcOffset { + atomic.StoreInt64(&src.offset, src.offset+n) + } + } + + // Drop locks. 
+ if dstLock { + dst.mu.Unlock() + } + if srcLock { + src.mu.Unlock() + } + + return n, err +} diff --git a/pkg/sentry/fs/sync.go b/pkg/sentry/fs/sync.go new file mode 100644 index 000000000..1fff8059c --- /dev/null +++ b/pkg/sentry/fs/sync.go @@ -0,0 +1,43 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// SyncType enumerates ways in which a File can be synced. +type SyncType int + +const ( + // SyncAll indicates that modified in-memory metadata and data should + // be written to backing storage. SyncAll implies SyncBackingStorage. + SyncAll SyncType = iota + + // SyncData indicates that along with modified in-memory data, only + // metadata needed to access that data needs to be written. + // + // For example, changes to access time or modification time do not + // need to be written because they are not necessary for a data read + // to be handled correctly, unlike the file size. + // + // The aim of SyncData is to reduce disk activity for applications + // that do not require all metadata to be synchronized with the disk, + // see fdatasync(2). File systems that implement SyncData as SyncAll + // do not support this optimization. + // + // SyncData implies SyncBackingStorage. + SyncData + + // SyncBackingStorage indicates that in-flight write operations to + // backing storage should be flushed. 
+ SyncBackingStorage +) diff --git a/pkg/sentry/fs/sys/BUILD b/pkg/sentry/fs/sys/BUILD new file mode 100644 index 000000000..f2e8b9932 --- /dev/null +++ b/pkg/sentry/fs/sys/BUILD @@ -0,0 +1,24 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "sys", + srcs = [ + "device.go", + "devices.go", + "fs.go", + "sys.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/kernel", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/sys/device.go b/pkg/sentry/fs/sys/device.go new file mode 100644 index 000000000..4e79dbb71 --- /dev/null +++ b/pkg/sentry/fs/sys/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sys + +import "gvisor.dev/gvisor/pkg/sentry/device" + +// sysfsDevice is the sysfs virtual device. +var sysfsDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/sys/devices.go b/pkg/sentry/fs/sys/devices.go new file mode 100644 index 000000000..b67065956 --- /dev/null +++ b/pkg/sentry/fs/sys/devices.go @@ -0,0 +1,91 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sys + +import ( + "fmt" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/kernel" +) + +// +stateify savable +type cpunum struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopRelease `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeNotVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + fsutil.InodeStaticFileGetter +} + +var _ fs.InodeOperations = (*cpunum)(nil) + +func newPossible(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + var maxCore uint + k := kernel.KernelFromContext(ctx) + if k != nil { + maxCore = k.ApplicationCores() - 1 + } + contents := []byte(fmt.Sprintf("0-%d\n", maxCore)) + + c := &cpunum{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.SYSFS_MAGIC), + InodeStaticFileGetter: fsutil.InodeStaticFileGetter{ + Contents: contents, + }, + } + return newFile(ctx, c, msrc) +} + +func newCPU(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + m := map[string]*fs.Inode{ + "online": newPossible(ctx, msrc), + "possible": newPossible(ctx, msrc), + "present": 
newPossible(ctx, msrc), + } + + // Add directories for each of the cpus. + if k := kernel.KernelFromContext(ctx); k != nil { + for i := 0; uint(i) < k.ApplicationCores(); i++ { + m[fmt.Sprintf("cpu%d", i)] = newDir(ctx, msrc, nil) + } + } + + return newDir(ctx, msrc, m) +} + +func newSystemDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + return newDir(ctx, msrc, map[string]*fs.Inode{ + "cpu": newCPU(ctx, msrc), + }) +} + +func newDevicesDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + return newDir(ctx, msrc, map[string]*fs.Inode{ + "system": newSystemDir(ctx, msrc), + }) +} diff --git a/pkg/sentry/fs/sys/fs.go b/pkg/sentry/fs/sys/fs.go new file mode 100644 index 000000000..fd03a4e38 --- /dev/null +++ b/pkg/sentry/fs/sys/fs.go @@ -0,0 +1,65 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sys + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" +) + +// filesystem is a sysfs. +// +// +stateify savable +type filesystem struct{} + +var _ fs.Filesystem = (*filesystem)(nil) + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches fs/sysfs/mount.c:sysfs_fs_type.name. +const FilesystemName = "sysfs" + +// Name is the name of the file system. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. 
+func (*filesystem) AllowUserMount() bool { + return true +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. +func (*filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, sysfs returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/sysfs/mount.c. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns a sysfs root which can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, _ interface{}) (*fs.Inode, error) { + // device is always ignored. + // sysfs ignores data, see fs/sysfs/mount.c:sysfs_mount. + + return New(ctx, fs.NewNonCachingMountSource(ctx, f, flags)), nil +} diff --git a/pkg/sentry/fs/sys/sys.go b/pkg/sentry/fs/sys/sys.go new file mode 100644 index 000000000..0891645e4 --- /dev/null +++ b/pkg/sentry/fs/sys/sys.go @@ -0,0 +1,64 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sys implements a sysfs filesystem. 
+package sys + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/usermem" +) + +func newFile(ctx context.Context, node fs.InodeOperations, msrc *fs.MountSource) *fs.Inode { + sattr := fs.StableAttr{ + DeviceID: sysfsDevice.DeviceID(), + InodeID: sysfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(ctx, node, msrc, sattr) +} + +func newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.Inode) *fs.Inode { + d := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(ctx, d, msrc, fs.StableAttr{ + DeviceID: sysfsDevice.DeviceID(), + InodeID: sysfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialDirectory, + }) +} + +// New returns the root node of a partial simple sysfs. +func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + return newDir(ctx, msrc, map[string]*fs.Inode{ + // Add a basic set of top-level directories. In Linux, these + // are dynamically added depending on the KConfig. Here we just + // add the most common ones. 
+ "block": newDir(ctx, msrc, nil), + "bus": newDir(ctx, msrc, nil), + "class": newDir(ctx, msrc, map[string]*fs.Inode{ + "power_supply": newDir(ctx, msrc, nil), + }), + "dev": newDir(ctx, msrc, nil), + "devices": newDevicesDir(ctx, msrc), + "firmware": newDir(ctx, msrc, nil), + "fs": newDir(ctx, msrc, nil), + "kernel": newDir(ctx, msrc, nil), + "module": newDir(ctx, msrc, nil), + "power": newDir(ctx, msrc, nil), + }) +} diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD new file mode 100644 index 000000000..d16cdb4df --- /dev/null +++ b/pkg/sentry/fs/timerfd/BUILD @@ -0,0 +1,19 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "timerfd", + srcs = ["timerfd.go"], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel/time", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go new file mode 100644 index 000000000..88c344089 --- /dev/null +++ b/pkg/sentry/fs/timerfd/timerfd.go @@ -0,0 +1,151 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package timerfd implements the semantics of Linux timerfd objects as +// described by timerfd_create(2). 
+package timerfd + +import ( + "sync/atomic" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/anon" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// TimerOperations implements fs.FileOperations for timerfds. +// +// +stateify savable +type TimerOperations struct { + fsutil.FileZeroSeek `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoFsync `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + events waiter.Queue `state:"zerovalue"` + timer *ktime.Timer + + // val is the number of timer expirations since the last successful call to + // Readv, Preadv, or SetTime. val is accessed using atomic memory + // operations. + val uint64 +} + +// NewFile returns a timerfd File that receives time from c. +func NewFile(ctx context.Context, c ktime.Clock) *fs.File { + dirent := fs.NewDirent(ctx, anon.NewInode(ctx), "anon_inode:[timerfd]") + // Release the initial dirent reference after NewFile takes a reference. + defer dirent.DecRef() + tops := &TimerOperations{} + tops.timer = ktime.NewTimer(c, tops) + // Timerfds reject writes, but the Write flag must be set in order to + // ensure that our Writev/Pwritev methods actually get called to return + // the correct errors. + return fs.NewFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}, tops) +} + +// Release implements fs.FileOperations.Release. +func (t *TimerOperations) Release() { + t.timer.Destroy() +} + +// PauseTimer pauses the associated Timer. +func (t *TimerOperations) PauseTimer() { + t.timer.Pause() +} + +// ResumeTimer resumes the associated Timer. 
+func (t *TimerOperations) ResumeTimer() { + t.timer.Resume() +} + +// Clock returns the associated Timer's Clock. +func (t *TimerOperations) Clock() ktime.Clock { + return t.timer.Clock() +} + +// GetTime returns the associated Timer's setting and the time at which it was +// observed. +func (t *TimerOperations) GetTime() (ktime.Time, ktime.Setting) { + return t.timer.Get() +} + +// SetTime atomically changes the associated Timer's setting, resets the number +// of expirations to 0, and returns the previous setting and the time at which +// it was observed. +func (t *TimerOperations) SetTime(s ktime.Setting) (ktime.Time, ktime.Setting) { + return t.timer.SwapAnd(s, func() { atomic.StoreUint64(&t.val, 0) }) +} + +// Readiness implements waiter.Waitable.Readiness. +func (t *TimerOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + var ready waiter.EventMask + if atomic.LoadUint64(&t.val) != 0 { + ready |= waiter.EventIn + } + return ready +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (t *TimerOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + t.events.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (t *TimerOperations) EventUnregister(e *waiter.Entry) { + t.events.EventUnregister(e) +} + +// Read implements fs.FileOperations.Read. +func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + const sizeofUint64 = 8 + if dst.NumBytes() < sizeofUint64 { + return 0, syserror.EINVAL + } + if val := atomic.SwapUint64(&t.val, 0); val != 0 { + var buf [sizeofUint64]byte + usermem.ByteOrder.PutUint64(buf[:], val) + if _, err := dst.CopyOut(ctx, buf[:]); err != nil { + // Linux does not undo consuming the number of expirations even if + // writing to userspace fails. + return 0, err + } + return sizeofUint64, nil + } + return 0, syserror.ErrWouldBlock +} + +// Write implements fs.FileOperations.Write. 
+func (t *TimerOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EINVAL +} + +// Notify implements ktime.TimerListener.Notify. +func (t *TimerOperations) Notify(exp uint64, setting ktime.Setting) (ktime.Setting, bool) { + atomic.AddUint64(&t.val, exp) + t.events.Notify(waiter.EventIn) + return ktime.Setting{}, false +} + +// Destroy implements ktime.TimerListener.Destroy. +func (t *TimerOperations) Destroy() {} diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD new file mode 100644 index 000000000..aa7199014 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/BUILD @@ -0,0 +1,50 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "tmpfs", + srcs = [ + "device.go", + "file_regular.go", + "fs.go", + "inode_file.go", + "tmpfs.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/metric", + "//pkg/safemem", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/pipe", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/usage", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "tmpfs_test", + size = "small", + srcs = ["file_test.go"], + library = ":tmpfs", + deps = [ + "//pkg/context", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/contexttest", + "//pkg/sentry/usage", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/tmpfs/device.go b/pkg/sentry/fs/tmpfs/device.go new file mode 100644 index 000000000..ae7c55ee1 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import "gvisor.dev/gvisor/pkg/sentry/device" + +// tmpfsDevice is the kernel tmpfs device. +var tmpfsDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/tmpfs/file_regular.go b/pkg/sentry/fs/tmpfs/file_regular.go new file mode 100644 index 000000000..614f8f8a1 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/file_regular.go @@ -0,0 +1,60 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// regularFileOperations implements fs.FileOperations for a regular +// tmpfs file. 
+// +// +stateify savable +type regularFileOperations struct { + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoopFsync `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + // iops is the InodeOperations of a regular tmpfs file. It is + // guaranteed to be the same as file.Dirent.Inode.InodeOperations, + // see operations that take fs.File below. + iops *fileInodeOperations +} + +// Read implements fs.FileOperations.Read. +func (r *regularFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + return r.iops.read(ctx, file, dst, offset) +} + +// Write implements fs.FileOperations.Write. +func (r *regularFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + return r.iops.write(ctx, src, offset) +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (r *regularFileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + return fsutil.GenericConfigureMMap(file, r.iops, opts) +} diff --git a/pkg/sentry/fs/tmpfs/file_test.go b/pkg/sentry/fs/tmpfs/file_test.go new file mode 100644 index 000000000..aaba35502 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/file_test.go @@ -0,0 +1,72 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "bytes" + "testing" + + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/usermem" +) + +func newFileInode(ctx context.Context) *fs.Inode { + m := fs.NewCachingMountSource(ctx, &Filesystem{}, fs.MountSourceFlags{}) + iops := NewInMemoryFile(ctx, usage.Tmpfs, fs.WithCurrentTime(ctx, fs.UnstableAttr{})) + return fs.NewInode(ctx, iops, m, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.RegularFile, + }) +} + +func newFile(ctx context.Context) *fs.File { + inode := newFileInode(ctx) + f, _ := inode.GetFile(ctx, fs.NewDirent(ctx, inode, "stub"), fs.FileFlags{Read: true, Write: true}) + return f +} + +// Allocate once, write twice. 
+func TestGrow(t *testing.T) { + ctx := contexttest.Context(t) + f := newFile(ctx) + defer f.DecRef() + + abuf := bytes.Repeat([]byte{'a'}, 68) + n, err := f.Pwritev(ctx, usermem.BytesIOSequence(abuf), 0) + if n != int64(len(abuf)) || err != nil { + t.Fatalf("Pwritev got (%d, %v) want (%d, nil)", n, err, len(abuf)) + } + + bbuf := bytes.Repeat([]byte{'b'}, 856) + n, err = f.Pwritev(ctx, usermem.BytesIOSequence(bbuf), 68) + if n != int64(len(bbuf)) || err != nil { + t.Fatalf("Pwritev got (%d, %v) want (%d, nil)", n, err, len(bbuf)) + } + + rbuf := make([]byte, len(abuf)+len(bbuf)) + n, err = f.Preadv(ctx, usermem.BytesIOSequence(rbuf), 0) + if n != int64(len(rbuf)) || err != nil { + t.Fatalf("Preadv got (%d, %v) want (%d, nil)", n, err, len(rbuf)) + } + + if want := append(abuf, bbuf...); !bytes.Equal(rbuf, want) { + t.Fatalf("Read %v, want %v", rbuf, want) + } +} diff --git a/pkg/sentry/fs/tmpfs/fs.go b/pkg/sentry/fs/tmpfs/fs.go new file mode 100644 index 000000000..bc117ca6a --- /dev/null +++ b/pkg/sentry/fs/tmpfs/fs.go @@ -0,0 +1,155 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "fmt" + "strconv" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" +) + +const ( + // Set initial permissions for the root directory. + modeKey = "mode" + + // UID for the root directory. 
+ rootUIDKey = "uid" + + // GID for the root directory. + rootGIDKey = "gid" + + // cacheKey sets the caching policy for the mount. + cacheKey = "cache" + + // cacheAll uses the virtual file system cache for everything (default). + cacheAll = "cache" + + // cacheRevalidate allows dirents to be cached, but revalidates them on each + // lookup. + cacheRevalidate = "revalidate" + + // Permissions that exceed modeMask will be rejected. + modeMask = 01777 + + // Default permissions are read/write/execute. + defaultMode = 0777 +) + +// Filesystem is a tmpfs. +// +// +stateify savable +type Filesystem struct{} + +var _ fs.Filesystem = (*Filesystem)(nil) + +func init() { + fs.RegisterFilesystem(&Filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches mm/shmem.c:shmem_fs_type.name. +const FilesystemName = "tmpfs" + +// Name is the name of the file system. +func (*Filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. +func (*Filesystem) AllowUserMount() bool { + return true +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. +func (*Filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, tmpfs returns FS_USERNS_MOUNT, see mm/shmem.c. +func (*Filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns a tmpfs root that can be positioned in the vfs. +func (f *Filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, _ interface{}) (*fs.Inode, error) { + // device is always ignored. + + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Parse the root directory permissions. 
+ perms := fs.FilePermsFromMode(defaultMode) + if m, ok := options[modeKey]; ok { + i, err := strconv.ParseUint(m, 8, 32) + if err != nil { + return nil, fmt.Errorf("mode value not parsable 'mode=%s': %v", m, err) + } + if i&^modeMask != 0 { + return nil, fmt.Errorf("invalid mode %q: must be less than %o", m, modeMask) + } + perms = fs.FilePermsFromMode(linux.FileMode(i)) + delete(options, modeKey) + } + + creds := auth.CredentialsFromContext(ctx) + owner := fs.FileOwnerFromContext(ctx) + if uidstr, ok := options[rootUIDKey]; ok { + uid, err := strconv.ParseInt(uidstr, 10, 32) + if err != nil { + return nil, fmt.Errorf("uid value not parsable 'uid=%d': %v", uid, err) + } + owner.UID = creds.UserNamespace.MapToKUID(auth.UID(uid)) + delete(options, rootUIDKey) + } + + if gidstr, ok := options[rootGIDKey]; ok { + gid, err := strconv.ParseInt(gidstr, 10, 32) + if err != nil { + return nil, fmt.Errorf("gid value not parsable 'gid=%d': %v", gid, err) + } + owner.GID = creds.UserNamespace.MapToKGID(auth.GID(gid)) + delete(options, rootGIDKey) + } + + // Construct a mount which will follow the cache options provided. + // + // TODO(gvisor.dev/issue/179): There should be no reason to disable + // caching once bind mounts are properly supported. + var msrc *fs.MountSource + switch options[cacheKey] { + case "", cacheAll: + msrc = fs.NewCachingMountSource(ctx, f, flags) + case cacheRevalidate: + msrc = fs.NewRevalidatingMountSource(ctx, f, flags) + default: + return nil, fmt.Errorf("invalid cache policy option %q", options[cacheKey]) + } + delete(options, cacheKey) + + // Fail if the caller passed us more options than we can parse. They may be + // expecting us to set something we can't set. + if len(options) > 0 { + return nil, fmt.Errorf("unsupported mount options: %v", options) + } + + // Construct the tmpfs root. 
+ return NewDir(ctx, nil, owner, perms, msrc), nil +} diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go new file mode 100644 index 000000000..1dc75291d --- /dev/null +++ b/pkg/sentry/fs/tmpfs/inode_file.go @@ -0,0 +1,687 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "fmt" + "io" + "math" + "time" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/metric" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/kernel" + ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time" + "gvisor.dev/gvisor/pkg/sentry/memmap" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +var ( + opensRO = metric.MustCreateNewUint64Metric("/in_memory_file/opens_ro", false /* sync */, "Number of times an in-memory file was opened in read-only mode.") + opensW = metric.MustCreateNewUint64Metric("/in_memory_file/opens_w", false /* sync */, "Number of times an in-memory file was opened in write mode.") + reads = metric.MustCreateNewUint64Metric("/in_memory_file/reads", false /* sync */, "Number of in-memory file reads.") + readWait = metric.MustCreateNewUint64NanosecondsMetric("/in_memory_file/read_wait", false /* sync */, "Time waiting on in-memory file reads, 
in nanoseconds.") +) + +// fileInodeOperations implements fs.InodeOperations for a regular tmpfs file. +// These files are backed by pages allocated from a platform.Memory, and may be +// directly mapped. +// +// Lock order: attrMu -> mapsMu -> dataMu. +// +// +stateify savable +type fileInodeOperations struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + + fsutil.InodeSimpleExtendedAttributes + + // kernel is used to allocate memory that stores the file's contents. + kernel *kernel.Kernel + + // memUsage is the default memory usage that will be reported by this file. + memUsage usage.MemoryKind + + attrMu sync.Mutex `state:"nosave"` + + // attr contains the unstable metadata for the file. + // + // attr is protected by attrMu. attr.Size is protected by both attrMu + // and dataMu; reading it requires locking either mutex, while mutating + // it requires locking both. + attr fs.UnstableAttr + + mapsMu sync.Mutex `state:"nosave"` + + // mappings tracks mappings of the file into memmap.MappingSpaces. + // + // mappings is protected by mapsMu. + mappings memmap.MappingSet + + // writableMappingPages tracks how many pages of virtual memory are mapped + // as potentially writable from this file. If a page has multiple mappings, + // each mapping is counted separately. + // + // This counter is susceptible to overflow as we can potentially count + // mappings from many VMAs. We count pages rather than bytes to slightly + // mitigate this. + // + // Protected by mapsMu. + writableMappingPages uint64 + + dataMu sync.RWMutex `state:"nosave"` + + // data maps offsets into the file to offsets into platform.Memory() that + // store the file's data. + // + // data is protected by dataMu. + data fsutil.FileRangeSet + + // seals represents file seals on this inode. + // + // Protected by dataMu. 
+ seals uint32 +} + +var _ fs.InodeOperations = (*fileInodeOperations)(nil) + +// NewInMemoryFile returns a new file backed by Kernel.MemoryFile(). +func NewInMemoryFile(ctx context.Context, usage usage.MemoryKind, uattr fs.UnstableAttr) fs.InodeOperations { + return &fileInodeOperations{ + attr: uattr, + kernel: kernel.KernelFromContext(ctx), + memUsage: usage, + seals: linux.F_SEAL_SEAL, + } +} + +// NewMemfdInode creates a new inode backing a memfd. Memory used by the memfd +// is backed by platform memory. +func NewMemfdInode(ctx context.Context, allowSeals bool) *fs.Inode { + // Per Linux, mm/shmem.c:__shmem_file_setup(), memfd inodes are set up with + // S_IRWXUGO. + perms := fs.PermMask{Read: true, Write: true, Execute: true} + iops := NewInMemoryFile(ctx, usage.Tmpfs, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: fs.FilePermissions{User: perms, Group: perms, Other: perms}}).(*fileInodeOperations) + if allowSeals { + iops.seals = 0 + } + return fs.NewInode(ctx, iops, fs.NewNonCachingMountSource(ctx, nil, fs.MountSourceFlags{}), fs.StableAttr{ + Type: fs.RegularFile, + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + }) +} + +// Release implements fs.InodeOperations.Release. +func (f *fileInodeOperations) Release(context.Context) { + f.dataMu.Lock() + defer f.dataMu.Unlock() + f.data.DropAll(f.kernel.MemoryFile()) +} + +// Mappable implements fs.InodeOperations.Mappable. +func (f *fileInodeOperations) Mappable(*fs.Inode) memmap.Mappable { + return f +} + +// Rename implements fs.InodeOperations.Rename. +func (*fileInodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return rename(ctx, oldParent, oldName, newParent, newName, replacement) +} + +// GetFile implements fs.InodeOperations.GetFile. 
+func (f *fileInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + if flags.Write { + opensW.Increment() + } else if flags.Read { + opensRO.Increment() + } + flags.Pread = true + flags.Pwrite = true + return fs.NewFile(ctx, d, flags, &regularFileOperations{iops: f}), nil +} + +// UnstableAttr returns unstable attributes of this tmpfs file. +func (f *fileInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + f.attrMu.Lock() + f.dataMu.RLock() + attr := f.attr + attr.Usage = int64(f.data.Span()) + f.dataMu.RUnlock() + f.attrMu.Unlock() + return attr, nil +} + +// Check implements fs.InodeOperations.Check. +func (f *fileInodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (f *fileInodeOperations) SetPermissions(ctx context.Context, _ *fs.Inode, p fs.FilePermissions) bool { + f.attrMu.Lock() + f.attr.SetPermissions(ctx, p) + f.attrMu.Unlock() + return true +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (f *fileInodeOperations) SetTimestamps(ctx context.Context, _ *fs.Inode, ts fs.TimeSpec) error { + f.attrMu.Lock() + f.attr.SetTimestamps(ctx, ts) + f.attrMu.Unlock() + return nil +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (f *fileInodeOperations) SetOwner(ctx context.Context, _ *fs.Inode, owner fs.FileOwner) error { + f.attrMu.Lock() + f.attr.SetOwner(ctx, owner) + f.attrMu.Unlock() + return nil +} + +// Truncate implements fs.InodeOperations.Truncate. +func (f *fileInodeOperations) Truncate(ctx context.Context, _ *fs.Inode, size int64) error { + f.attrMu.Lock() + defer f.attrMu.Unlock() + + f.dataMu.Lock() + oldSize := f.attr.Size + + // Check if current seals allow truncation. 
+ switch { + case size > oldSize && f.seals&linux.F_SEAL_GROW != 0: // Grow sealed + fallthrough + case oldSize > size && f.seals&linux.F_SEAL_SHRINK != 0: // Shrink sealed + f.dataMu.Unlock() + return syserror.EPERM + } + + if oldSize != size { + f.attr.Size = size + // Update mtime and ctime. + now := ktime.NowFromContext(ctx) + f.attr.ModificationTime = now + f.attr.StatusChangeTime = now + } + f.dataMu.Unlock() + + // Nothing left to do unless shrinking the file. + if oldSize <= size { + return nil + } + + oldpgend := fs.OffsetPageEnd(oldSize) + newpgend := fs.OffsetPageEnd(size) + + // Invalidate past translations of truncated pages. + if newpgend != oldpgend { + f.mapsMu.Lock() + f.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{ + // Compare Linux's mm/shmem.c:shmem_setattr() => + // mm/memory.c:unmap_mapping_range(evencows=1). + InvalidatePrivate: true, + }) + f.mapsMu.Unlock() + } + + // We are now guaranteed that there are no translations of truncated pages, + // and can remove them. + f.dataMu.Lock() + defer f.dataMu.Unlock() + f.data.Truncate(uint64(size), f.kernel.MemoryFile()) + + return nil +} + +// Allocate implements fs.InodeOperations.Allocate. +func (f *fileInodeOperations) Allocate(ctx context.Context, _ *fs.Inode, offset, length int64) error { + newSize := offset + length + + f.attrMu.Lock() + defer f.attrMu.Unlock() + f.dataMu.Lock() + defer f.dataMu.Unlock() + + if newSize <= f.attr.Size { + return nil + } + + // Check if current seals allow growth. + if f.seals&linux.F_SEAL_GROW != 0 { + return syserror.EPERM + } + + f.attr.Size = newSize + + now := ktime.NowFromContext(ctx) + f.attr.ModificationTime = now + f.attr.StatusChangeTime = now + + return nil +} + +// AddLink implements fs.InodeOperations.AddLink. +func (f *fileInodeOperations) AddLink() { + f.attrMu.Lock() + f.attr.Links++ + f.attrMu.Unlock() +} + +// DropLink implements fs.InodeOperations.DropLink. 
+func (f *fileInodeOperations) DropLink() { + f.attrMu.Lock() + f.attr.Links-- + f.attrMu.Unlock() +} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. +func (f *fileInodeOperations) NotifyStatusChange(ctx context.Context) { + f.attrMu.Lock() + f.attr.StatusChangeTime = ktime.NowFromContext(ctx) + f.attrMu.Unlock() +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (*fileInodeOperations) IsVirtual() bool { + return true +} + +// StatFS implements fs.InodeOperations.StatFS. +func (*fileInodeOperations) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +func (f *fileInodeOperations) read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + var start time.Time + if fs.RecordWaitTime { + start = time.Now() + } + reads.Increment() + // Zero length reads for tmpfs are no-ops. + if dst.NumBytes() == 0 { + fs.IncrementWait(readWait, start) + return 0, nil + } + + // Have we reached EOF? We check for this again in + // fileReadWriter.ReadToBlocks to avoid holding f.attrMu (which would + // serialize reads) or f.dataMu (which would violate lock ordering), but + // check here first (before calling into MM) since reading at EOF is + // common: getting a return value of 0 from a read syscall is the only way + // to detect EOF. + // + // TODO(jamieliu): Separate out f.attr.Size and use atomics instead of + // f.dataMu. + f.dataMu.RLock() + size := f.attr.Size + f.dataMu.RUnlock() + if offset >= size { + fs.IncrementWait(readWait, start) + return 0, io.EOF + } + + n, err := dst.CopyOutFrom(ctx, &fileReadWriter{f, offset}) + if !file.Dirent.Inode.MountSource.Flags.NoAtime { + // Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed(). 
+ f.attrMu.Lock() + f.attr.AccessTime = ktime.NowFromContext(ctx) + f.attrMu.Unlock() + } + fs.IncrementWait(readWait, start) + return n, err +} + +func (f *fileInodeOperations) write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + // Zero length writes for tmpfs are no-ops. + if src.NumBytes() == 0 { + return 0, nil + } + + f.attrMu.Lock() + defer f.attrMu.Unlock() + // Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time(). + now := ktime.NowFromContext(ctx) + f.attr.ModificationTime = now + f.attr.StatusChangeTime = now + return src.CopyInTo(ctx, &fileReadWriter{f, offset}) +} + +type fileReadWriter struct { + f *fileInodeOperations + offset int64 +} + +// ReadToBlocks implements safemem.Reader.ReadToBlocks. +func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { + rw.f.dataMu.RLock() + defer rw.f.dataMu.RUnlock() + + // Compute the range to read. + if rw.offset >= rw.f.attr.Size { + return 0, io.EOF + } + end := fs.ReadEndOffset(rw.offset, int64(dsts.NumBytes()), rw.f.attr.Size) + if end == rw.offset { // dsts.NumBytes() == 0? + return 0, nil + } + + mf := rw.f.kernel.MemoryFile() + var done uint64 + seg, gap := rw.f.data.Find(uint64(rw.offset)) + for rw.offset < end { + mr := memmap.MappableRange{uint64(rw.offset), uint64(end)} + switch { + case seg.Ok(): + // Get internal mappings. + ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read) + if err != nil { + return done, err + } + + // Copy from internal mappings. + n, err := safemem.CopySeq(dsts, ims) + done += n + rw.offset += int64(n) + dsts = dsts.DropFirst64(n) + if err != nil { + return done, err + } + + // Continue. + seg, gap = seg.NextNonEmpty() + + case gap.Ok(): + // Tmpfs holes are zero-filled. 
+ gapmr := gap.Range().Intersect(mr) + dst := dsts.TakeFirst64(gapmr.Length()) + n, err := safemem.ZeroSeq(dst) + done += n + rw.offset += int64(n) + dsts = dsts.DropFirst64(n) + if err != nil { + return done, err + } + + // Continue. + seg, gap = gap.NextSegment(), fsutil.FileRangeGapIterator{} + + default: + break + } + } + return done, nil +} + +// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. +func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) { + rw.f.dataMu.Lock() + defer rw.f.dataMu.Unlock() + + // Compute the range to write. + if srcs.NumBytes() == 0 { + // Nothing to do. + return 0, nil + } + end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes())) + if end == math.MaxInt64 { + // Overflow. + return 0, syserror.EINVAL + } + + // Check if seals prevent either file growth or all writes. + switch { + case rw.f.seals&linux.F_SEAL_WRITE != 0: // Write sealed + return 0, syserror.EPERM + case end > rw.f.attr.Size && rw.f.seals&linux.F_SEAL_GROW != 0: // Grow sealed + // When growth is sealed, Linux effectively allows writes which would + // normally grow the file to partially succeed up to the current EOF, + // rounded down to the page boundary before the EOF. + // + // This happens because writes (and thus the growth check) for tmpfs + // files proceed page-by-page on Linux, and the final write to the page + // containing EOF fails, resulting in a partial write up to the start of + // that page. + // + // To emulate this behaviour, artificially truncate the write to the + // start of the page containing the current EOF. + // + // See Linux, mm/filemap.c:generic_perform_write() and + // mm/shmem.c:shmem_write_begin(). + if pgstart := int64(usermem.Addr(rw.f.attr.Size).RoundDown()); end > pgstart { + end = pgstart + } + if end <= rw.offset { + // Truncation would result in no data being written. 
+ return 0, syserror.EPERM + } + } + + defer func() { + // If the write ends beyond the file's previous size, it causes the + // file to grow. + if rw.offset > rw.f.attr.Size { + rw.f.attr.Size = rw.offset + } + }() + + mf := rw.f.kernel.MemoryFile() + // Page-aligned mr for when we need to allocate memory. RoundUp can't + // overflow since end is an int64. + pgstartaddr := usermem.Addr(rw.offset).RoundDown() + pgendaddr, _ := usermem.Addr(end).RoundUp() + pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)} + + var done uint64 + seg, gap := rw.f.data.Find(uint64(rw.offset)) + for rw.offset < end { + mr := memmap.MappableRange{uint64(rw.offset), uint64(end)} + switch { + case seg.Ok(): + // Get internal mappings. + ims, err := mf.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write) + if err != nil { + return done, err + } + + // Copy to internal mappings. + n, err := safemem.CopySeq(ims, srcs) + done += n + rw.offset += int64(n) + srcs = srcs.DropFirst64(n) + if err != nil { + return done, err + } + + // Continue. + seg, gap = seg.NextNonEmpty() + + case gap.Ok(): + // Allocate memory for the write. + gapMR := gap.Range().Intersect(pgMR) + fr, err := mf.Allocate(gapMR.Length(), rw.f.memUsage) + if err != nil { + return done, err + } + + // Write to that memory as usual. + seg, gap = rw.f.data.Insert(gap, gapMR, fr.Start), fsutil.FileRangeGapIterator{} + + default: + break + } + } + return done, nil +} + +// AddMapping implements memmap.Mappable.AddMapping. +func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) error { + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + + f.dataMu.RLock() + defer f.dataMu.RUnlock() + + // Reject writable mapping if F_SEAL_WRITE is set. 
+ if f.seals&linux.F_SEAL_WRITE != 0 && writable { + return syserror.EPERM + } + + f.mappings.AddMapping(ms, ar, offset, writable) + if writable { + pagesBefore := f.writableMappingPages + + // ar is guaranteed to be page aligned per memmap.Mappable. + f.writableMappingPages += uint64(ar.Length() / usermem.PageSize) + + if f.writableMappingPages < pagesBefore { + panic(fmt.Sprintf("Overflow while mapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages)) + } + } + + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64, writable bool) { + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + + f.mappings.RemoveMapping(ms, ar, offset, writable) + + if writable { + pagesBefore := f.writableMappingPages + + // ar is guaranteed to be page aligned per memmap.Mappable. + f.writableMappingPages -= uint64(ar.Length() / usermem.PageSize) + + if f.writableMappingPages > pagesBefore { + panic(fmt.Sprintf("Underflow while unmapping potentially writable pages pointing to a tmpfs file. Before %v, after %v", pagesBefore, f.writableMappingPages)) + } + } +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64, writable bool) error { + return f.AddMapping(ctx, ms, dstAR, offset, writable) +} + +// Translate implements memmap.Mappable.Translate. +func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + f.dataMu.Lock() + defer f.dataMu.Unlock() + + // Constrain translations to f.attr.Size (rounded up) to prevent + // translation to pages that may be concurrently truncated. 
+ pgend := fs.OffsetPageEnd(f.attr.Size) + var beyondEOF bool + if required.End > pgend { + if required.Start >= pgend { + return nil, &memmap.BusError{io.EOF} + } + beyondEOF = true + required.End = pgend + } + if optional.End > pgend { + optional.End = pgend + } + + mf := f.kernel.MemoryFile() + cerr := f.data.Fill(ctx, required, optional, mf, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) { + // Newly-allocated pages are zeroed, so we don't need to do anything. + return dsts.NumBytes(), nil + }) + + var ts []memmap.Translation + var translatedEnd uint64 + for seg := f.data.FindSegment(required.Start); seg.Ok() && seg.Start() < required.End; seg, _ = seg.NextNonEmpty() { + segMR := seg.Range().Intersect(optional) + ts = append(ts, memmap.Translation{ + Source: segMR, + File: mf, + Offset: seg.FileRangeOf(segMR).Start, + Perms: usermem.AnyAccess, + }) + translatedEnd = segMR.End + } + + // Don't return the error returned by f.data.Fill if it occurred outside of + // required. + if translatedEnd < required.End && cerr != nil { + return ts, &memmap.BusError{cerr} + } + if beyondEOF { + return ts, &memmap.BusError{io.EOF} + } + return ts, nil +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (f *fileInodeOperations) InvalidateUnsavable(ctx context.Context) error { + return nil +} + +// GetSeals returns the current set of seals on a memfd inode. +func GetSeals(inode *fs.Inode) (uint32, error) { + if f, ok := inode.InodeOperations.(*fileInodeOperations); ok { + f.dataMu.RLock() + defer f.dataMu.RUnlock() + return f.seals, nil + } + // Not a memfd inode. + return 0, syserror.EINVAL +} + +// AddSeals adds new file seals to a memfd inode. 
+func AddSeals(inode *fs.Inode, val uint32) error { + if f, ok := inode.InodeOperations.(*fileInodeOperations); ok { + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + f.dataMu.Lock() + defer f.dataMu.Unlock() + + if f.seals&linux.F_SEAL_SEAL != 0 { + // Seal applied which prevents addition of any new seals. + return syserror.EPERM + } + + // F_SEAL_WRITE can only be added if there are no active writable maps. + if f.seals&linux.F_SEAL_WRITE == 0 && val&linux.F_SEAL_WRITE != 0 { + if f.writableMappingPages > 0 { + return syserror.EBUSY + } + } + + // Seals can only be added, never removed. + f.seals |= val + return nil + } + // Not a memfd inode. + return syserror.EINVAL +} diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go new file mode 100644 index 000000000..b095312fe --- /dev/null +++ b/pkg/sentry/fs/tmpfs/tmpfs.go @@ -0,0 +1,356 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tmpfs is a filesystem implementation backed by memory. 
+package tmpfs + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/fs/ramfs" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/kernel/pipe" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sentry/usage" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" +) + +var fsInfo = fs.Info{ + Type: linux.TMPFS_MAGIC, + + // TODO(b/29637826): allow configuring a tmpfs size and enforce it. + TotalBlocks: 0, + FreeBlocks: 0, +} + +// rename implements fs.InodeOperations.Rename for tmpfs nodes. +func rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + // Don't allow renames across different mounts. + if newParent.MountSource != oldParent.MountSource { + return syserror.EXDEV + } + + op := oldParent.InodeOperations.(*Dir) + np := newParent.InodeOperations.(*Dir) + return ramfs.Rename(ctx, op.ramfsDir, oldName, np.ramfsDir, newName, replacement) +} + +// Dir is a directory. +// +// +stateify savable +type Dir struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeIsDirTruncate `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + // Ideally this would be embedded, so that we "inherit" all of the + // InodeOperations implemented by ramfs.Dir for free. + // + // However, ramfs.dirFileOperations stores a pointer to a ramfs.Dir, + // and our save/restore package does not allow saving a pointer to an + // embedded field elsewhere. + // + // Thus, we must make the ramfs.Dir is a field, and we delegate all the + // InodeOperation methods to it. 
+ ramfsDir *ramfs.Dir + + // kernel is used to allocate memory as storage for tmpfs Files. + kernel *kernel.Kernel +} + +var _ fs.InodeOperations = (*Dir)(nil) + +// NewDir returns a new directory. +func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode { + d := &Dir{ + ramfsDir: ramfs.NewDir(ctx, contents, owner, perms), + kernel: kernel.KernelFromContext(ctx), + } + + // Manually set the CreateOps. + d.ramfsDir.CreateOps = d.newCreateOps() + + return fs.NewInode(ctx, d, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +// afterLoad is invoked by stateify. +func (d *Dir) afterLoad() { + // Per NewDir, manually set the CreateOps. + d.ramfsDir.CreateOps = d.newCreateOps() +} + +// GetFile implements fs.InodeOperations.GetFile. +func (d *Dir) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return d.ramfsDir.GetFile(ctx, dirent, flags) +} + +// AddLink implements fs.InodeOperations.AddLink. +func (d *Dir) AddLink() { + d.ramfsDir.AddLink() +} + +// DropLink implements fs.InodeOperations.DropLink. +func (d *Dir) DropLink() { + d.ramfsDir.DropLink() +} + +// Bind implements fs.InodeOperations.Bind. +func (d *Dir) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport.BoundEndpoint, perms fs.FilePermissions) (*fs.Dirent, error) { + return d.ramfsDir.Bind(ctx, dir, name, ep, perms) +} + +// Create implements fs.InodeOperations.Create. +func (d *Dir) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perms fs.FilePermissions) (*fs.File, error) { + return d.ramfsDir.Create(ctx, dir, name, flags, perms) +} + +// CreateLink implements fs.InodeOperations.CreateLink. 
+func (d *Dir) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error { + return d.ramfsDir.CreateLink(ctx, dir, oldname, newname) +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error { + return d.ramfsDir.CreateHardLink(ctx, dir, target, name) +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + return d.ramfsDir.CreateDirectory(ctx, dir, name, perms) +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (d *Dir) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + return d.ramfsDir.CreateFifo(ctx, dir, name, perms) +} + +// GetXattr implements fs.InodeOperations.GetXattr. +func (d *Dir) GetXattr(ctx context.Context, i *fs.Inode, name string, size uint64) (string, error) { + return d.ramfsDir.GetXattr(ctx, i, name, size) +} + +// SetXattr implements fs.InodeOperations.SetXattr. +func (d *Dir) SetXattr(ctx context.Context, i *fs.Inode, name, value string, flags uint32) error { + return d.ramfsDir.SetXattr(ctx, i, name, value, flags) +} + +// ListXattr implements fs.InodeOperations.ListXattr. +func (d *Dir) ListXattr(ctx context.Context, i *fs.Inode, size uint64) (map[string]struct{}, error) { + return d.ramfsDir.ListXattr(ctx, i, size) +} + +// RemoveXattr implements fs.InodeOperations.RemoveXattr. +func (d *Dir) RemoveXattr(ctx context.Context, i *fs.Inode, name string) error { + return d.ramfsDir.RemoveXattr(ctx, i, name) +} + +// Lookup implements fs.InodeOperations.Lookup. +func (d *Dir) Lookup(ctx context.Context, i *fs.Inode, p string) (*fs.Dirent, error) { + return d.ramfsDir.Lookup(ctx, i, p) +} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. 
+func (d *Dir) NotifyStatusChange(ctx context.Context) { + d.ramfsDir.NotifyStatusChange(ctx) +} + +// Remove implements fs.InodeOperations.Remove. +func (d *Dir) Remove(ctx context.Context, i *fs.Inode, name string) error { + return d.ramfsDir.Remove(ctx, i, name) +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +func (d *Dir) RemoveDirectory(ctx context.Context, i *fs.Inode, name string) error { + return d.ramfsDir.RemoveDirectory(ctx, i, name) +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (d *Dir) UnstableAttr(ctx context.Context, i *fs.Inode) (fs.UnstableAttr, error) { + return d.ramfsDir.UnstableAttr(ctx, i) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (d *Dir) SetPermissions(ctx context.Context, i *fs.Inode, p fs.FilePermissions) bool { + return d.ramfsDir.SetPermissions(ctx, i, p) +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (d *Dir) SetOwner(ctx context.Context, i *fs.Inode, owner fs.FileOwner) error { + return d.ramfsDir.SetOwner(ctx, i, owner) +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (d *Dir) SetTimestamps(ctx context.Context, i *fs.Inode, ts fs.TimeSpec) error { + return d.ramfsDir.SetTimestamps(ctx, i, ts) +} + +// newCreateOps builds the custom CreateOps for this Dir. +func (d *Dir) newCreateOps() *ramfs.CreateOps { + return &ramfs.CreateOps{ + NewDir: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) { + return NewDir(ctx, nil, fs.FileOwnerFromContext(ctx), perms, dir.MountSource), nil + }, + NewFile: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) { + uattr := fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: perms, + // Always start unlinked. 
+ Links: 0, + }) + iops := NewInMemoryFile(ctx, usage.Tmpfs, uattr) + return fs.NewInode(ctx, iops, dir.MountSource, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.RegularFile, + }), nil + }, + NewSymlink: func(ctx context.Context, dir *fs.Inode, target string) (*fs.Inode, error) { + return NewSymlink(ctx, target, fs.FileOwnerFromContext(ctx), dir.MountSource), nil + }, + NewBoundEndpoint: func(ctx context.Context, dir *fs.Inode, socket transport.BoundEndpoint, perms fs.FilePermissions) (*fs.Inode, error) { + return NewSocket(ctx, socket, fs.FileOwnerFromContext(ctx), perms, dir.MountSource), nil + }, + NewFifo: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) { + return NewFifo(ctx, fs.FileOwnerFromContext(ctx), perms, dir.MountSource), nil + }, + } +} + +// Rename implements fs.InodeOperations.Rename. +func (d *Dir) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return rename(ctx, oldParent, oldName, newParent, newName, replacement) +} + +// StatFS implements fs.InodeOperations.StatFS. +func (*Dir) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +// Allocate implements fs.InodeOperations.Allocate. +func (d *Dir) Allocate(ctx context.Context, node *fs.Inode, offset, length int64) error { + return d.ramfsDir.Allocate(ctx, node, offset, length) +} + +// Release implements fs.InodeOperations.Release. +func (d *Dir) Release(ctx context.Context) { + d.ramfsDir.Release(ctx) +} + +// Symlink is a symlink. +// +// +stateify savable +type Symlink struct { + ramfs.Symlink +} + +// NewSymlink returns a new symlink with the provided permissions. 
+func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs.MountSource) *fs.Inode { + s := &Symlink{Symlink: *ramfs.NewSymlink(ctx, owner, target)} + return fs.NewInode(ctx, s, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Symlink, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (s *Symlink) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return rename(ctx, oldParent, oldName, newParent, newName, replacement) +} + +// StatFS returns the tmpfs info. +func (s *Symlink) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +// Socket is a socket. +// +// +stateify savable +type Socket struct { + ramfs.Socket + fsutil.InodeNotTruncatable `state:"nosave"` + fsutil.InodeNotAllocatable `state:"nosave"` +} + +// NewSocket returns a new socket with the provided permissions. +func NewSocket(ctx context.Context, socket transport.BoundEndpoint, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode { + s := &Socket{Socket: *ramfs.NewSocket(ctx, socket, owner, perms)} + return fs.NewInode(ctx, s, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Socket, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (s *Socket) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return rename(ctx, oldParent, oldName, newParent, newName, replacement) +} + +// StatFS returns the tmpfs info. +func (s *Socket) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +// Fifo is a tmpfs named pipe. +// +// +stateify savable +type Fifo struct { + fs.InodeOperations +} + +// NewFifo creates a new named pipe. 
+func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode { + // First create a pipe. + p := pipe.NewPipe(true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize) + + // Build pipe InodeOperations. + iops := pipe.NewInodeOperations(ctx, perms, p) + + // Wrap the iops with our Fifo. + fifoIops := &Fifo{iops} + + // Build a new Inode. + return fs.NewInode(ctx, fifoIops, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Pipe, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (f *Fifo) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error { + return rename(ctx, oldParent, oldName, newParent, newName, replacement) +} + +// StatFS returns the tmpfs info. +func (*Fifo) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD new file mode 100644 index 000000000..5cb0e0417 --- /dev/null +++ b/pkg/sentry/fs/tty/BUILD @@ -0,0 +1,47 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "tty", + srcs = [ + "dir.go", + "fs.go", + "line_discipline.go", + "master.go", + "queue.go", + "slave.go", + "terminal.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/refs", + "//pkg/safemem", + "//pkg/sentry/arch", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/socket/unix/transport", + "//pkg/sentry/unimpl", + "//pkg/sync", + "//pkg/syserror", + "//pkg/usermem", + "//pkg/waiter", + ], +) + +go_test( + name = "tty_test", + size = "small", + srcs = ["tty_test.go"], + library = ":tty", + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/contexttest", + "//pkg/usermem", + 
], +) diff --git a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go new file mode 100644 index 000000000..108654827 --- /dev/null +++ b/pkg/sentry/fs/tty/dir.go @@ -0,0 +1,342 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tty provides pseudoterminals via a devpts filesystem. +package tty + +import ( + "fmt" + "math" + "strconv" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// dirInodeOperations is the root of a devpts mount. +// +// This indirectly manages all terminals within the mount. +// +// New Terminals are created by masterInodeOperations.GetFile, which registers +// the slave Inode in this directory for discovery via Lookup/Readdir. The +// slave inode is unregistered when the master file is Released, as the slave +// is no longer discoverable at that point. +// +// References on the underlying Terminal are held by masterFileOperations and +// slaveInodeOperations. 
+// +// masterInodeOperations and slaveInodeOperations hold a pointer to +// dirInodeOperations, which is reference counted by the refcount their +// corresponding Dirents hold on their parent (this directory). +// +// dirInodeOperations implements fs.InodeOperations. +// +// +stateify savable +type dirInodeOperations struct { + fsutil.InodeGenericChecker `state:"nosave"` + fsutil.InodeIsDirAllocate `state:"nosave"` + fsutil.InodeIsDirTruncate `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNoopWriteOut `state:"nosave"` + fsutil.InodeNotMappable `state:"nosave"` + fsutil.InodeNotRenameable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.InodeVirtual `state:"nosave"` + + fsutil.InodeSimpleAttributes + + // msrc is the super block this directory is on. + // + // TODO(chrisko): Plumb this through instead of storing it here. + msrc *fs.MountSource + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // master is the master PTY inode. + master *fs.Inode + + // slaves contains the slave inodes reachable from the directory. + // + // A new slave is added by allocateTerminal and is removed by + // masterFileOperations.Release. + // + // A reference is held on every slave in the map. + slaves map[uint32]*fs.Inode + + // dentryMap is a SortedDentryMap used to implement Readdir containing + // the master and all entries in slaves. + dentryMap *fs.SortedDentryMap + + // next is the next pty index to use. + // + // TODO(b/29356795): reuse indices when ptys are closed. + next uint32 +} + +var _ fs.InodeOperations = (*dirInodeOperations)(nil) + +// newDir creates a new dir with a ptmx file and no terminals. 
+func newDir(ctx context.Context, m *fs.MountSource) *fs.Inode { + d := &dirInodeOperations{ + InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0555), linux.DEVPTS_SUPER_MAGIC), + msrc: m, + slaves: make(map[uint32]*fs.Inode), + dentryMap: fs.NewSortedDentryMap(nil), + } + // Linux devpts uses a default mode of 0000 for ptmx which can be + // changed with the ptmxmode mount option. However, that default is not + // useful here (since we'd *always* need the mount option, so it is + // accessible by default). + d.master = newMasterInode(ctx, d, fs.RootOwner, fs.FilePermsFromMode(0666)) + d.dentryMap.Add("ptmx", fs.DentAttr{ + Type: d.master.StableAttr.Type, + InodeID: d.master.StableAttr.InodeID, + }) + + return fs.NewInode(ctx, d, m, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id 1 for the directory. See + // fs/devpts/inode.c:devpts_fill_super. + // + // TODO(b/75267214): Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +// Release implements fs.InodeOperations.Release. +func (d *dirInodeOperations) Release(ctx context.Context) { + d.mu.Lock() + defer d.mu.Unlock() + + d.master.DecRef() + if len(d.slaves) != 0 { + panic(fmt.Sprintf("devpts directory still contains active terminals: %+v", d)) + } +} + +// Lookup implements fs.InodeOperations.Lookup. +func (d *dirInodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + d.mu.Lock() + defer d.mu.Unlock() + + // Master? + if name == "ptmx" { + d.master.IncRef() + return fs.NewDirent(ctx, d.master, name), nil + } + + // Slave number? + n, err := strconv.ParseUint(name, 10, 32) + if err != nil { + // Not found. 
+ return nil, syserror.ENOENT + } + + s, ok := d.slaves[uint32(n)] + if !ok { + return nil, syserror.ENOENT + } + + s.IncRef() + return fs.NewDirent(ctx, s, name), nil +} + +// Create implements fs.InodeOperations.Create. +// +// Creation is never allowed. +func (d *dirInodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + return nil, syserror.EACCES +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syserror.EACCES +} + +// CreateLink implements fs.InodeOperations.CreateLink. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error { + return syserror.EACCES +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error { + return syserror.EACCES +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syserror.EACCES +} + +// Remove implements fs.InodeOperations.Remove. +// +// Removal is never allowed. +func (d *dirInodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +// +// Removal is never allowed. +func (d *dirInodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// Bind implements fs.InodeOperations.Bind. 
+func (d *dirInodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) { + return nil, syserror.EPERM +} + +// GetFile implements fs.InodeOperations.GetFile. +func (d *dirInodeOperations) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &dirFileOperations{di: d}), nil +} + +// allocateTerminal creates a new Terminal and installs a pts node for it. +// +// The caller must call DecRef when done with the returned Terminal. +func (d *dirInodeOperations) allocateTerminal(ctx context.Context) (*Terminal, error) { + d.mu.Lock() + defer d.mu.Unlock() + + n := d.next + if n == math.MaxUint32 { + return nil, syserror.ENOMEM + } + + if _, ok := d.slaves[n]; ok { + panic(fmt.Sprintf("pty index collision; index %d already exists", n)) + } + + t := newTerminal(ctx, d, n) + d.next++ + + // The reference returned by newTerminal is returned to the caller. + // Take another for the slave inode. + t.IncRef() + + // Create a pts node. The owner is based on the context that opens + // ptmx. + creds := auth.CredentialsFromContext(ctx) + uid, gid := creds.EffectiveKUID, creds.EffectiveKGID + slave := newSlaveInode(ctx, d, t, fs.FileOwner{uid, gid}, fs.FilePermsFromMode(0666)) + + d.slaves[n] = slave + d.dentryMap.Add(strconv.FormatUint(uint64(n), 10), fs.DentAttr{ + Type: slave.StableAttr.Type, + InodeID: slave.StableAttr.InodeID, + }) + + return t, nil +} + +// masterClose is called when the master end of t is closed. +func (d *dirInodeOperations) masterClose(t *Terminal) { + d.mu.Lock() + defer d.mu.Unlock() + + // The slave end disappears from the directory when the master end is + // closed, even if the slave end is open elsewhere. + // + // N.B. since we're using a backdoor method to remove a directory entry + // we won't properly fire inotify events like Linux would. 
+ s, ok := d.slaves[t.n] + if !ok { + panic(fmt.Sprintf("Terminal %+v doesn't exist in %+v?", t, d)) + } + + s.DecRef() + delete(d.slaves, t.n) + d.dentryMap.Remove(strconv.FormatUint(uint64(t.n), 10)) +} + +// dirFileOperations are the fs.FileOperations for the directory. +// +// This is nearly identical to fsutil.DirFileOperations, except that it takes +// df.di.mu in IterateDir. +// +// +stateify savable +type dirFileOperations struct { + fsutil.FileNoopRelease `state:"nosave"` + fsutil.FileGenericSeek `state:"nosave"` + fsutil.FileNoFsync `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoIoctl `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + // di is the inode operations. + di *dirInodeOperations + + // dirCursor contains the name of the last directory entry that was + // serialized. + dirCursor string +} + +var _ fs.FileOperations = (*dirFileOperations)(nil) + +// IterateDir implements DirIterator.IterateDir. +func (df *dirFileOperations) IterateDir(ctx context.Context, d *fs.Dirent, dirCtx *fs.DirCtx, offset int) (int, error) { + df.di.mu.Lock() + defer df.di.mu.Unlock() + + n, err := fs.GenericReaddir(dirCtx, df.di.dentryMap) + return offset + n, err +} + +// Readdir implements FileOperations.Readdir. +func (df *dirFileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + if root != nil { + defer root.DecRef() + } + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &df.dirCursor, + } + return fs.DirentReaddir(ctx, file.Dirent, df, root, dirCtx, file.Offset()) +} + +// Read implements FileOperations.Read +func (df *dirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EISDIR +} + +// Write implements FileOperations.Write. 
+func (df *dirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EISDIR +} diff --git a/pkg/sentry/fs/tty/fs.go b/pkg/sentry/fs/tty/fs.go new file mode 100644 index 000000000..8fe05ebe5 --- /dev/null +++ b/pkg/sentry/fs/tty/fs.go @@ -0,0 +1,111 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/device" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/syserror" +) + +// ptsDevice is the pseudo-filesystem device. +var ptsDevice = device.NewAnonDevice() + +// filesystem is a devpts filesystem. +// +// This devpts is always in the new "multi-instance" mode. i.e., it contains a +// ptmx device tied to this mount. +// +// +stateify savable +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// Name matches drivers/devpts/inode.c:devpts_fs_type.name. +func (*filesystem) Name() string { + return "devpts" +} + +// AllowUserMount allows users to mount(2) this file system. +func (*filesystem) AllowUserMount() bool { + // TODO(b/29356795): Users may mount this once the terminals are in a + // usable state. + return false +} + +// AllowUserList allows this filesystem to be listed in /proc/filesystems. 
+func (*filesystem) AllowUserList() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// MountSource returns a devpts root that can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, _ interface{}) (*fs.Inode, error) { + // device is always ignored. + + // No options are supported. + if data != "" { + return nil, syserror.EINVAL + } + + return newDir(ctx, fs.NewMountSource(ctx, &superOperations{}, f, flags)), nil +} + +// superOperations implements fs.MountSourceOperations, preventing caching. +// +// +stateify savable +type superOperations struct{} + +// Revalidate implements fs.DirentOperations.Revalidate. +// +// It always returns true, forcing a Lookup for all entries. +// +// Slave entries are dropped from dir when their master is closed, so an +// existing slave Dirent in the tree is not sufficient to guarantee that it +// still exists on the filesystem. +func (superOperations) Revalidate(context.Context, string, *fs.Inode, *fs.Inode) bool { + return true +} + +// Keep implements fs.DirentOperations.Keep. +// +// Keep returns false because Revalidate would force a lookup on cached entries +// anyways. +func (superOperations) Keep(*fs.Dirent) bool { + return false +} + +// CacheReaddir implements fs.DirentOperations.CacheReaddir. +// +// CacheReaddir returns false because entries change on master operations. +func (superOperations) CacheReaddir() bool { + return false +} + +// ResetInodeMappings implements MountSourceOperations.ResetInodeMappings. +func (superOperations) ResetInodeMappings() {} + +// SaveInodeMapping implements MountSourceOperations.SaveInodeMapping. +func (superOperations) SaveInodeMapping(*fs.Inode, string) {} + +// Destroy implements MountSourceOperations.Destroy. 
+func (superOperations) Destroy() {} diff --git a/pkg/sentry/fs/tty/line_discipline.go b/pkg/sentry/fs/tty/line_discipline.go new file mode 100644 index 000000000..2e9dd2d55 --- /dev/null +++ b/pkg/sentry/fs/tty/line_discipline.go @@ -0,0 +1,449 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "bytes" + "unicode/utf8" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +const ( + // canonMaxBytes is the number of bytes that fit into a single line of + // terminal input in canonical mode. This corresponds to N_TTY_BUF_SIZE + // in include/linux/tty.h. + canonMaxBytes = 4096 + + // nonCanonMaxBytes is the maximum number of bytes that can be read at + // a time in noncanonical mode. + nonCanonMaxBytes = canonMaxBytes - 1 + + spacesPerTab = 8 +) + +// lineDiscipline dictates how input and output are handled between the +// pseudoterminal (pty) master and slave. It can be configured to alter I/O, +// modify control characters (e.g. Ctrl-C for SIGINT), etc. The following man +// pages are good resources for how to affect the line discipline: +// +// * termios(3) +// * tty_ioctl(4) +// +// This file corresponds most closely to drivers/tty/n_tty.c. 
+// +// lineDiscipline has a simple structure but supports a multitude of options +// (see the above man pages). It consists of two queues of bytes: one from the +// terminal master to slave (the input queue) and one from slave to master (the +// output queue). When bytes are written to one end of the pty, the line +// discipline reads the bytes, modifies them or takes special action if +// required, and enqueues them to be read by the other end of the pty: +// +// input from terminal +-------------+ input to process (e.g. bash) +// +------------------------>| input queue |---------------------------+ +// | (inputQueueWrite) +-------------+ (inputQueueRead) | +// | | +// | v +// masterFD slaveFD +// ^ | +// | | +// | output to terminal +--------------+ output from process | +// +------------------------| output queue |<--------------------------+ +// (outputQueueRead) +--------------+ (outputQueueWrite) +// +// Lock order: +// termiosMu +// inQueue.mu +// outQueue.mu +// +// +stateify savable +type lineDiscipline struct { + // sizeMu protects size. + sizeMu sync.Mutex `state:"nosave"` + + // size is the terminal size (width and height). + size linux.WindowSize + + // inQueue is the input queue of the terminal. + inQueue queue + + // outQueue is the output queue of the terminal. + outQueue queue + + // termiosMu protects termios. + termiosMu sync.RWMutex `state:"nosave"` + + // termios is the terminal configuration used by the lineDiscipline. + termios linux.KernelTermios + + // column is the location in a row of the cursor. This is important for + // handling certain special characters like backspace. + column int + + // masterWaiter is used to wait on the master end of the TTY. + masterWaiter waiter.Queue `state:"zerovalue"` + + // slaveWaiter is used to wait on the slave end of the TTY. 
+ slaveWaiter waiter.Queue `state:"zerovalue"` +} + +func newLineDiscipline(termios linux.KernelTermios) *lineDiscipline { + ld := lineDiscipline{termios: termios} + ld.inQueue.transformer = &inputQueueTransformer{} + ld.outQueue.transformer = &outputQueueTransformer{} + return &ld +} + +// getTermios gets the linux.Termios for the tty. +func (l *lineDiscipline) getTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + l.termiosMu.RLock() + defer l.termiosMu.RUnlock() + // We must copy a Termios struct, not KernelTermios. + t := l.termios.ToTermios() + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), t, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err +} + +// setTermios sets a linux.Termios for the tty. +func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + l.termiosMu.Lock() + defer l.termiosMu.Unlock() + oldCanonEnabled := l.termios.LEnabled(linux.ICANON) + // We must copy a Termios struct, not KernelTermios. + var t linux.Termios + _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &t, usermem.IOOpts{ + AddressSpaceActive: true, + }) + l.termios.FromTermios(t) + + // If canonical mode is turned off, move bytes from inQueue's wait + // buffer to its read buffer. Anything already in the read buffer is + // now readable. 
+ if oldCanonEnabled && !l.termios.LEnabled(linux.ICANON) { + l.inQueue.mu.Lock() + l.inQueue.pushWaitBufLocked(l) + l.inQueue.readable = true + l.inQueue.mu.Unlock() + l.slaveWaiter.Notify(waiter.EventIn) + } + + return 0, err +} + +func (l *lineDiscipline) windowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error { + l.sizeMu.Lock() + defer l.sizeMu.Unlock() + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), l.size, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return err +} + +func (l *lineDiscipline) setWindowSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error { + l.sizeMu.Lock() + defer l.sizeMu.Unlock() + _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &l.size, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return err +} + +func (l *lineDiscipline) masterReadiness() waiter.EventMask { + // We don't have to lock a termios because the default master termios + // is immutable. + return l.inQueue.writeReadiness(&linux.MasterTermios) | l.outQueue.readReadiness(&linux.MasterTermios) +} + +func (l *lineDiscipline) slaveReadiness() waiter.EventMask { + l.termiosMu.RLock() + defer l.termiosMu.RUnlock() + return l.outQueue.writeReadiness(&l.termios) | l.inQueue.readReadiness(&l.termios) +} + +func (l *lineDiscipline) inputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error { + return l.inQueue.readableSize(ctx, io, args) +} + +func (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) { + l.termiosMu.RLock() + defer l.termiosMu.RUnlock() + n, pushed, err := l.inQueue.read(ctx, dst, l) + if err != nil { + return 0, err + } + if n > 0 { + l.masterWaiter.Notify(waiter.EventOut) + if pushed { + l.slaveWaiter.Notify(waiter.EventIn) + } + return n, nil + } + return 0, syserror.ErrWouldBlock +} + +func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) { + l.termiosMu.RLock() + defer 
l.termiosMu.RUnlock() + n, err := l.inQueue.write(ctx, src, l) + if err != nil { + return 0, err + } + if n > 0 { + l.slaveWaiter.Notify(waiter.EventIn) + return n, nil + } + return 0, syserror.ErrWouldBlock +} + +func (l *lineDiscipline) outputQueueReadSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error { + return l.outQueue.readableSize(ctx, io, args) +} + +func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) { + l.termiosMu.RLock() + defer l.termiosMu.RUnlock() + n, pushed, err := l.outQueue.read(ctx, dst, l) + if err != nil { + return 0, err + } + if n > 0 { + l.slaveWaiter.Notify(waiter.EventOut) + if pushed { + l.masterWaiter.Notify(waiter.EventIn) + } + return n, nil + } + return 0, syserror.ErrWouldBlock +} + +func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) { + l.termiosMu.RLock() + defer l.termiosMu.RUnlock() + n, err := l.outQueue.write(ctx, src, l) + if err != nil { + return 0, err + } + if n > 0 { + l.masterWaiter.Notify(waiter.EventIn) + return n, nil + } + return 0, syserror.ErrWouldBlock +} + +// transformer is a helper interface to make it easier to stateify queue. +type transformer interface { + // transform functions require queue's mutex to be held. + transform(*lineDiscipline, *queue, []byte) int +} + +// outputQueueTransformer implements transformer. It performs line discipline +// transformations on the output queue. +// +// +stateify savable +type outputQueueTransformer struct{} + +// transform does output processing for one end of the pty. See +// drivers/tty/n_tty.c:do_output_char for an analogous kernel function. +// +// Preconditions: +// * l.termiosMu must be held for reading. +// * q.mu must be held. +func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int { + // transformOutput is effectively always in noncanonical mode, as the + // master termios never has ICANON set. 
+ + if !l.termios.OEnabled(linux.OPOST) { + q.readBuf = append(q.readBuf, buf...) + if len(q.readBuf) > 0 { + q.readable = true + } + return len(buf) + } + + var ret int + for len(buf) > 0 { + size := l.peek(buf) + cBytes := append([]byte{}, buf[:size]...) + ret += size + buf = buf[size:] + // We're guaranteed that cBytes has at least one element. + switch cBytes[0] { + case '\n': + if l.termios.OEnabled(linux.ONLRET) { + l.column = 0 + } + if l.termios.OEnabled(linux.ONLCR) { + q.readBuf = append(q.readBuf, '\r', '\n') + continue + } + case '\r': + if l.termios.OEnabled(linux.ONOCR) && l.column == 0 { + continue + } + if l.termios.OEnabled(linux.OCRNL) { + cBytes[0] = '\n' + if l.termios.OEnabled(linux.ONLRET) { + l.column = 0 + } + break + } + l.column = 0 + case '\t': + spaces := spacesPerTab - l.column%spacesPerTab + if l.termios.OutputFlags&linux.TABDLY == linux.XTABS { + l.column += spaces + q.readBuf = append(q.readBuf, bytes.Repeat([]byte{' '}, spacesPerTab)...) + continue + } + l.column += spaces + case '\b': + if l.column > 0 { + l.column-- + } + default: + l.column++ + } + q.readBuf = append(q.readBuf, cBytes...) + } + if len(q.readBuf) > 0 { + q.readable = true + } + return ret +} + +// inputQueueTransformer implements transformer. It performs line discipline +// transformations on the input queue. +// +// +stateify savable +type inputQueueTransformer struct{} + +// transform does input processing for one end of the pty. Characters read are +// transformed according to flags set in the termios struct. See +// drivers/tty/n_tty.c:n_tty_receive_char_special for an analogous kernel +// function. +// +// Preconditions: +// * l.termiosMu must be held for reading. +// * q.mu must be held. +func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int { + // If there's a line waiting to be read in canonical mode, don't write + // anything else to the read buffer. 
+ if l.termios.LEnabled(linux.ICANON) && q.readable { + return 0 + } + + maxBytes := nonCanonMaxBytes + if l.termios.LEnabled(linux.ICANON) { + maxBytes = canonMaxBytes + } + + var ret int + for len(buf) > 0 && len(q.readBuf) < canonMaxBytes { + size := l.peek(buf) + cBytes := append([]byte{}, buf[:size]...) + // We're guaranteed that cBytes has at least one element. + switch cBytes[0] { + case '\r': + if l.termios.IEnabled(linux.IGNCR) { + buf = buf[size:] + ret += size + continue + } + if l.termios.IEnabled(linux.ICRNL) { + cBytes[0] = '\n' + } + case '\n': + if l.termios.IEnabled(linux.INLCR) { + cBytes[0] = '\r' + } + } + + // In canonical mode, we discard non-terminating characters + // after the first 4095. + if l.shouldDiscard(q, cBytes) { + buf = buf[size:] + ret += size + continue + } + + // Stop if the buffer would be overfilled. + if len(q.readBuf)+size > maxBytes { + break + } + buf = buf[size:] + ret += size + + // If we get EOF, make the buffer available for reading. + if l.termios.LEnabled(linux.ICANON) && l.termios.IsEOF(cBytes[0]) { + q.readable = true + break + } + + q.readBuf = append(q.readBuf, cBytes...) + + // Anything written to the readBuf will have to be echoed. + if l.termios.LEnabled(linux.ECHO) { + l.outQueue.writeBytes(cBytes, l) + l.masterWaiter.Notify(waiter.EventIn) + } + + // If we finish a line, make it available for reading. + if l.termios.LEnabled(linux.ICANON) && l.termios.IsTerminating(cBytes) { + q.readable = true + break + } + } + + // In noncanonical mode, everything is readable. + if !l.termios.LEnabled(linux.ICANON) && len(q.readBuf) > 0 { + q.readable = true + } + + return ret +} + +// shouldDiscard returns whether c should be discarded. In canonical mode, if +// too many bytes are enqueued, we keep reading input and discarding it until +// we find a terminating character. Signal/echo processing still occurs. +// +// Precondition: +// * l.termiosMu must be held for reading. +// * q.mu must be held. 
+func (l *lineDiscipline) shouldDiscard(q *queue, cBytes []byte) bool { + return l.termios.LEnabled(linux.ICANON) && len(q.readBuf)+len(cBytes) >= canonMaxBytes && !l.termios.IsTerminating(cBytes) +} + +// peek returns the size in bytes of the next character to process. As long as +// b isn't empty, peek returns a value of at least 1. +func (l *lineDiscipline) peek(b []byte) int { + size := 1 + // If UTF-8 support is enabled, runes might be multiple bytes. + if l.termios.IEnabled(linux.IUTF8) { + _, size = utf8.DecodeRune(b) + } + return size +} + +// LINT.ThenChange(../../fsimpl/devpts/line_discipline.go) diff --git a/pkg/sentry/fs/tty/master.go b/pkg/sentry/fs/tty/master.go new file mode 100644 index 000000000..fe07fa929 --- /dev/null +++ b/pkg/sentry/fs/tty/master.go @@ -0,0 +1,238 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/sentry/unimpl" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// masterInodeOperations are the fs.InodeOperations for the master end of the +// Terminal (ptmx file). 
+// +// +stateify savable +type masterInodeOperations struct { + fsutil.SimpleFileInode + + // d is the containing dir. + d *dirInodeOperations +} + +var _ fs.InodeOperations = (*masterInodeOperations)(nil) + +// newMasterInode creates an Inode for the master end of a terminal. +func newMasterInode(ctx context.Context, d *dirInodeOperations, owner fs.FileOwner, p fs.FilePermissions) *fs.Inode { + iops := &masterInodeOperations{ + SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, owner, p, linux.DEVPTS_SUPER_MAGIC), + d: d, + } + + return fs.NewInode(ctx, iops, d.msrc, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id 2 for ptmx. See + // fs/devpts/inode.c:mknod_ptmx. + // + // TODO(b/75267214): Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + Type: fs.CharacterDevice, + // See fs/devpts/inode.c:devpts_fill_super. + BlockSize: 1024, + // The PTY master effectively has two different major/minor + // device numbers. + // + // This one is returned by stat for both opened and unopened + // instances of this inode. + // + // When the inode is opened (GetFile), a new device number is + // allocated based on major UNIX98_PTY_MASTER_MAJOR and the tty + // index as minor number. However, this device number is only + // accessible via ioctl(TIOCGDEV) and /proc/TID/stat. + DeviceFileMajor: linux.TTYAUX_MAJOR, + DeviceFileMinor: linux.PTMX_MINOR, + }) +} + +// Release implements fs.InodeOperations.Release. +func (mi *masterInodeOperations) Release(ctx context.Context) { +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*masterInodeOperations) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// GetFile implements fs.InodeOperations.GetFile. +// +// It allocates a new terminal. 
func (mi *masterInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
	// Each open of ptmx allocates a fresh terminal (master/slave pair);
	// the returned file owns the master side.
	t, err := mi.d.allocateTerminal(ctx)
	if err != nil {
		return nil, err
	}

	return fs.NewFile(ctx, d, flags, &masterFileOperations{
		d: mi.d,
		t: t,
	}), nil
}

// masterFileOperations are the fs.FileOperations for the master end of a terminal.
//
// The embedded fsutil types provide no-op/error implementations for the
// fs.FileOperations methods that do not apply to a pty master.
//
// +stateify savable
type masterFileOperations struct {
	fsutil.FilePipeSeek             `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileNoFsync              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`

	// d is the containing dir.
	d *dirInodeOperations

	// t is the connected Terminal.
	t *Terminal
}

var _ fs.FileOperations = (*masterFileOperations)(nil)

// Release implements fs.FileOperations.Release.
func (mf *masterFileOperations) Release() {
	// Notify the directory that the master has closed, then drop the
	// reference on the Terminal taken at allocation (GetFile).
	mf.d.masterClose(mf.t)
	mf.t.DecRef()
}

// EventRegister implements waiter.Waitable.EventRegister.
func (mf *masterFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) {
	mf.t.ld.masterWaiter.EventRegister(e, mask)
}

// EventUnregister implements waiter.Waitable.EventUnregister.
func (mf *masterFileOperations) EventUnregister(e *waiter.Entry) {
	mf.t.ld.masterWaiter.EventUnregister(e)
}

// Readiness implements waiter.Waitable.Readiness.
func (mf *masterFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
	return mf.t.ld.masterReadiness()
}

// Read implements fs.FileOperations.Read.
//
// The master reads what the slave wrote, i.e. the line discipline's output
// queue. The offset argument is ignored: ttys are not seekable.
func (mf *masterFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) {
	return mf.t.ld.outputQueueRead(ctx, dst)
}

// Write implements fs.FileOperations.Write.
+func (mf *masterFileOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + return mf.t.ld.inputQueueWrite(ctx, src) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (mf *masterFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch cmd := args[1].Uint(); cmd { + case linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ + // Get the number of bytes in the output queue read buffer. + return 0, mf.t.ld.outputQueueReadSize(ctx, io, args) + case linux.TCGETS: + // N.B. TCGETS on the master actually returns the configuration + // of the slave end. + return mf.t.ld.getTermios(ctx, io, args) + case linux.TCSETS: + // N.B. TCSETS on the master actually affects the configuration + // of the slave end. + return mf.t.ld.setTermios(ctx, io, args) + case linux.TCSETSW: + // TODO(b/29356795): This should drain the output queue first. + return mf.t.ld.setTermios(ctx, io, args) + case linux.TIOCGPTN: + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(mf.t.n), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + case linux.TIOCSPTLCK: + // TODO(b/29356795): Implement pty locking. For now just pretend we do. + return 0, nil + case linux.TIOCGWINSZ: + return 0, mf.t.ld.windowSize(ctx, io, args) + case linux.TIOCSWINSZ: + return 0, mf.t.ld.setWindowSize(ctx, io, args) + case linux.TIOCSCTTY: + // Make the given terminal the controlling terminal of the + // calling process. + return 0, mf.t.setControllingTTY(ctx, io, args, true /* isMaster */) + case linux.TIOCNOTTY: + // Release this process's controlling terminal. + return 0, mf.t.releaseControllingTTY(ctx, io, args, true /* isMaster */) + case linux.TIOCGPGRP: + // Get the foreground process group. + return mf.t.foregroundProcessGroup(ctx, io, args, true /* isMaster */) + case linux.TIOCSPGRP: + // Set the foreground process group. 
+ return mf.t.setForegroundProcessGroup(ctx, io, args, true /* isMaster */) + default: + maybeEmitUnimplementedEvent(ctx, cmd) + return 0, syserror.ENOTTY + } +} + +// maybeEmitUnimplementedEvent emits unimplemented event if cmd is valid. +func maybeEmitUnimplementedEvent(ctx context.Context, cmd uint32) { + switch cmd { + case linux.TCGETS, + linux.TCSETS, + linux.TCSETSW, + linux.TCSETSF, + linux.TIOCGWINSZ, + linux.TIOCSWINSZ, + linux.TIOCSETD, + linux.TIOCSBRK, + linux.TIOCCBRK, + linux.TCSBRK, + linux.TCSBRKP, + linux.TIOCSTI, + linux.TIOCCONS, + linux.FIONBIO, + linux.TIOCEXCL, + linux.TIOCNXCL, + linux.TIOCGEXCL, + linux.TIOCGSID, + linux.TIOCGETD, + linux.TIOCVHANGUP, + linux.TIOCGDEV, + linux.TIOCMGET, + linux.TIOCMSET, + linux.TIOCMBIC, + linux.TIOCMBIS, + linux.TIOCGICOUNT, + linux.TCFLSH, + linux.TIOCSSERIAL, + linux.TIOCGPTPEER: + + unimpl.EmitUnimplementedEvent(ctx) + } +} + +// LINT.ThenChange(../../fsimpl/devpts/master.go) diff --git a/pkg/sentry/fs/tty/queue.go b/pkg/sentry/fs/tty/queue.go new file mode 100644 index 000000000..ceabb9b1e --- /dev/null +++ b/pkg/sentry/fs/tty/queue.go @@ -0,0 +1,240 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tty + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/safemem" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sync" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// waitBufMaxBytes is the maximum size of a wait buffer. It is based on +// TTYB_DEFAULT_MEM_LIMIT. +const waitBufMaxBytes = 131072 + +// queue represents one of the input or output queues between a pty master and +// slave. Bytes written to a queue are added to the read buffer until it is +// full, at which point they are written to the wait buffer. Bytes are +// processed (i.e. undergo termios transformations) as they are added to the +// read buffer. The read buffer is readable when its length is nonzero and +// readable is true. +// +// +stateify savable +type queue struct { + // mu protects everything in queue. + mu sync.Mutex `state:"nosave"` + + // readBuf is buffer of data ready to be read when readable is true. + // This data has been processed. + readBuf []byte + + // waitBuf contains data that can't fit into readBuf. It is put here + // until it can be loaded into the read buffer. waitBuf contains data + // that hasn't been processed. + waitBuf [][]byte + waitBufLen uint64 + + // readable indicates whether the read buffer can be read from. In + // canonical mode, there can be an unterminated line in the read buffer, + // so readable must be checked. + readable bool + + // transform is the the queue's function for transforming bytes + // entering the queue. For example, transform might convert all '\r's + // entering the queue to '\n's. + transformer +} + +// readReadiness returns whether q is ready to be read from. 
func (q *queue) readReadiness(t *linux.KernelTermios) waiter.EventMask {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Readable requires both buffered data and the readable flag (in
	// canonical mode an unterminated line is buffered but not readable).
	if len(q.readBuf) > 0 && q.readable {
		return waiter.EventIn
	}
	return waiter.EventMask(0)
}

// writeReadiness returns whether q is ready to be written to.
func (q *queue) writeReadiness(t *linux.KernelTermios) waiter.EventMask {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Writable as long as the wait buffer has not hit its cap.
	if q.waitBufLen < waitBufMaxBytes {
		return waiter.EventOut
	}
	return waiter.EventMask(0)
}

// readableSize writes the number of readable bytes to userspace.
//
// Used to service FIONREAD/TIOCINQ; the result is copied to the address in
// args[2] as an int32.
func (q *queue) readableSize(ctx context.Context, io usermem.IO, args arch.SyscallArguments) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Report zero while the buffer is not yet readable (e.g. an
	// unterminated canonical-mode line).
	var size int32
	if q.readable {
		size = int32(len(q.readBuf))
	}

	_, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), size, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	return err

}

// read reads from q to userspace. It returns the number of bytes read as well
// as whether the read caused more readable data to become available (whether
// data was pushed from the wait buffer to the read buffer).
//
// Preconditions:
// * l.termiosMu must be held for reading.
func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
	q.mu.Lock()
	defer q.mu.Unlock()

	if !q.readable {
		return 0, false, syserror.ErrWouldBlock
	}

	// A single read never returns more than canonMaxBytes (one maximal
	// canonical line).
	if dst.NumBytes() > canonMaxBytes {
		dst = dst.TakeFirst(canonMaxBytes)
	}

	n, err := dst.CopyOutFrom(ctx, safemem.ReaderFunc(func(dst safemem.BlockSeq) (uint64, error) {
		src := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(q.readBuf))
		n, err := safemem.CopySeq(dst, src)
		if err != nil {
			return 0, err
		}
		// Consume the copied prefix of the read buffer.
		q.readBuf = q.readBuf[n:]

		// If we read everything, this queue is no longer readable.
+ if len(q.readBuf) == 0 { + q.readable = false + } + + return n, nil + })) + if err != nil { + return 0, false, err + } + + // Move data from the queue's wait buffer to its read buffer. + nPushed := q.pushWaitBufLocked(l) + + return int64(n), nPushed > 0, nil +} + +// write writes to q from userspace. +// +// Preconditions: +// * l.termiosMu must be held for reading. +func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) { + q.mu.Lock() + defer q.mu.Unlock() + + // Copy data into the wait buffer. + n, err := src.CopyInTo(ctx, safemem.WriterFunc(func(src safemem.BlockSeq) (uint64, error) { + copyLen := src.NumBytes() + room := waitBufMaxBytes - q.waitBufLen + // If out of room, return EAGAIN. + if room == 0 && copyLen > 0 { + return 0, syserror.ErrWouldBlock + } + // Cap the size of the wait buffer. + if copyLen > room { + copyLen = room + src = src.TakeFirst64(room) + } + buf := make([]byte, copyLen) + + // Copy the data into the wait buffer. + dst := safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)) + n, err := safemem.CopySeq(dst, src) + if err != nil { + return 0, err + } + q.waitBufAppend(buf) + + return n, nil + })) + if err != nil { + return 0, err + } + + // Push data from the wait to the read buffer. + q.pushWaitBufLocked(l) + + return n, nil +} + +// writeBytes writes to q from b. +// +// Preconditions: +// * l.termiosMu must be held for reading. +func (q *queue) writeBytes(b []byte, l *lineDiscipline) { + q.mu.Lock() + defer q.mu.Unlock() + + // Write to the wait buffer. + q.waitBufAppend(b) + q.pushWaitBufLocked(l) +} + +// pushWaitBufLocked fills the queue's read buffer with data from the wait +// buffer. +// +// Preconditions: +// * l.termiosMu must be held for reading. +// * q.mu must be locked. +func (q *queue) pushWaitBufLocked(l *lineDiscipline) int { + if q.waitBufLen == 0 { + return 0 + } + + // Move data from the wait to the read buffer. 
+ var total int + var i int + for i = 0; i < len(q.waitBuf); i++ { + n := q.transform(l, q, q.waitBuf[i]) + total += n + if n != len(q.waitBuf[i]) { + // The read buffer filled up without consuming the + // entire buffer. + q.waitBuf[i] = q.waitBuf[i][n:] + break + } + } + + // Update wait buffer based on consumed data. + q.waitBuf = q.waitBuf[i:] + q.waitBufLen -= uint64(total) + + return total +} + +// Precondition: q.mu must be locked. +func (q *queue) waitBufAppend(b []byte) { + q.waitBuf = append(q.waitBuf, b) + q.waitBufLen += uint64(len(b)) +} + +// LINT.ThenChange(../../fsimpl/devpts/queue.go) diff --git a/pkg/sentry/fs/tty/slave.go b/pkg/sentry/fs/tty/slave.go new file mode 100644 index 000000000..9871f6fc6 --- /dev/null +++ b/pkg/sentry/fs/tty/slave.go @@ -0,0 +1,178 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/fsutil" + "gvisor.dev/gvisor/pkg/syserror" + "gvisor.dev/gvisor/pkg/usermem" + "gvisor.dev/gvisor/pkg/waiter" +) + +// LINT.IfChange + +// slaveInodeOperations are the fs.InodeOperations for the slave end of the +// Terminal (pts file). +// +// +stateify savable +type slaveInodeOperations struct { + fsutil.SimpleFileInode + + // d is the containing dir. 
+ d *dirInodeOperations + + // t is the connected Terminal. + t *Terminal +} + +var _ fs.InodeOperations = (*slaveInodeOperations)(nil) + +// newSlaveInode creates an fs.Inode for the slave end of a terminal. +// +// newSlaveInode takes ownership of t. +func newSlaveInode(ctx context.Context, d *dirInodeOperations, t *Terminal, owner fs.FileOwner, p fs.FilePermissions) *fs.Inode { + iops := &slaveInodeOperations{ + SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, owner, p, linux.DEVPTS_SUPER_MAGIC), + d: d, + t: t, + } + + return fs.NewInode(ctx, iops, d.msrc, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id = tty index + 3. See + // fs/devpts/inode.c:devpts_pty_new. + // + // TODO(b/75267214): Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + Type: fs.CharacterDevice, + // See fs/devpts/inode.c:devpts_fill_super. + BlockSize: 1024, + DeviceFileMajor: linux.UNIX98_PTY_SLAVE_MAJOR, + DeviceFileMinor: t.n, + }) +} + +// Release implements fs.InodeOperations.Release. +func (si *slaveInodeOperations) Release(ctx context.Context) { + si.t.DecRef() +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*slaveInodeOperations) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// GetFile implements fs.InodeOperations.GetFile. +// +// This may race with destruction of the terminal. If the terminal is gone, it +// returns ENOENT. +func (si *slaveInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, d, flags, &slaveFileOperations{si: si}), nil +} + +// slaveFileOperations are the fs.FileOperations for the slave end of a terminal. 
+// +// +stateify savable +type slaveFileOperations struct { + fsutil.FilePipeSeek `state:"nosave"` + fsutil.FileNotDirReaddir `state:"nosave"` + fsutil.FileNoFsync `state:"nosave"` + fsutil.FileNoMMap `state:"nosave"` + fsutil.FileNoSplice `state:"nosave"` + fsutil.FileNoopFlush `state:"nosave"` + fsutil.FileUseInodeUnstableAttr `state:"nosave"` + + // si is the inode operations. + si *slaveInodeOperations +} + +var _ fs.FileOperations = (*slaveFileOperations)(nil) + +// Release implements fs.FileOperations.Release. +func (sf *slaveFileOperations) Release() { +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (sf *slaveFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + sf.si.t.ld.slaveWaiter.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (sf *slaveFileOperations) EventUnregister(e *waiter.Entry) { + sf.si.t.ld.slaveWaiter.EventUnregister(e) +} + +// Readiness implements waiter.Waitable.Readiness. +func (sf *slaveFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return sf.si.t.ld.slaveReadiness() +} + +// Read implements fs.FileOperations.Read. +func (sf *slaveFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + return sf.si.t.ld.inputQueueRead(ctx, dst) +} + +// Write implements fs.FileOperations.Write. +func (sf *slaveFileOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + return sf.si.t.ld.outputQueueWrite(ctx, src) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (sf *slaveFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch cmd := args[1].Uint(); cmd { + case linux.FIONREAD: // linux.FIONREAD == linux.TIOCINQ + // Get the number of bytes in the input queue read buffer. 
+ return 0, sf.si.t.ld.inputQueueReadSize(ctx, io, args) + case linux.TCGETS: + return sf.si.t.ld.getTermios(ctx, io, args) + case linux.TCSETS: + return sf.si.t.ld.setTermios(ctx, io, args) + case linux.TCSETSW: + // TODO(b/29356795): This should drain the output queue first. + return sf.si.t.ld.setTermios(ctx, io, args) + case linux.TIOCGPTN: + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(sf.si.t.n), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + case linux.TIOCGWINSZ: + return 0, sf.si.t.ld.windowSize(ctx, io, args) + case linux.TIOCSWINSZ: + return 0, sf.si.t.ld.setWindowSize(ctx, io, args) + case linux.TIOCSCTTY: + // Make the given terminal the controlling terminal of the + // calling process. + return 0, sf.si.t.setControllingTTY(ctx, io, args, false /* isMaster */) + case linux.TIOCNOTTY: + // Release this process's controlling terminal. + return 0, sf.si.t.releaseControllingTTY(ctx, io, args, false /* isMaster */) + case linux.TIOCGPGRP: + // Get the foreground process group. + return sf.si.t.foregroundProcessGroup(ctx, io, args, false /* isMaster */) + case linux.TIOCSPGRP: + // Set the foreground process group. + return sf.si.t.setForegroundProcessGroup(ctx, io, args, false /* isMaster */) + default: + maybeEmitUnimplementedEvent(ctx, cmd) + return 0, syserror.ENOTTY + } +} + +// LINT.ThenChange(../../fsimpl/devpts/slave.go) diff --git a/pkg/sentry/fs/tty/terminal.go b/pkg/sentry/fs/tty/terminal.go new file mode 100644 index 000000000..ddcccf4da --- /dev/null +++ b/pkg/sentry/fs/tty/terminal.go @@ -0,0 +1,132 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/refs" + "gvisor.dev/gvisor/pkg/sentry/arch" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/usermem" +) + +// LINT.IfChange + +// Terminal is a pseudoterminal. +// +// +stateify savable +type Terminal struct { + refs.AtomicRefCount + + // n is the terminal index. It is immutable. + n uint32 + + // d is the containing directory. It is immutable. + d *dirInodeOperations + + // ld is the line discipline of the terminal. It is immutable. + ld *lineDiscipline + + // masterKTTY contains the controlling process of the master end of + // this terminal. This field is immutable. + masterKTTY *kernel.TTY + + // slaveKTTY contains the controlling process of the slave end of this + // terminal. This field is immutable. + slaveKTTY *kernel.TTY +} + +func newTerminal(ctx context.Context, d *dirInodeOperations, n uint32) *Terminal { + termios := linux.DefaultSlaveTermios + t := Terminal{ + d: d, + n: n, + ld: newLineDiscipline(termios), + masterKTTY: &kernel.TTY{Index: n}, + slaveKTTY: &kernel.TTY{Index: n}, + } + t.EnableLeakCheck("tty.Terminal") + return &t +} + +// setControllingTTY makes tm the controlling terminal of the calling thread +// group. 
func (tm *Terminal) setControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		// Ioctls are only reachable from task goroutines, so a nil
		// task indicates a sentry bug, not a user error.
		panic("setControllingTTY must be called from a task context")
	}

	// args[2] carries the TIOCSCTTY "steal" argument.
	return task.ThreadGroup().SetControllingTTY(tm.tty(isMaster), args[2].Int())
}

// releaseControllingTTY removes tm as the controlling terminal of the calling
// thread group.
func (tm *Terminal) releaseControllingTTY(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) error {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		panic("releaseControllingTTY must be called from a task context")
	}

	return task.ThreadGroup().ReleaseControllingTTY(tm.tty(isMaster))
}

// foregroundProcessGroup gets the process group ID of tm's foreground process.
//
// The PGID is written to the address in args[2] as an int32; the uintptr
// return is always 0 (TIOCGPGRP returns its result via memory).
func (tm *Terminal) foregroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		panic("foregroundProcessGroup must be called from a task context")
	}

	ret, err := task.ThreadGroup().ForegroundProcessGroup(tm.tty(isMaster))
	if err != nil {
		return 0, err
	}

	// Write it out to *arg.
	_, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(ret), usermem.IOOpts{
		AddressSpaceActive: true,
	})
	return 0, err
}

// setForegroundProcessGroup sets tm's foreground process group to the PGID
// read from the address in args[2] (TIOCSPGRP).
func (tm *Terminal) setForegroundProcessGroup(ctx context.Context, io usermem.IO, args arch.SyscallArguments, isMaster bool) (uintptr, error) {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		panic("setForegroundProcessGroup must be called from a task context")
	}

	// Read in the process group ID.
+ var pgid int32 + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pgid, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + + ret, err := task.ThreadGroup().SetForegroundProcessGroup(tm.tty(isMaster), kernel.ProcessGroupID(pgid)) + return uintptr(ret), err +} + +func (tm *Terminal) tty(isMaster bool) *kernel.TTY { + if isMaster { + return tm.masterKTTY + } + return tm.slaveKTTY +} + +// LINT.ThenChange(../../fsimpl/devpts/terminal.go) diff --git a/pkg/sentry/fs/tty/tty_test.go b/pkg/sentry/fs/tty/tty_test.go new file mode 100644 index 000000000..2cbc05678 --- /dev/null +++ b/pkg/sentry/fs/tty/tty_test.go @@ -0,0 +1,56 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "testing" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/sentry/contexttest" + "gvisor.dev/gvisor/pkg/usermem" +) + +func TestSimpleMasterToSlave(t *testing.T) { + ld := newLineDiscipline(linux.DefaultSlaveTermios) + ctx := contexttest.Context(t) + inBytes := []byte("hello, tty\n") + src := usermem.BytesIOSequence(inBytes) + outBytes := make([]byte, 32) + dst := usermem.BytesIOSequence(outBytes) + + // Write to the input queue. 
+ nw, err := ld.inputQueueWrite(ctx, src) + if err != nil { + t.Fatalf("error writing to input queue: %v", err) + } + if nw != int64(len(inBytes)) { + t.Fatalf("wrote wrong length: got %d, want %d", nw, len(inBytes)) + } + + // Read from the input queue. + nr, err := ld.inputQueueRead(ctx, dst) + if err != nil { + t.Fatalf("error reading from input queue: %v", err) + } + if nr != int64(len(inBytes)) { + t.Fatalf("read wrong length: got %d, want %d", nr, len(inBytes)) + } + + outStr := string(outBytes[:nr]) + inStr := string(inBytes) + if outStr != inStr { + t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr) + } +} diff --git a/pkg/sentry/fs/user/BUILD b/pkg/sentry/fs/user/BUILD new file mode 100644 index 000000000..66e949c95 --- /dev/null +++ b/pkg/sentry/fs/user/BUILD @@ -0,0 +1,40 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "user", + srcs = [ + "path.go", + "user.go", + ], + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/fspath", + "//pkg/log", + "//pkg/sentry/fs", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/vfs", + "//pkg/syserror", + "//pkg/usermem", + ], +) + +go_test( + name = "user_test", + size = "small", + srcs = ["user_test.go"], + library = ":user", + deps = [ + "//pkg/abi/linux", + "//pkg/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/contexttest", + "//pkg/usermem", + ], +) diff --git a/pkg/sentry/fs/user/path.go b/pkg/sentry/fs/user/path.go new file mode 100644 index 000000000..397e96045 --- /dev/null +++ b/pkg/sentry/fs/user/path.go @@ -0,0 +1,170 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package user + +import ( + "fmt" + "path" + "strings" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/vfs" + "gvisor.dev/gvisor/pkg/syserror" +) + +// ResolveExecutablePath resolves the given executable name given the working +// dir and environment. +func ResolveExecutablePath(ctx context.Context, args *kernel.CreateProcessArgs) (string, error) { + name := args.Filename + if len(name) == 0 { + if len(args.Argv) == 0 { + return "", fmt.Errorf("no filename or command provided") + } + name = args.Argv[0] + } + + // Absolute paths can be used directly. + if path.IsAbs(name) { + return name, nil + } + + // Paths with '/' in them should be joined to the working directory, or + // to the root if working directory is not set. + if strings.IndexByte(name, '/') > 0 { + wd := args.WorkingDirectory + if wd == "" { + wd = "/" + } + if !path.IsAbs(wd) { + return "", fmt.Errorf("working directory %q must be absolute", wd) + } + return path.Join(wd, name), nil + } + + // Otherwise, We must lookup the name in the paths. 
+ paths := getPath(args.Envv) + if kernel.VFS2Enabled { + f, err := resolveVFS2(ctx, args.Credentials, args.MountNamespaceVFS2, paths, name) + if err != nil { + return "", fmt.Errorf("error finding executable %q in PATH %v: %v", name, paths, err) + } + return f, nil + } + + f, err := resolve(ctx, args.MountNamespace, paths, name) + if err != nil { + return "", fmt.Errorf("error finding executable %q in PATH %v: %v", name, paths, err) + } + return f, nil +} + +func resolve(ctx context.Context, mns *fs.MountNamespace, paths []string, name string) (string, error) { + root := fs.RootFromContext(ctx) + if root == nil { + // Caller has no root. Don't bother traversing anything. + return "", syserror.ENOENT + } + defer root.DecRef() + for _, p := range paths { + if !path.IsAbs(p) { + // Relative paths aren't safe, no one should be using them. + log.Warningf("Skipping relative path %q in $PATH", p) + continue + } + + binPath := path.Join(p, name) + traversals := uint(linux.MaxSymlinkTraversals) + d, err := mns.FindInode(ctx, root, nil, binPath, &traversals) + if err == syserror.ENOENT || err == syserror.EACCES { + // Didn't find it here. + continue + } + if err != nil { + return "", err + } + defer d.DecRef() + + // Check that it is a regular file. + if !fs.IsRegular(d.Inode.StableAttr) { + continue + } + + // Check whether we can read and execute the found file. + if err := d.Inode.CheckPermission(ctx, fs.PermMask{Read: true, Execute: true}); err != nil { + log.Infof("Found executable at %q, but user cannot execute it: %v", binPath, err) + continue + } + return path.Join("/", p, name), nil + } + + // Couldn't find it. + return "", syserror.ENOENT +} + +func resolveVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNamespace, paths []string, name string) (string, error) { + root := mns.Root() + defer root.DecRef() + for _, p := range paths { + if !path.IsAbs(p) { + // Relative paths aren't safe, no one should be using them. 
+ log.Warningf("Skipping relative path %q in $PATH", p) + continue + } + + binPath := path.Join(p, name) + pop := &vfs.PathOperation{ + Root: root, + Start: root, + Path: fspath.Parse(binPath), + FollowFinalSymlink: true, + } + opts := &vfs.OpenOptions{ + FileExec: true, + Flags: linux.O_RDONLY, + } + dentry, err := root.Mount().Filesystem().VirtualFilesystem().OpenAt(ctx, creds, pop, opts) + if err == syserror.ENOENT || err == syserror.EACCES { + // Didn't find it here. + continue + } + if err != nil { + return "", err + } + dentry.DecRef() + + return binPath, nil + } + + // Couldn't find it. + return "", syserror.ENOENT +} + +// getPath returns the PATH as a slice of strings given the environment +// variables. +func getPath(env []string) []string { + const prefix = "PATH=" + for _, e := range env { + if strings.HasPrefix(e, prefix) { + return strings.Split(strings.TrimPrefix(e, prefix), ":") + } + } + return nil +} diff --git a/pkg/sentry/fs/user/user.go b/pkg/sentry/fs/user/user.go new file mode 100644 index 000000000..f4d525523 --- /dev/null +++ b/pkg/sentry/fs/user/user.go @@ -0,0 +1,239 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package user contains methods for resolving filesystem paths based on the +// user and their environment. 
+package user + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/fspath" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/vfs" + "gvisor.dev/gvisor/pkg/usermem" +) + +type fileReader struct { + // Ctx is the context for the file reader. + Ctx context.Context + + // File is the file to read from. + File *fs.File +} + +// Read implements io.Reader.Read. +func (r *fileReader) Read(buf []byte) (int, error) { + n, err := r.File.Readv(r.Ctx, usermem.BytesIOSequence(buf)) + return int(n), err +} + +// getExecUserHome returns the home directory of the executing user read from +// /etc/passwd as read from the container filesystem. +func getExecUserHome(ctx context.Context, rootMns *fs.MountNamespace, uid auth.KUID) (string, error) { + // The default user home directory to return if no user matching the user + // if found in the /etc/passwd found in the image. + const defaultHome = "/" + + // Open the /etc/passwd file from the dirent via the root mount namespace. + mnsRoot := rootMns.Root() + maxTraversals := uint(linux.MaxSymlinkTraversals) + dirent, err := rootMns.FindInode(ctx, mnsRoot, nil, "/etc/passwd", &maxTraversals) + if err != nil { + // NOTE: Ignore errors opening the passwd file. If the passwd file + // doesn't exist we will return the default home directory. + return defaultHome, nil + } + defer dirent.DecRef() + + // Check read permissions on the file. + if err := dirent.Inode.CheckPermission(ctx, fs.PermMask{Read: true}); err != nil { + // NOTE: Ignore permissions errors here and return default root dir. + return defaultHome, nil + } + + // Only open regular files. We don't open other files like named pipes as + // they may block and might present some attack surface to the container. + // Note that runc does not seem to do this kind of checking. 
+ if !fs.IsRegular(dirent.Inode.StableAttr) { + return defaultHome, nil + } + + f, err := dirent.Inode.GetFile(ctx, dirent, fs.FileFlags{Read: true, Directory: false}) + if err != nil { + return "", err + } + defer f.DecRef() + + r := &fileReader{ + Ctx: ctx, + File: f, + } + + return findHomeInPasswd(uint32(uid), r, defaultHome) +} + +type fileReaderVFS2 struct { + ctx context.Context + fd *vfs.FileDescription +} + +func (r *fileReaderVFS2) Read(buf []byte) (int, error) { + n, err := r.fd.Read(r.ctx, usermem.BytesIOSequence(buf), vfs.ReadOptions{}) + return int(n), err +} + +func getExecUserHomeVFS2(ctx context.Context, mns *vfs.MountNamespace, uid auth.KUID) (string, error) { + const defaultHome = "/" + + root := mns.Root() + defer root.DecRef() + + creds := auth.CredentialsFromContext(ctx) + + target := &vfs.PathOperation{ + Root: root, + Start: root, + Path: fspath.Parse("/etc/passwd"), + } + + opts := &vfs.OpenOptions{ + Flags: linux.O_RDONLY, + } + + fd, err := root.Mount().Filesystem().VirtualFilesystem().OpenAt(ctx, creds, target, opts) + if err != nil { + return defaultHome, nil + } + defer fd.DecRef() + + r := &fileReaderVFS2{ + ctx: ctx, + fd: fd, + } + + homeDir, err := findHomeInPasswd(uint32(uid), r, defaultHome) + if err != nil { + return "", err + } + + return homeDir, nil +} + +// MaybeAddExecUserHome returns a new slice with the HOME enviroment variable +// set if the slice does not already contain it, otherwise it returns the +// original slice unmodified. +func MaybeAddExecUserHome(ctx context.Context, mns *fs.MountNamespace, uid auth.KUID, envv []string) ([]string, error) { + // Check if the envv already contains HOME. + for _, env := range envv { + if strings.HasPrefix(env, "HOME=") { + // We have it. Return the original slice unmodified. + return envv, nil + } + } + + // Read /etc/passwd for the user's HOME directory and set the HOME + // environment variable as required by POSIX if it is not overridden by + // the user. 
+ homeDir, err := getExecUserHome(ctx, mns, uid) + if err != nil { + return nil, fmt.Errorf("error reading exec user: %v", err) + } + + return append(envv, "HOME="+homeDir), nil +} + +// MaybeAddExecUserHomeVFS2 returns a new slice with the HOME enviroment +// variable set if the slice does not already contain it, otherwise it returns +// the original slice unmodified. +func MaybeAddExecUserHomeVFS2(ctx context.Context, vmns *vfs.MountNamespace, uid auth.KUID, envv []string) ([]string, error) { + // Check if the envv already contains HOME. + for _, env := range envv { + if strings.HasPrefix(env, "HOME=") { + // We have it. Return the original slice unmodified. + return envv, nil + } + } + + // Read /etc/passwd for the user's HOME directory and set the HOME + // environment variable as required by POSIX if it is not overridden by + // the user. + homeDir, err := getExecUserHomeVFS2(ctx, vmns, uid) + if err != nil { + return nil, fmt.Errorf("error reading exec user: %v", err) + } + return append(envv, "HOME="+homeDir), nil +} + +// findHomeInPasswd parses a passwd file and returns the given user's home +// directory. This function does it's best to replicate the runc's behavior. +func findHomeInPasswd(uid uint32, passwd io.Reader, defaultHome string) (string, error) { + s := bufio.NewScanner(passwd) + + for s.Scan() { + if err := s.Err(); err != nil { + return "", err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // Pull out part of passwd entry. Loosely parse the passwd entry as some + // passwd files could be poorly written and for compatibility with runc. + // + // Per 'man 5 passwd' + // /etc/passwd contains one line for each user account, with seven + // fields delimited by colons (“:”). 
These fields are: + // + // - login name + // - optional encrypted password + // - numerical user ID + // - numerical group ID + // - user name or comment field + // - user home directory + // - optional user command interpreter + parts := strings.Split(line, ":") + + found := false + homeDir := "" + for i, p := range parts { + switch i { + case 2: + parsedUID, err := strconv.ParseUint(p, 10, 32) + if err == nil && parsedUID == uint64(uid) { + found = true + } + case 5: + homeDir = p + } + } + if found { + // NOTE: If the uid is present but the home directory is not + // present in the /etc/passwd entry we return an empty string. This + // is, for better or worse, what runc does. + return homeDir, nil + } + } + + return defaultHome, nil +} diff --git a/pkg/sentry/fs/user/user_test.go b/pkg/sentry/fs/user/user_test.go new file mode 100644 index 000000000..7d8e9ac7c --- /dev/null +++ b/pkg/sentry/fs/user/user_test.go @@ -0,0 +1,198 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package user + +import ( + "fmt" + "strings" + "testing" + + "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/context" + "gvisor.dev/gvisor/pkg/sentry/fs" + "gvisor.dev/gvisor/pkg/sentry/fs/tmpfs" + "gvisor.dev/gvisor/pkg/sentry/kernel/auth" + "gvisor.dev/gvisor/pkg/sentry/kernel/contexttest" + "gvisor.dev/gvisor/pkg/usermem" +) + +// createEtcPasswd creates /etc/passwd with the given contents and mode. 
If +// mode is empty, then no file will be created. If mode is not a regular file +// mode, then contents is ignored. +func createEtcPasswd(ctx context.Context, root *fs.Dirent, contents string, mode linux.FileMode) error { + if err := root.CreateDirectory(ctx, root, "etc", fs.FilePermsFromMode(0755)); err != nil { + return err + } + etc, err := root.Walk(ctx, root, "etc") + if err != nil { + return err + } + defer etc.DecRef() + switch mode.FileType() { + case 0: + // Don't create anything. + return nil + case linux.S_IFREG: + passwd, err := etc.Create(ctx, root, "passwd", fs.FileFlags{Write: true}, fs.FilePermsFromMode(mode)) + if err != nil { + return err + } + defer passwd.DecRef() + if _, err := passwd.Writev(ctx, usermem.BytesIOSequence([]byte(contents))); err != nil { + return err + } + return nil + case linux.S_IFDIR: + return etc.CreateDirectory(ctx, root, "passwd", fs.FilePermsFromMode(mode)) + case linux.S_IFIFO: + return etc.CreateFifo(ctx, root, "passwd", fs.FilePermsFromMode(mode)) + default: + return fmt.Errorf("unknown file type %x", mode.FileType()) + } +} + +// TestGetExecUserHome tests the getExecUserHome function. +func TestGetExecUserHome(t *testing.T) { + tests := map[string]struct { + uid auth.KUID + passwdContents string + passwdMode linux.FileMode + expected string + }{ + "success": { + uid: 1000, + passwdContents: "adin::1000:1111::/home/adin:/bin/sh", + passwdMode: linux.S_IFREG | 0666, + expected: "/home/adin", + }, + "no_perms": { + uid: 1000, + passwdContents: "adin::1000:1111::/home/adin:/bin/sh", + passwdMode: linux.S_IFREG, + expected: "/", + }, + "no_passwd": { + uid: 1000, + expected: "/", + }, + "directory": { + uid: 1000, + passwdMode: linux.S_IFDIR | 0666, + expected: "/", + }, + // Currently we don't allow named pipes. 
+ "named_pipe": { + uid: 1000, + passwdMode: linux.S_IFIFO | 0666, + expected: "/", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx := contexttest.Context(t) + msrc := fs.NewPseudoMountSource(ctx) + rootInode := tmpfs.NewDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0777), msrc) + + mns, err := fs.NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + defer mns.DecRef() + root := mns.Root() + defer root.DecRef() + ctx = fs.WithRoot(ctx, root) + + if err := createEtcPasswd(ctx, root, tc.passwdContents, tc.passwdMode); err != nil { + t.Fatalf("createEtcPasswd failed: %v", err) + } + + got, err := getExecUserHome(ctx, mns, tc.uid) + if err != nil { + t.Fatalf("failed to get user home: %v", err) + } + + if got != tc.expected { + t.Fatalf("expected %v, got: %v", tc.expected, got) + } + }) + } +} + +// TestFindHomeInPasswd tests the findHomeInPasswd function's passwd file parsing. +func TestFindHomeInPasswd(t *testing.T) { + tests := map[string]struct { + uid uint32 + passwd string + expected string + def string + }{ + "empty": { + uid: 1000, + passwd: "", + expected: "/", + def: "/", + }, + "whitespace": { + uid: 1000, + passwd: " ", + expected: "/", + def: "/", + }, + "full": { + uid: 1000, + passwd: "adin::1000:1111::/home/adin:/bin/sh", + expected: "/home/adin", + def: "/", + }, + // For better or worse, this is how runc works. 
+ "partial": { + uid: 1000, + passwd: "adin::1000:1111:", + expected: "", + def: "/", + }, + "multiple": { + uid: 1001, + passwd: "adin::1000:1111::/home/adin:/bin/sh\nian::1001:1111::/home/ian:/bin/sh", + expected: "/home/ian", + def: "/", + }, + "duplicate": { + uid: 1000, + passwd: "adin::1000:1111::/home/adin:/bin/sh\nian::1000:1111::/home/ian:/bin/sh", + expected: "/home/adin", + def: "/", + }, + "empty_lines": { + uid: 1001, + passwd: "adin::1000:1111::/home/adin:/bin/sh\n\n\nian::1001:1111::/home/ian:/bin/sh", + expected: "/home/ian", + def: "/", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + got, err := findHomeInPasswd(tc.uid, strings.NewReader(tc.passwd), tc.def) + if err != nil { + t.Fatalf("error parsing passwd: %v", err) + } + if tc.expected != got { + t.Fatalf("expected %v, got: %v", tc.expected, got) + } + }) + } +} |