Diffstat (limited to 'pkg/sentry')
-rw-r--r--pkg/sentry/arch/BUILD2
-rw-r--r--pkg/sentry/arch/arch_aarch64.go11
-rw-r--r--pkg/sentry/arch/arch_amd64.go1
-rw-r--r--pkg/sentry/arch/arch_arm64.go1
-rw-r--r--pkg/sentry/arch/arch_state_x86.go1
-rw-r--r--pkg/sentry/arch/arch_x86.go11
-rw-r--r--pkg/sentry/arch/arch_x86_impl.go2
-rw-r--r--pkg/sentry/arch/fpu/BUILD3
-rw-r--r--pkg/sentry/arch/fpu/fpu.go13
-rw-r--r--pkg/sentry/arch/fpu/fpu_amd64.go7
-rw-r--r--pkg/sentry/arch/fpu/fpu_arm64.go1
-rw-r--r--pkg/sentry/arch/fpu/fpu_unsafe.go (renamed from pkg/sentry/fsimpl/ext/disklayout/superblock_test.go)25
-rw-r--r--pkg/sentry/arch/signal_amd64.go1
-rw-r--r--pkg/sentry/arch/signal_arm64.go1
-rw-r--r--pkg/sentry/arch/syscalls_amd64.go1
-rw-r--r--pkg/sentry/arch/syscalls_arm64.go1
-rw-r--r--pkg/sentry/control/BUILD16
-rw-r--r--pkg/sentry/control/control.proto40
-rw-r--r--pkg/sentry/control/events.go65
-rw-r--r--pkg/sentry/control/fs.go93
-rw-r--r--pkg/sentry/control/lifecycle.go (renamed from pkg/sentry/fsimpl/ext/disklayout/extent_test.go)30
-rw-r--r--pkg/sentry/control/logging.go24
-rw-r--r--pkg/sentry/control/pprof.go30
-rw-r--r--pkg/sentry/control/proc.go4
-rw-r--r--pkg/sentry/control/state.go3
-rw-r--r--pkg/sentry/control/usage.go183
-rw-r--r--pkg/sentry/devices/memdev/BUILD2
-rw-r--r--pkg/sentry/devices/memdev/full.go6
-rw-r--r--pkg/sentry/devices/quotedev/BUILD16
-rw-r--r--pkg/sentry/devices/quotedev/quotedev.go52
-rw-r--r--pkg/sentry/devices/ttydev/BUILD2
-rw-r--r--pkg/sentry/devices/ttydev/ttydev.go4
-rw-r--r--pkg/sentry/devices/tundev/BUILD2
-rw-r--r--pkg/sentry/devices/tundev/tundev.go8
-rw-r--r--pkg/sentry/fdimport/BUILD1
-rw-r--r--pkg/sentry/fdimport/fdimport.go22
-rw-r--r--pkg/sentry/fs/BUILD4
-rw-r--r--pkg/sentry/fs/copy_up.go26
-rw-r--r--pkg/sentry/fs/dev/BUILD2
-rw-r--r--pkg/sentry/fs/dev/full.go4
-rw-r--r--pkg/sentry/fs/dev/net_tun.go8
-rw-r--r--pkg/sentry/fs/dirent.go104
-rw-r--r--pkg/sentry/fs/fdpipe/BUILD5
-rw-r--r--pkg/sentry/fs/fdpipe/pipe.go10
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_opener.go20
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_opener_test.go21
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_test.go30
-rw-r--r--pkg/sentry/fs/file.go50
-rw-r--r--pkg/sentry/fs/file_operations.go2
-rw-r--r--pkg/sentry/fs/file_overlay.go26
-rw-r--r--pkg/sentry/fs/fs.go26
-rw-r--r--pkg/sentry/fs/fsutil/BUILD4
-rw-r--r--pkg/sentry/fs/fsutil/file.go54
-rw-r--r--pkg/sentry/fs/fsutil/host_file_mapper.go21
-rw-r--r--pkg/sentry/fs/fsutil/inode.go60
-rw-r--r--pkg/sentry/fs/fsutil/inode_cached_test.go4
-rw-r--r--pkg/sentry/fs/gofer/BUILD5
-rw-r--r--pkg/sentry/fs/gofer/file.go6
-rw-r--r--pkg/sentry/fs/gofer/gofer_test.go8
-rw-r--r--pkg/sentry/fs/gofer/inode.go15
-rw-r--r--pkg/sentry/fs/gofer/inode_state.go10
-rw-r--r--pkg/sentry/fs/gofer/path.go118
-rw-r--r--pkg/sentry/fs/gofer/session.go17
-rw-r--r--pkg/sentry/fs/gofer/socket.go9
-rw-r--r--pkg/sentry/fs/host/BUILD4
-rw-r--r--pkg/sentry/fs/host/file.go8
-rw-r--r--pkg/sentry/fs/host/host.go4
-rw-r--r--pkg/sentry/fs/host/inode.go32
-rw-r--r--pkg/sentry/fs/host/socket.go6
-rw-r--r--pkg/sentry/fs/host/socket_iovec.go10
-rw-r--r--pkg/sentry/fs/host/tty.go26
-rw-r--r--pkg/sentry/fs/host/util.go6
-rw-r--r--pkg/sentry/fs/host/util_amd64_unsafe.go1
-rw-r--r--pkg/sentry/fs/host/util_arm64_unsafe.go1
-rw-r--r--pkg/sentry/fs/inode.go10
-rw-r--r--pkg/sentry/fs/inode_operations.go4
-rw-r--r--pkg/sentry/fs/inode_overlay.go26
-rw-r--r--pkg/sentry/fs/inode_overlay_test.go8
-rw-r--r--pkg/sentry/fs/inotify.go26
-rw-r--r--pkg/sentry/fs/mock.go6
-rw-r--r--pkg/sentry/fs/mounts.go18
-rw-r--r--pkg/sentry/fs/overlay.go4
-rw-r--r--pkg/sentry/fs/proc/BUILD2
-rw-r--r--pkg/sentry/fs/proc/exec_args.go6
-rw-r--r--pkg/sentry/fs/proc/fds.go6
-rw-r--r--pkg/sentry/fs/proc/net.go4
-rw-r--r--pkg/sentry/fs/proc/proc.go12
-rw-r--r--pkg/sentry/fs/proc/seqfile/BUILD2
-rw-r--r--pkg/sentry/fs/proc/seqfile/seqfile.go4
-rw-r--r--pkg/sentry/fs/proc/sys_net.go4
-rw-r--r--pkg/sentry/fs/proc/task.go156
-rw-r--r--pkg/sentry/fs/proc/uid_gid_map.go10
-rw-r--r--pkg/sentry/fs/proc/uptime.go4
-rw-r--r--pkg/sentry/fs/ramfs/BUILD2
-rw-r--r--pkg/sentry/fs/ramfs/dir.go38
-rw-r--r--pkg/sentry/fs/ramfs/socket.go4
-rw-r--r--pkg/sentry/fs/splice.go24
-rw-r--r--pkg/sentry/fs/timerfd/BUILD2
-rw-r--r--pkg/sentry/fs/timerfd/timerfd.go8
-rw-r--r--pkg/sentry/fs/tmpfs/BUILD2
-rw-r--r--pkg/sentry/fs/tmpfs/inode_file.go24
-rw-r--r--pkg/sentry/fs/tmpfs/tmpfs.go4
-rw-r--r--pkg/sentry/fs/tty/BUILD2
-rw-r--r--pkg/sentry/fs/tty/dir.go28
-rw-r--r--pkg/sentry/fs/tty/fs.go4
-rw-r--r--pkg/sentry/fs/tty/line_discipline.go10
-rw-r--r--pkg/sentry/fs/tty/master.go6
-rw-r--r--pkg/sentry/fs/tty/queue.go6
-rw-r--r--pkg/sentry/fs/tty/replica.go6
-rw-r--r--pkg/sentry/fs/user/BUILD2
-rw-r--r--pkg/sentry/fs/user/path.go12
-rw-r--r--pkg/sentry/fsbridge/BUILD2
-rw-r--r--pkg/sentry/fsbridge/fs.go8
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/BUILD3
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/base.go13
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/cgroupfs.go194
-rw-r--r--pkg/sentry/fsimpl/cgroupfs/memory.go30
-rw-r--r--pkg/sentry/fsimpl/devpts/BUILD3
-rw-r--r--pkg/sentry/fsimpl/devpts/devpts.go10
-rw-r--r--pkg/sentry/fsimpl/devpts/devpts_test.go34
-rw-r--r--pkg/sentry/fsimpl/devpts/line_discipline.go67
-rw-r--r--pkg/sentry/fsimpl/devpts/master.go8
-rw-r--r--pkg/sentry/fsimpl/devpts/queue.go46
-rw-r--r--pkg/sentry/fsimpl/devpts/replica.go8
-rw-r--r--pkg/sentry/fsimpl/eventfd/BUILD2
-rw-r--r--pkg/sentry/fsimpl/eventfd/eventfd.go10
-rw-r--r--pkg/sentry/fsimpl/ext/BUILD103
-rw-r--r--pkg/sentry/fsimpl/ext/README.md117
-rw-r--r--pkg/sentry/fsimpl/ext/assets/README.md36
-rw-r--r--pkg/sentry/fsimpl/ext/assets/bigfile.txt41
-rw-r--r--pkg/sentry/fsimpl/ext/assets/file.txt1
l---------pkg/sentry/fsimpl/ext/assets/symlink.txt1
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext2bin65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext3bin65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/assets/tiny.ext4bin65536 -> 0 bytes
-rw-r--r--pkg/sentry/fsimpl/ext/benchmark/BUILD17
-rw-r--r--pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go211
-rwxr-xr-xpkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh72
-rw-r--r--pkg/sentry/fsimpl/ext/block_map_file.go203
-rw-r--r--pkg/sentry/fsimpl/ext/block_map_test.go160
-rw-r--r--pkg/sentry/fsimpl/ext/dentry.go82
-rw-r--r--pkg/sentry/fsimpl/ext/directory.go312
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/BUILD48
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group.go143
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_32.go74
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_64.go95
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/block_group_test.go28
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent.go75
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_new.go63
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_old.go51
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/dirent_test.go28
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/disklayout.go48
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/extent.go155
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode.go277
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_new.go98
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_old.go119
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/inode_test.go224
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock.go477
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_32.go78
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_64.go97
-rw-r--r--pkg/sentry/fsimpl/ext/disklayout/superblock_old.go107
-rw-r--r--pkg/sentry/fsimpl/ext/ext.go159
-rw-r--r--pkg/sentry/fsimpl/ext/ext_test.go926
-rw-r--r--pkg/sentry/fsimpl/ext/extent_file.go239
-rw-r--r--pkg/sentry/fsimpl/ext/extent_test.go266
-rw-r--r--pkg/sentry/fsimpl/ext/file_description.go65
-rw-r--r--pkg/sentry/fsimpl/ext/filesystem.go555
-rw-r--r--pkg/sentry/fsimpl/ext/inode.go246
-rw-r--r--pkg/sentry/fsimpl/ext/regular_file.go155
-rw-r--r--pkg/sentry/fsimpl/ext/symlink.go115
-rw-r--r--pkg/sentry/fsimpl/ext/utils.go94
-rw-r--r--pkg/sentry/fsimpl/fuse/BUILD4
-rw-r--r--pkg/sentry/fsimpl/fuse/connection.go8
-rw-r--r--pkg/sentry/fsimpl/fuse/connection_test.go4
-rw-r--r--pkg/sentry/fsimpl/fuse/dev.go36
-rw-r--r--pkg/sentry/fsimpl/fuse/dev_test.go4
-rw-r--r--pkg/sentry/fsimpl/fuse/directory.go12
-rw-r--r--pkg/sentry/fsimpl/fuse/fusefs.go60
-rw-r--r--pkg/sentry/fsimpl/fuse/read_write.go8
-rw-r--r--pkg/sentry/fsimpl/fuse/regular_file.go22
-rw-r--r--pkg/sentry/fsimpl/gofer/BUILD2
-rw-r--r--pkg/sentry/fsimpl/gofer/directory.go8
-rw-r--r--pkg/sentry/fsimpl/gofer/filesystem.go174
-rw-r--r--pkg/sentry/fsimpl/gofer/gofer.go168
-rw-r--r--pkg/sentry/fsimpl/gofer/handle.go41
-rw-r--r--pkg/sentry/fsimpl/gofer/host_named_pipe.go6
-rw-r--r--pkg/sentry/fsimpl/gofer/p9file.go4
-rw-r--r--pkg/sentry/fsimpl/gofer/regular_file.go101
-rw-r--r--pkg/sentry/fsimpl/gofer/revalidate.go10
-rw-r--r--pkg/sentry/fsimpl/gofer/save_restore.go8
-rw-r--r--pkg/sentry/fsimpl/gofer/special_file.go166
-rw-r--r--pkg/sentry/fsimpl/gofer/symlink.go2
-rw-r--r--pkg/sentry/fsimpl/host/BUILD3
-rw-r--r--pkg/sentry/fsimpl/host/host.go215
-rw-r--r--pkg/sentry/fsimpl/host/socket.go6
-rw-r--r--pkg/sentry/fsimpl/host/socket_iovec.go10
-rw-r--r--pkg/sentry/fsimpl/host/tty.go26
-rw-r--r--pkg/sentry/fsimpl/host/util.go6
-rw-r--r--pkg/sentry/fsimpl/kernfs/BUILD5
-rw-r--r--pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go6
-rw-r--r--pkg/sentry/fsimpl/kernfs/fd_impl_util.go8
-rw-r--r--pkg/sentry/fsimpl/kernfs/filesystem.go98
-rw-r--r--pkg/sentry/fsimpl/kernfs/inode_impl_util.go69
-rw-r--r--pkg/sentry/fsimpl/kernfs/kernfs.go121
-rw-r--r--pkg/sentry/fsimpl/kernfs/kernfs_test.go75
-rw-r--r--pkg/sentry/fsimpl/kernfs/symlink.go4
-rw-r--r--pkg/sentry/fsimpl/kernfs/synthetic_directory.go12
-rw-r--r--pkg/sentry/fsimpl/overlay/BUILD2
-rw-r--r--pkg/sentry/fsimpl/overlay/copy_up.go21
-rw-r--r--pkg/sentry/fsimpl/overlay/directory.go12
-rw-r--r--pkg/sentry/fsimpl/overlay/filesystem.go134
-rw-r--r--pkg/sentry/fsimpl/overlay/overlay.go24
-rw-r--r--pkg/sentry/fsimpl/overlay/regular_file.go4
-rw-r--r--pkg/sentry/fsimpl/pipefs/BUILD2
-rw-r--r--pkg/sentry/fsimpl/pipefs/pipefs.go4
-rw-r--r--pkg/sentry/fsimpl/proc/BUILD4
-rw-r--r--pkg/sentry/fsimpl/proc/filesystem.go4
-rw-r--r--pkg/sentry/fsimpl/proc/subtasks.go20
-rw-r--r--pkg/sentry/fsimpl/proc/task.go12
-rw-r--r--pkg/sentry/fsimpl/proc/task_fds.go40
-rw-r--r--pkg/sentry/fsimpl/proc/task_files.go247
-rw-r--r--pkg/sentry/fsimpl/proc/task_net.go4
-rw-r--r--pkg/sentry/fsimpl/proc/tasks.go6
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_files.go14
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_sys.go14
-rw-r--r--pkg/sentry/fsimpl/proc/tasks_test.go6
-rw-r--r--pkg/sentry/fsimpl/proc/yama.go6
-rw-r--r--pkg/sentry/fsimpl/signalfd/BUILD2
-rw-r--r--pkg/sentry/fsimpl/signalfd/signalfd.go4
-rw-r--r--pkg/sentry/fsimpl/sockfs/BUILD2
-rw-r--r--pkg/sentry/fsimpl/sockfs/sockfs.go4
-rw-r--r--pkg/sentry/fsimpl/sys/BUILD2
-rw-r--r--pkg/sentry/fsimpl/sys/kcov.go6
-rw-r--r--pkg/sentry/fsimpl/sys/sys.go6
-rw-r--r--pkg/sentry/fsimpl/testutil/kernel.go7
-rw-r--r--pkg/sentry/fsimpl/timerfd/BUILD2
-rw-r--r--pkg/sentry/fsimpl/timerfd/timerfd.go6
-rw-r--r--pkg/sentry/fsimpl/tmpfs/BUILD6
-rw-r--r--pkg/sentry/fsimpl/tmpfs/benchmark_test.go8
-rw-r--r--pkg/sentry/fsimpl/tmpfs/directory.go6
-rw-r--r--pkg/sentry/fsimpl/tmpfs/filesystem.go110
-rw-r--r--pkg/sentry/fsimpl/tmpfs/pipe_test.go6
-rw-r--r--pkg/sentry/fsimpl/tmpfs/regular_file.go34
-rw-r--r--pkg/sentry/fsimpl/tmpfs/regular_file_test.go6
-rw-r--r--pkg/sentry/fsimpl/tmpfs/tmpfs.go26
-rw-r--r--pkg/sentry/fsimpl/verity/BUILD18
-rw-r--r--pkg/sentry/fsimpl/verity/filesystem.go159
-rw-r--r--pkg/sentry/fsimpl/verity/verity.go363
-rw-r--r--pkg/sentry/fsimpl/verity/verity_test.go22
-rw-r--r--pkg/sentry/hostfd/hostfd_linux.go12
-rw-r--r--pkg/sentry/hostfd/hostfd_unsafe.go17
-rw-r--r--pkg/sentry/inet/inet.go3
-rw-r--r--pkg/sentry/inet/test_stack.go50
-rw-r--r--pkg/sentry/kernel/BUILD9
-rw-r--r--pkg/sentry/kernel/abstract_socket_namespace.go8
-rw-r--r--pkg/sentry/kernel/auth/BUILD2
-rw-r--r--pkg/sentry/kernel/auth/credentials.go14
-rw-r--r--pkg/sentry/kernel/auth/id_map.go46
-rw-r--r--pkg/sentry/kernel/auth/user_namespace.go8
-rw-r--r--pkg/sentry/kernel/cgroup.go1
-rw-r--r--pkg/sentry/kernel/eventfd/BUILD2
-rw-r--r--pkg/sentry/kernel/eventfd/eventfd.go10
-rw-r--r--pkg/sentry/kernel/fasync/BUILD2
-rw-r--r--pkg/sentry/kernel/fasync/fasync.go4
-rw-r--r--pkg/sentry/kernel/fd_table.go200
-rw-r--r--pkg/sentry/kernel/fd_table_unsafe.go11
-rw-r--r--pkg/sentry/kernel/futex/BUILD4
-rw-r--r--pkg/sentry/kernel/futex/futex.go59
-rw-r--r--pkg/sentry/kernel/futex/futex_test.go4
-rw-r--r--pkg/sentry/kernel/ipc/BUILD20
-rw-r--r--pkg/sentry/kernel/ipc/object.go150
-rw-r--r--pkg/sentry/kernel/ipc/registry.go196
-rw-r--r--pkg/sentry/kernel/ipc_namespace.go8
-rw-r--r--pkg/sentry/kernel/kcov.go20
-rw-r--r--pkg/sentry/kernel/kernel.go75
-rw-r--r--pkg/sentry/kernel/kernel_opts.go3
-rw-r--r--pkg/sentry/kernel/msgqueue/BUILD36
-rw-r--r--pkg/sentry/kernel/msgqueue/msgqueue.go618
-rw-r--r--pkg/sentry/kernel/pipe/BUILD4
-rw-r--r--pkg/sentry/kernel/pipe/node.go12
-rw-r--r--pkg/sentry/kernel/pipe/node_test.go6
-rw-r--r--pkg/sentry/kernel/pipe/pipe.go16
-rw-r--r--pkg/sentry/kernel/pipe/pipe_test.go12
-rw-r--r--pkg/sentry/kernel/pipe/pipe_unsafe.go2
-rw-r--r--pkg/sentry/kernel/pipe/pipe_util.go6
-rw-r--r--pkg/sentry/kernel/pipe/vfs.go16
-rw-r--r--pkg/sentry/kernel/posixtimer.go18
-rw-r--r--pkg/sentry/kernel/ptrace.go61
-rw-r--r--pkg/sentry/kernel/ptrace_amd64.go5
-rw-r--r--pkg/sentry/kernel/ptrace_arm64.go5
-rw-r--r--pkg/sentry/kernel/rseq.go30
-rw-r--r--pkg/sentry/kernel/seccomp.go4
-rw-r--r--pkg/sentry/kernel/semaphore/BUILD15
-rw-r--r--pkg/sentry/kernel/semaphore/semaphore.go314
-rw-r--r--pkg/sentry/kernel/semaphore/semaphore_test.go30
-rw-r--r--pkg/sentry/kernel/sessions.go28
-rw-r--r--pkg/sentry/kernel/shm/BUILD3
-rw-r--r--pkg/sentry/kernel/shm/shm.go288
-rw-r--r--pkg/sentry/kernel/signalfd/BUILD2
-rw-r--r--pkg/sentry/kernel/signalfd/signalfd.go6
-rw-r--r--pkg/sentry/kernel/task.go37
-rw-r--r--pkg/sentry/kernel/task_acct.go6
-rw-r--r--pkg/sentry/kernel/task_block.go16
-rw-r--r--pkg/sentry/kernel/task_cgroup.go6
-rw-r--r--pkg/sentry/kernel/task_clone.go301
-rw-r--r--pkg/sentry/kernel/task_exec.go12
-rw-r--r--pkg/sentry/kernel/task_exit.go122
-rw-r--r--pkg/sentry/kernel/task_identity.go46
-rw-r--r--pkg/sentry/kernel/task_image.go6
-rw-r--r--pkg/sentry/kernel/task_log.go8
-rw-r--r--pkg/sentry/kernel/task_run.go8
-rw-r--r--pkg/sentry/kernel/task_sched.go4
-rw-r--r--pkg/sentry/kernel/task_signals.go47
-rw-r--r--pkg/sentry/kernel/task_start.go8
-rw-r--r--pkg/sentry/kernel/task_syscall.go19
-rw-r--r--pkg/sentry/kernel/task_usermem.go22
-rw-r--r--pkg/sentry/kernel/thread_group.go39
-rw-r--r--pkg/sentry/kernel/time/BUILD2
-rw-r--r--pkg/sentry/kernel/time/time.go6
-rw-r--r--pkg/sentry/kernel/timekeeper.go42
-rw-r--r--pkg/sentry/kernel/timekeeper_test.go4
-rw-r--r--pkg/sentry/loader/BUILD3
-rw-r--r--pkg/sentry/loader/elf.go72
-rw-r--r--pkg/sentry/loader/interpreter.go8
-rw-r--r--pkg/sentry/loader/loader.go27
-rw-r--r--pkg/sentry/loader/vdso.go74
-rw-r--r--pkg/sentry/memmap/BUILD1
-rw-r--r--pkg/sentry/mm/BUILD4
-rw-r--r--pkg/sentry/mm/aio_context.go40
-rw-r--r--pkg/sentry/mm/io.go26
-rw-r--r--pkg/sentry/mm/mm.go1
-rw-r--r--pkg/sentry/mm/mm_test.go8
-rw-r--r--pkg/sentry/mm/pma.go47
-rw-r--r--pkg/sentry/mm/shm.go6
-rw-r--r--pkg/sentry/mm/special_mappable.go8
-rw-r--r--pkg/sentry/mm/syscalls.go137
-rw-r--r--pkg/sentry/mm/vma.go20
-rw-r--r--pkg/sentry/pgalloc/BUILD2
-rw-r--r--pkg/sentry/pgalloc/pgalloc.go8
-rw-r--r--pkg/sentry/platform/kvm/bluepill_amd64.go28
-rw-r--r--pkg/sentry/platform/kvm/bluepill_amd64.s8
-rw-r--r--pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go1
-rw-r--r--pkg/sentry/platform/kvm/bluepill_arm64.go24
-rw-r--r--pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go30
-rw-r--r--pkg/sentry/platform/kvm/bluepill_fault.go8
-rw-r--r--pkg/sentry/platform/kvm/bluepill_unsafe.go15
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64.go1
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_test.go5
-rw-r--r--pkg/sentry/platform/kvm/kvm_amd64_unsafe.go1
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64.go1
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64_test.go1
-rw-r--r--pkg/sentry/platform/kvm/kvm_arm64_unsafe.go1
-rw-r--r--pkg/sentry/platform/kvm/kvm_test.go34
-rw-r--r--pkg/sentry/platform/kvm/machine.go25
-rw-r--r--pkg/sentry/platform/kvm/machine_amd64.go9
-rw-r--r--pkg/sentry/platform/kvm/machine_amd64_unsafe.go1
-rw-r--r--pkg/sentry/platform/kvm/machine_arm64.go1
-rw-r--r--pkg/sentry/platform/kvm/machine_arm64_unsafe.go14
-rw-r--r--pkg/sentry/platform/kvm/machine_unsafe.go6
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil.go42
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_amd64.go11
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_amd64.s57
-rw-r--r--pkg/sentry/platform/kvm/testutil/testutil_arm64.go1
-rw-r--r--pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go1
-rw-r--r--pkg/sentry/platform/ptrace/subprocess_amd64.go2
-rw-r--r--pkg/sentry/platform/ptrace/subprocess_arm64.go1
-rw-r--r--pkg/sentry/platform/ptrace/subprocess_linux.go14
-rw-r--r--pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go2
-rw-r--r--pkg/sentry/platform/ptrace/subprocess_unsafe.go6
-rw-r--r--pkg/sentry/seccheck/BUILD54
-rw-r--r--pkg/sentry/seccheck/clone.go53
-rw-r--r--pkg/sentry/seccheck/seccheck.go136
-rw-r--r--pkg/sentry/seccheck/seccheck_test.go157
-rw-r--r--pkg/sentry/seccheck/task.go39
-rw-r--r--pkg/sentry/socket/control/BUILD2
-rw-r--r--pkg/sentry/socket/control/control.go40
-rw-r--r--pkg/sentry/socket/control/control_vfs2.go4
-rw-r--r--pkg/sentry/socket/hostinet/BUILD2
-rw-r--r--pkg/sentry/socket/hostinet/socket.go205
-rw-r--r--pkg/sentry/socket/hostinet/socket_unsafe.go22
-rw-r--r--pkg/sentry/socket/hostinet/socket_vfs2.go10
-rw-r--r--pkg/sentry/socket/hostinet/sockopt_impl.go3
-rw-r--r--pkg/sentry/socket/hostinet/stack.go47
-rw-r--r--pkg/sentry/socket/netfilter/BUILD1
-rw-r--r--pkg/sentry/socket/netfilter/extensions.go16
-rw-r--r--pkg/sentry/socket/netfilter/ipv4.go11
-rw-r--r--pkg/sentry/socket/netfilter/ipv6.go11
-rw-r--r--pkg/sentry/socket/netfilter/netfilter.go18
-rw-r--r--pkg/sentry/socket/netfilter/owner_matcher.go24
-rw-r--r--pkg/sentry/socket/netfilter/targets.go5
-rw-r--r--pkg/sentry/socket/netfilter/tcp_matcher.go5
-rw-r--r--pkg/sentry/socket/netfilter/udp_matcher.go5
-rw-r--r--pkg/sentry/socket/netlink/BUILD3
-rw-r--r--pkg/sentry/socket/netlink/route/protocol.go43
-rw-r--r--pkg/sentry/socket/netlink/socket.go13
-rw-r--r--pkg/sentry/socket/netlink/socket_vfs2.go12
-rw-r--r--pkg/sentry/socket/netstack/BUILD4
-rw-r--r--pkg/sentry/socket/netstack/netstack.go137
-rw-r--r--pkg/sentry/socket/netstack/netstack_vfs2.go12
-rw-r--r--pkg/sentry/socket/netstack/stack.go20
-rw-r--r--pkg/sentry/socket/netstack/tun.go4
-rw-r--r--pkg/sentry/socket/socket.go6
-rw-r--r--pkg/sentry/socket/unix/BUILD2
-rw-r--r--pkg/sentry/socket/unix/transport/connectioned.go9
-rw-r--r--pkg/sentry/socket/unix/transport/connectionless.go5
-rw-r--r--pkg/sentry/socket/unix/transport/queue.go2
-rw-r--r--pkg/sentry/socket/unix/unix.go16
-rw-r--r--pkg/sentry/socket/unix/unix_vfs2.go12
-rw-r--r--pkg/sentry/state/BUILD2
-rw-r--r--pkg/sentry/state/state.go8
-rw-r--r--pkg/sentry/state/state_metadata.go3
-rw-r--r--pkg/sentry/strace/linux64_amd64.go1
-rw-r--r--pkg/sentry/strace/linux64_arm64.go1
-rw-r--r--pkg/sentry/strace/strace.go9
-rw-r--r--pkg/sentry/syscalls/BUILD2
-rw-r--r--pkg/sentry/syscalls/epoll.go26
-rw-r--r--pkg/sentry/syscalls/linux/BUILD5
-rw-r--r--pkg/sentry/syscalls/linux/error.go37
-rw-r--r--pkg/sentry/syscalls/linux/linux64.go240
-rw-r--r--pkg/sentry/syscalls/linux/sigset.go6
-rw-r--r--pkg/sentry/syscalls/linux/sys_aio.go39
-rw-r--r--pkg/sentry/syscalls/linux/sys_capability.go14
-rw-r--r--pkg/sentry/syscalls/linux/sys_clone_amd64.go1
-rw-r--r--pkg/sentry/syscalls/linux/sys_clone_arm64.go1
-rw-r--r--pkg/sentry/syscalls/linux/sys_epoll.go11
-rw-r--r--pkg/sentry/syscalls/linux/sys_eventfd.go4
-rw-r--r--pkg/sentry/syscalls/linux/sys_file.go265
-rw-r--r--pkg/sentry/syscalls/linux/sys_futex.go29
-rw-r--r--pkg/sentry/syscalls/linux/sys_getdents.go10
-rw-r--r--pkg/sentry/syscalls/linux/sys_identity.go8
-rw-r--r--pkg/sentry/syscalls/linux/sys_inotify.go12
-rw-r--r--pkg/sentry/syscalls/linux/sys_lseek.go8
-rw-r--r--pkg/sentry/syscalls/linux/sys_membarrier.go30
-rw-r--r--pkg/sentry/syscalls/linux/sys_mempolicy.go38
-rw-r--r--pkg/sentry/syscalls/linux/sys_mmap.go44
-rw-r--r--pkg/sentry/syscalls/linux/sys_mount.go19
-rw-r--r--pkg/sentry/syscalls/linux/sys_msgqueue.go193
-rw-r--r--pkg/sentry/syscalls/linux/sys_pipe.go4
-rw-r--r--pkg/sentry/syscalls/linux/sys_poll.go33
-rw-r--r--pkg/sentry/syscalls/linux/sys_prctl.go48
-rw-r--r--pkg/sentry/syscalls/linux/sys_random.go9
-rw-r--r--pkg/sentry/syscalls/linux/sys_read.go76
-rw-r--r--pkg/sentry/syscalls/linux/sys_rlimit.go24
-rw-r--r--pkg/sentry/syscalls/linux/sys_rseq.go6
-rw-r--r--pkg/sentry/syscalls/linux/sys_rusage.go4
-rw-r--r--pkg/sentry/syscalls/linux/sys_sched.go22
-rw-r--r--pkg/sentry/syscalls/linux/sys_seccomp.go8
-rw-r--r--pkg/sentry/syscalls/linux/sys_sem.go104
-rw-r--r--pkg/sentry/syscalls/linux/sys_shm.go23
-rw-r--r--pkg/sentry/syscalls/linux/sys_signal.go69
-rw-r--r--pkg/sentry/syscalls/linux/sys_socket.go136
-rw-r--r--pkg/sentry/syscalls/linux/sys_splice.go58
-rw-r--r--pkg/sentry/syscalls/linux/sys_stat.go20
-rw-r--r--pkg/sentry/syscalls/linux/sys_stat_amd64.go1
-rw-r--r--pkg/sentry/syscalls/linux/sys_stat_arm64.go1
-rw-r--r--pkg/sentry/syscalls/linux/sys_sync.go23
-rw-r--r--pkg/sentry/syscalls/linux/sys_syslog.go6
-rw-r--r--pkg/sentry/syscalls/linux/sys_thread.go143
-rw-r--r--pkg/sentry/syscalls/linux/sys_time.go30
-rw-r--r--pkg/sentry/syscalls/linux/sys_timer.go6
-rw-r--r--pkg/sentry/syscalls/linux/sys_timerfd.go16
-rw-r--r--pkg/sentry/syscalls/linux/sys_tls_amd64.go11
-rw-r--r--pkg/sentry/syscalls/linux/sys_tls_arm64.go7
-rw-r--r--pkg/sentry/syscalls/linux/sys_utsname.go10
-rw-r--r--pkg/sentry/syscalls/linux/sys_write.go68
-rw-r--r--pkg/sentry/syscalls/linux/sys_xattr.go44
-rw-r--r--pkg/sentry/syscalls/linux/timespec.go12
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/BUILD2
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/aio.go25
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/epoll.go24
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/eventfd.go4
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/execve.go11
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/fd.go60
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/filesystem.go13
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/fscontext.go6
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/getdents.go11
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/inotify.go10
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/ioctl.go8
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/lock.go6
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/memfd.go4
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/mmap.go11
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/mount.go13
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/path.go9
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/pipe.go7
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/poll.go40
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/read_write.go106
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/setstat.go51
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/signal.go11
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/socket.go139
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/splice.go82
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/stat.go27
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/stat_amd64.go1
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/stat_arm64.go1
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/sync.go19
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/timerfd.go16
-rw-r--r--pkg/sentry/syscalls/linux/vfs2/xattr.go33
-rw-r--r--pkg/sentry/syscalls/syscalls.go8
-rw-r--r--pkg/sentry/time/BUILD4
-rw-r--r--pkg/sentry/time/calibrated_clock.go4
-rw-r--r--pkg/sentry/time/calibrated_clock_test.go1
-rw-r--r--pkg/sentry/time/sampler.go7
-rw-r--r--pkg/sentry/time/sampler_amd64.go (renamed from pkg/sentry/fsimpl/ext/disklayout/test_utils.go)25
-rw-r--r--pkg/sentry/time/sampler_arm64.go43
-rw-r--r--pkg/sentry/time/tsc_arm64.s6
-rw-r--r--pkg/sentry/usage/memory.go2
-rw-r--r--pkg/sentry/usage/memory_unsafe.go6
-rw-r--r--pkg/sentry/vfs/BUILD4
-rw-r--r--pkg/sentry/vfs/README.md4
-rw-r--r--pkg/sentry/vfs/anonfs.go70
-rw-r--r--pkg/sentry/vfs/dentry.go29
-rw-r--r--pkg/sentry/vfs/device.go6
-rw-r--r--pkg/sentry/vfs/epoll.go10
-rw-r--r--pkg/sentry/vfs/file_description.go30
-rw-r--r--pkg/sentry/vfs/file_description_impl_util.go74
-rw-r--r--pkg/sentry/vfs/file_description_impl_util_test.go16
-rw-r--r--pkg/sentry/vfs/inotify.go22
-rw-r--r--pkg/sentry/vfs/lock.go10
-rw-r--r--pkg/sentry/vfs/memxattr/BUILD2
-rw-r--r--pkg/sentry/vfs/memxattr/xattr.go16
-rw-r--r--pkg/sentry/vfs/mount.go28
-rw-r--r--pkg/sentry/vfs/opath.go32
-rw-r--r--pkg/sentry/vfs/pathname.go4
-rw-r--r--pkg/sentry/vfs/permissions.go36
-rw-r--r--pkg/sentry/vfs/resolving_path.go8
-rw-r--r--pkg/sentry/vfs/vfs.go82
-rw-r--r--pkg/sentry/watchdog/watchdog.go7
526 files changed, 8580 insertions, 11847 deletions
diff --git a/pkg/sentry/arch/BUILD b/pkg/sentry/arch/BUILD
index 61dacd2fb..e0dbc436d 100644
--- a/pkg/sentry/arch/BUILD
+++ b/pkg/sentry/arch/BUILD
@@ -28,13 +28,13 @@ go_library(
"//pkg/abi/linux",
"//pkg/context",
"//pkg/cpuid",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/marshal",
"//pkg/marshal/primitive",
"//pkg/sentry/arch/fpu",
"//pkg/sentry/limits",
- "//pkg/syserror",
"//pkg/usermem",
"@org_golang_x_sys//unix:go_default_library",
],
diff --git a/pkg/sentry/arch/arch_aarch64.go b/pkg/sentry/arch/arch_aarch64.go
index 08789f517..9a827e84f 100644
--- a/pkg/sentry/arch/arch_aarch64.go
+++ b/pkg/sentry/arch/arch_aarch64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package arch
@@ -22,10 +23,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Registers represents the CPU registers for this architecture.
@@ -233,11 +234,11 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,
switch regset {
case _NT_PRSTATUS:
if maxlen < ptraceRegistersSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return s.PtraceGetRegs(dst)
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
@@ -246,11 +247,11 @@ func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int,
switch regset {
case _NT_PRSTATUS:
if maxlen < ptraceRegistersSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return s.PtraceSetRegs(src)
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go
index d6b4d2357..e7cb24102 100644
--- a/pkg/sentry/arch/arch_amd64.go
+++ b/pkg/sentry/arch/arch_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package arch
diff --git a/pkg/sentry/arch/arch_arm64.go b/pkg/sentry/arch/arch_arm64.go
index 348f238fd..0d27a1f22 100644
--- a/pkg/sentry/arch/arch_arm64.go
+++ b/pkg/sentry/arch/arch_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package arch
diff --git a/pkg/sentry/arch/arch_state_x86.go b/pkg/sentry/arch/arch_state_x86.go
index b2b94c304..6da13f26e 100644
--- a/pkg/sentry/arch/arch_state_x86.go
+++ b/pkg/sentry/arch/arch_state_x86.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64 || 386
// +build amd64 386
package arch
diff --git a/pkg/sentry/arch/arch_x86.go b/pkg/sentry/arch/arch_x86.go
index e8e52d3a8..96e9a6949 100644
--- a/pkg/sentry/arch/arch_x86.go
+++ b/pkg/sentry/arch/arch_x86.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64 || 386
// +build amd64 386
package arch
@@ -23,10 +24,10 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch/fpu"
rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Registers represents the CPU registers for this architecture.
@@ -353,7 +354,7 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,
switch regset {
case _NT_PRSTATUS:
if maxlen < ptraceRegistersSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return s.PtraceGetRegs(dst)
case _NT_PRFPREG:
@@ -361,7 +362,7 @@ func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int,
case _NT_X86_XSTATE:
return s.fpState.PtraceGetXstateRegs(dst, maxlen, s.FeatureSet)
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
@@ -370,7 +371,7 @@ func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int,
switch regset {
case _NT_PRSTATUS:
if maxlen < ptraceRegistersSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return s.PtraceSetRegs(src)
case _NT_PRFPREG:
@@ -378,7 +379,7 @@ func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int,
case _NT_X86_XSTATE:
return s.fpState.PtraceSetXstateRegs(src, maxlen, s.FeatureSet)
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/arch/arch_x86_impl.go b/pkg/sentry/arch/arch_x86_impl.go
index 5d7b99bd9..bb5ff7f7f 100644
--- a/pkg/sentry/arch/arch_x86_impl.go
+++ b/pkg/sentry/arch/arch_x86_impl.go
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build (amd64 || 386) && go1.1
// +build amd64 386
+// +build go1.1
package arch
diff --git a/pkg/sentry/arch/fpu/BUILD b/pkg/sentry/arch/fpu/BUILD
index 4e4f20639..1f371e513 100644
--- a/pkg/sentry/arch/fpu/BUILD
+++ b/pkg/sentry/arch/fpu/BUILD
@@ -9,13 +9,14 @@ go_library(
"fpu_amd64.go",
"fpu_amd64.s",
"fpu_arm64.go",
+ "fpu_unsafe.go",
],
visibility = ["//:sandbox"],
deps = [
"//pkg/cpuid",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sync",
- "//pkg/syserror",
"@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/arch/fpu/fpu.go b/pkg/sentry/arch/fpu/fpu.go
index 867d309a3..62bde19d3 100644
--- a/pkg/sentry/arch/fpu/fpu.go
+++ b/pkg/sentry/arch/fpu/fpu.go
@@ -17,7 +17,6 @@ package fpu
import (
"fmt"
- "reflect"
)
// State represents floating point state.
@@ -40,15 +39,3 @@ type ErrLoadingState struct {
func (e ErrLoadingState) Error() string {
return fmt.Sprintf("floating point state contains unsupported features; supported: %#x saved: %#x", e.supportedFeatures, e.savedFeatures)
}
-
-// alignedBytes returns a slice of size bytes, aligned in memory to the given
-// alignment. This is used because we require certain structures to be aligned
-// in a specific way (for example, the X86 floating point data).
-func alignedBytes(size, alignment uint) []byte {
- data := make([]byte, size+alignment-1)
- offset := uint(reflect.ValueOf(data).Index(0).Addr().Pointer() % uintptr(alignment))
- if offset == 0 {
- return data[:size:size]
- }
- return data[alignment-offset:][:size:size]
-}
diff --git a/pkg/sentry/arch/fpu/fpu_amd64.go b/pkg/sentry/arch/fpu/fpu_amd64.go
index f0ba26736..e422f67a1 100644
--- a/pkg/sentry/arch/fpu/fpu_amd64.go
+++ b/pkg/sentry/arch/fpu/fpu_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64 || i386
// +build amd64 i386
package fpu
@@ -21,9 +22,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// initX86FPState (defined in asm files) sets up initial state.
@@ -70,7 +71,7 @@ const ptraceFPRegsSize = 512
// PtraceGetFPRegs implements Context.PtraceGetFPRegs.
func (s *State) PtraceGetFPRegs(dst io.Writer, maxlen int) (int, error) {
if maxlen < ptraceFPRegsSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return dst.Write((*s)[:ptraceFPRegsSize])
@@ -79,7 +80,7 @@ func (s *State) PtraceGetFPRegs(dst io.Writer, maxlen int) (int, error) {
// PtraceSetFPRegs implements Context.PtraceSetFPRegs.
func (s *State) PtraceSetFPRegs(src io.Reader, maxlen int) (int, error) {
if maxlen < ptraceFPRegsSize {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
var f [ptraceFPRegsSize]byte
diff --git a/pkg/sentry/arch/fpu/fpu_arm64.go b/pkg/sentry/arch/fpu/fpu_arm64.go
index 46634661f..49e641722 100644
--- a/pkg/sentry/arch/fpu/fpu_arm64.go
+++ b/pkg/sentry/arch/fpu/fpu_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package fpu
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go b/pkg/sentry/arch/fpu/fpu_unsafe.go
index b734b6987..c91dc99be 100644
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_test.go
+++ b/pkg/sentry/arch/fpu/fpu_unsafe.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The gVisor Authors.
+// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,19 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package disklayout
+package fpu
import (
- "testing"
+ "unsafe"
)
-// TestSuperBlockSize tests that the superblock structs are of the correct
-// size.
-func TestSuperBlockSize(t *testing.T) {
- var sbOld SuperBlockOld
- assertSize(t, &sbOld, 84)
- var sb32 SuperBlock32Bit
- assertSize(t, &sb32, 336)
- var sb64 SuperBlock64Bit
- assertSize(t, &sb64, 1024)
+// alignedBytes returns a slice of size bytes, aligned in memory to the given
+// alignment. This is used because we require certain structures to be aligned
+// in a specific way (for example, the X86 floating point data).
+func alignedBytes(size, alignment uint) []byte {
+ data := make([]byte, size+alignment-1)
+ offset := uint(uintptr(unsafe.Pointer(&data[0])) % uintptr(alignment))
+ if offset == 0 {
+ return data[:size:size]
+ }
+ return data[alignment-offset:][:size:size]
}
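
The hunk above only relocates the helper: it needs unsafe pointer arithmetic, so it moves into an _unsafe.go file and drops the reflect-based address computation. A self-contained sketch of how the helper behaves (the 4096/64 sizes are illustrative, not the sentry's real FPU allocation):

package main

import (
	"fmt"
	"unsafe"
)

// alignedBytes is copied verbatim from fpu_unsafe.go above.
func alignedBytes(size, alignment uint) []byte {
	data := make([]byte, size+alignment-1)
	offset := uint(uintptr(unsafe.Pointer(&data[0])) % uintptr(alignment))
	if offset == 0 {
		return data[:size:size]
	}
	return data[alignment-offset:][:size:size]
}

func main() {
	// XSAVE-style FPU state areas must be 64-byte aligned.
	buf := alignedBytes(4096, 64)
	addr := uintptr(unsafe.Pointer(&buf[0]))
	fmt.Printf("len=%d cap=%d aligned=%v\n", len(buf), cap(buf), addr%64 == 0)
}
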
diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go
index 58e28dbba..dbd4336f9 100644
--- a/pkg/sentry/arch/signal_amd64.go
+++ b/pkg/sentry/arch/signal_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package arch
diff --git a/pkg/sentry/arch/signal_arm64.go b/pkg/sentry/arch/signal_arm64.go
index 80df90076..ee22ec512 100644
--- a/pkg/sentry/arch/signal_arm64.go
+++ b/pkg/sentry/arch/signal_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package arch
diff --git a/pkg/sentry/arch/syscalls_amd64.go b/pkg/sentry/arch/syscalls_amd64.go
index 3859f41ee..c021ba072 100644
--- a/pkg/sentry/arch/syscalls_amd64.go
+++ b/pkg/sentry/arch/syscalls_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package arch
diff --git a/pkg/sentry/arch/syscalls_arm64.go b/pkg/sentry/arch/syscalls_arm64.go
index 95dfd1e90..7146c9e44 100644
--- a/pkg/sentry/arch/syscalls_arm64.go
+++ b/pkg/sentry/arch/syscalls_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package arch
diff --git a/pkg/sentry/control/BUILD b/pkg/sentry/control/BUILD
index deaf5fa23..cfb33a398 100644
--- a/pkg/sentry/control/BUILD
+++ b/pkg/sentry/control/BUILD
@@ -1,21 +1,33 @@
-load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools:defs.bzl", "go_library", "go_test", "proto_library")
package(licenses = ["notice"])
+proto_library(
+ name = "control",
+ srcs = ["control.proto"],
+ visibility = ["//visibility:public"],
+)
+
go_library(
name = "control",
srcs = [
"control.go",
+ "events.go",
+ "fs.go",
+ "lifecycle.go",
"logging.go",
"pprof.go",
"proc.go",
"state.go",
+ "usage.go",
],
visibility = [
"//:sandbox",
],
deps = [
"//pkg/abi/linux",
+ "//pkg/context",
+ "//pkg/eventchannel",
"//pkg/fd",
"//pkg/log",
"//pkg/sentry/fdimport",
@@ -35,6 +47,8 @@ go_library(
"//pkg/sync",
"//pkg/tcpip/link/sniffer",
"//pkg/urpc",
+ "//pkg/usermem",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/control/control.proto b/pkg/sentry/control/control.proto
new file mode 100644
index 000000000..72dda3fbc
--- /dev/null
+++ b/pkg/sentry/control/control.proto
@@ -0,0 +1,40 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package gvisor;
+
+// ControlConfig configures the permission of controls.
+message ControlConfig {
+ // Names for individual control URPC service objects.
+ // Any new service object that should be given conditional access should be
+ // named here and conditionally added based on presence in allowed_controls.
+ enum Endpoint {
+ UNKNOWN = 0;
+ EVENTS = 1;
+ FS = 2;
+ LIFECYCLE = 3;
+ LOGGING = 4;
+ PROFILE = 5;
+ USAGE = 6;
+ PROC = 7;
+ STATE = 8;
+ DEBUG = 9;
+ }
+
+ // allowed_controls represents which endpoints may be registered to the
+ // server.
+ repeated Endpoint allowed_controls = 1;
+}
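
A sketch of how a control server could consume allowed_controls follows; the Endpoint type and the register callback are stand-ins for the generated proto constants and the real URPC registration, not code from this change.

package main

import "fmt"

// Endpoint mirrors ControlConfig.Endpoint above; the constants are
// illustrative stand-ins for the generated proto values, in the same order.
type Endpoint int

const (
	Unknown Endpoint = iota
	Events
	Fs
	Lifecycle
	Logging
	Profile
	Usage
	Proc
	State
	Debug
)

// registerControls sketches how a control server might gate each URPC
// object on allowed_controls; register stands in for the real registration
// call and is not gVisor API.
func registerControls(allowed []Endpoint, register func(Endpoint)) {
	permitted := make(map[Endpoint]bool, len(allowed))
	for _, ep := range allowed {
		permitted[ep] = true
	}
	for _, ep := range []Endpoint{Events, Fs, Lifecycle, Logging, Profile, Usage, Proc, State, Debug} {
		if permitted[ep] {
			register(ep)
		}
	}
}

func main() {
	// Only EVENTS and LIFECYCLE are allowed in this example config.
	registerControls([]Endpoint{Events, Lifecycle}, func(ep Endpoint) {
		fmt.Println("registering endpoint", ep)
	})
}
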
diff --git a/pkg/sentry/control/events.go b/pkg/sentry/control/events.go
new file mode 100644
index 000000000..92e437ae7
--- /dev/null
+++ b/pkg/sentry/control/events.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package control
+
+import (
+ "errors"
+ "fmt"
+
+ "gvisor.dev/gvisor/pkg/eventchannel"
+ "gvisor.dev/gvisor/pkg/urpc"
+)
+
+// EventsOpts are the arguments for eventchannel-related commands.
+type EventsOpts struct {
+ urpc.FilePayload
+}
+
+// Events is the control server state for eventchannel-related commands.
+type Events struct {
+ emitter eventchannel.Emitter
+}
+
+// AttachDebugEmitter receives a connected unix domain socket FD from the client
+// and establishes it as a new emitter for the sentry eventchannel. Any existing
+// emitters are replaced on a subsequent attach.
+func (e *Events) AttachDebugEmitter(o *EventsOpts, _ *struct{}) error {
+ if len(o.FilePayload.Files) < 1 {
+ return errors.New("no output writer provided")
+ }
+
+ sock, err := o.ReleaseFD(0)
+ if err != nil {
+ return err
+ }
+ sockFD := sock.Release()
+
+ // SocketEmitter takes ownership of sockFD.
+ emitter, err := eventchannel.SocketEmitter(sockFD)
+ if err != nil {
+ return fmt.Errorf("failed to create SocketEmitter for FD %d: %v", sockFD, err)
+ }
+
+ // If there is already a debug emitter, close the old one.
+ if e.emitter != nil {
+ e.emitter.Close()
+ }
+
+ e.emitter = eventchannel.DebugEmitterFrom(emitter)
+
+ // Register the new stream destination.
+ eventchannel.AddEmitter(e.emitter)
+ return nil
+}
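
Roughly what a client hands to this RPC: one end of a connected socket pair is placed in EventsOpts.FilePayload while the other stays local for reading events. The socket type and the urpc plumbing below are assumptions, not part of the patch.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// SOCK_SEQPACKET is an assumption about the framing the emitter
	// expects; the point is only that the sentry receives one end of a
	// connected unix socket and takes ownership of its FD.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}

	local := os.NewFile(uintptr(fds[0]), "eventchannel-local")    // client reads events here
	payload := os.NewFile(uintptr(fds[1]), "eventchannel-remote") // would go in EventsOpts.FilePayload.Files

	fmt.Println("local:", local.Name(), "payload:", payload.Name())
}
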
diff --git a/pkg/sentry/control/fs.go b/pkg/sentry/control/fs.go
new file mode 100644
index 000000000..d19b21f2d
--- /dev/null
+++ b/pkg/sentry/control/fs.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package control
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/sentry/fs"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/urpc"
+ "gvisor.dev/gvisor/pkg/usermem"
+)
+
+// CatOpts contains options for the Cat RPC call.
+type CatOpts struct {
+ // Files are the filesystem paths for the files to cat.
+ Files []string `json:"files"`
+
+ // FilePayload contains the destination for output.
+ urpc.FilePayload
+}
+
+// Fs includes fs-related functions.
+type Fs struct {
+ Kernel *kernel.Kernel
+}
+
+// Cat is an RPC stub which prints out and returns the content of the files.
+func (f *Fs) Cat(o *CatOpts, _ *struct{}) error {
+ // Create an output stream.
+ if len(o.FilePayload.Files) != 1 {
+ return ErrInvalidFiles
+ }
+
+ output := o.FilePayload.Files[0]
+ for _, file := range o.Files {
+ if err := cat(f.Kernel, file, output); err != nil {
+ return fmt.Errorf("cannot read from file %s: %v", file, err)
+ }
+ }
+
+ return nil
+}
+
+// fileReader encapsulates a fs.File and provides an io.Reader interface.
+type fileReader struct {
+ ctx context.Context
+ file *fs.File
+}
+
+// Read implements io.Reader.Read.
+func (f *fileReader) Read(p []byte) (int, error) {
+ n, err := f.file.Readv(f.ctx, usermem.BytesIOSequence(p))
+ return int(n), err
+}
+
+func cat(k *kernel.Kernel, path string, output *os.File) error {
+ ctx := k.SupervisorContext()
+ mns := k.GlobalInit().Leader().MountNamespace()
+ root := mns.Root()
+ defer root.DecRef(ctx)
+
+ remainingTraversals := uint(fs.DefaultTraversalLimit)
+ d, err := mns.FindInode(ctx, root, nil, path, &remainingTraversals)
+ if err != nil {
+ return fmt.Errorf("cannot find file %s: %v", path, err)
+ }
+ defer d.DecRef(ctx)
+
+ file, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true})
+ if err != nil {
+ return fmt.Errorf("cannot get file for path %s: %v", path, err)
+ }
+ defer file.DecRef(ctx)
+
+ _, err = io.Copy(output, &fileReader{ctx: ctx, file: file})
+ return err
+}
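
fileReader exists only so io.Copy can drive an fs.File. The same adapter shape, reduced to a self-contained toy (readvLike and stringFile are illustrative stand-ins, not gVisor types):

package main

import (
	"io"
	"os"
	"strings"
)

// readvLike models the Readv shape that fileReader wraps: fill dst, report
// how many bytes were produced as an int64.
type readvLike interface {
	Readv(dst []byte) (int64, error)
}

// reader adapts a readvLike to io.Reader, mirroring fileReader above.
type reader struct {
	f readvLike
}

func (r *reader) Read(p []byte) (int, error) {
	n, err := r.f.Readv(p)
	return int(n), err
}

// stringFile is a toy readvLike backed by an in-memory string.
type stringFile struct{ r *strings.Reader }

func (s *stringFile) Readv(dst []byte) (int64, error) {
	n, err := s.r.Read(dst)
	return int64(n), err
}

func main() {
	src := &stringFile{r: strings.NewReader("hello from the sandbox\n")}
	// io.Copy only understands io.Reader, so the adapter is what lets a
	// generic copy loop consume the file, just as cat() does.
	if _, err := io.Copy(os.Stdout, &reader{f: src}); err != nil {
		panic(err)
	}
}
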
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go b/pkg/sentry/control/lifecycle.go
index c96002e19..67abf497d 100644
--- a/pkg/sentry/fsimpl/ext/disklayout/extent_test.go
+++ b/pkg/sentry/control/lifecycle.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The gVisor Authors.
+// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,19 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package disklayout
+package control
import (
- "testing"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
)
-// TestExtentSize tests that the extent structs are of the correct
-// size.
-func TestExtentSize(t *testing.T) {
- var h ExtentHeader
- assertSize(t, &h, ExtentHeaderSize)
- var i ExtentIdx
- assertSize(t, &i, ExtentEntrySize)
- var e Extent
- assertSize(t, &e, ExtentEntrySize)
+// Lifecycle provides functions related to starting and stopping tasks.
+type Lifecycle struct {
+ Kernel *kernel.Kernel
+}
+
+// Pause pauses all tasks, blocking until they are stopped.
+func (l *Lifecycle) Pause(_, _ *struct{}) error {
+ l.Kernel.Pause()
+ return nil
+}
+
+// Resume resumes all tasks.
+func (l *Lifecycle) Resume(_, _ *struct{}) error {
+ l.Kernel.Unpause()
+ return nil
}
diff --git a/pkg/sentry/control/logging.go b/pkg/sentry/control/logging.go
index 8a500a515..7613dfcbc 100644
--- a/pkg/sentry/control/logging.go
+++ b/pkg/sentry/control/logging.go
@@ -50,20 +50,20 @@ type LoggingArgs struct {
// enable strace at all. If this flag is false then a completely
// pristine copy of the syscall table will be swapped in. This
// approach is used to remain consistent with an empty strace
- // whitelist meaning trace all system calls.
+ // allowlist meaning trace all system calls.
EnableStrace bool
- // Strace is the whitelist of syscalls to trace to log. If this
- // and StraceEventWhitelist are empty trace all system calls.
- StraceWhitelist []string
+ // StraceAllowlist is the allowlist of syscalls to trace to the log. If
+ // this and StraceEventAllowlist are empty, trace all system calls.
+ StraceAllowlist []string
// SetEventStrace is a flag used to indicate that event strace
// related arguments were passed in.
SetEventStrace bool
- // StraceEventWhitelist is the whitelist of syscalls to trace
+ // StraceEventAllowlist is the allowlist of syscalls to trace
// to event log.
- StraceEventWhitelist []string
+ StraceEventAllowlist []string
}
// Logging provides functions related to logging.
@@ -107,13 +107,13 @@ func (l *Logging) Change(args *LoggingArgs, code *int) error {
func (l *Logging) configureStrace(args *LoggingArgs) error {
if args.EnableStrace {
- // Install the whitelist specified.
- if len(args.StraceWhitelist) > 0 {
- if err := strace.Enable(args.StraceWhitelist, strace.SinkTypeLog); err != nil {
+ // Install the allowlist specified.
+ if len(args.StraceAllowlist) > 0 {
+ if err := strace.Enable(args.StraceAllowlist, strace.SinkTypeLog); err != nil {
return err
}
} else {
- // For convenience, if strace is enabled but whitelist
+ // For convenience, if strace is enabled but allowlist
// is empty, enable everything to log.
strace.EnableAll(strace.SinkTypeLog)
}
@@ -125,8 +125,8 @@ func (l *Logging) configureStrace(args *LoggingArgs) error {
}
func (l *Logging) configureEventStrace(args *LoggingArgs) error {
- if len(args.StraceEventWhitelist) > 0 {
- if err := strace.Enable(args.StraceEventWhitelist, strace.SinkTypeEvent); err != nil {
+ if len(args.StraceEventAllowlist) > 0 {
+ if err := strace.Enable(args.StraceEventAllowlist, strace.SinkTypeEvent); err != nil {
return err
}
} else {
diff --git a/pkg/sentry/control/pprof.go b/pkg/sentry/control/pprof.go
index 2f3664c57..f721b7236 100644
--- a/pkg/sentry/control/pprof.go
+++ b/pkg/sentry/control/pprof.go
@@ -26,6 +26,23 @@ import (
"gvisor.dev/gvisor/pkg/urpc"
)
+const (
+ // DefaultBlockProfileRate is the default profiling rate for block
+ // profiles.
+ //
+ // The default here is 10%, which will record a stacktrace 10% of the
+ // time when blocking occurs. Since these events should not be super
+ // frequent, we expect this to achieve a reasonable balance between
+ // collecting the data we need and imposing a high performance cost
+ // (e.g. skewing even the CPU profile).
+ DefaultBlockProfileRate = 10
+
+ // DefaultMutexProfileRate is the default profiling rate for mutex
+ // profiles. Like the block rate above, we use a default rate of 10%
+ // for the same reasons.
+ DefaultMutexProfileRate = 10
+)
+
// Profile includes profile-related RPC stubs. It provides a way to
// control the built-in runtime profiling facilities.
//
@@ -175,12 +192,8 @@ func (p *Profile) Block(o *BlockProfileOpts, _ *struct{}) error {
defer p.blockMu.Unlock()
// Always set the rate. We then wait to collect a profile at this rate,
- // and disable when we're done. Note that the default here is 10%, which
- // will record a stacktrace 10% of the time when blocking occurs. Since
- // these events should not be super frequent, we expect this to achieve
- // a reasonable balance between collecting the data we need and imposing
- // a high performance cost (e.g. skewing even the CPU profile).
- rate := 10
+ // and disable when we're done.
+ rate := DefaultBlockProfileRate
if o.Rate != 0 {
rate = o.Rate
}
@@ -220,9 +233,8 @@ func (p *Profile) Mutex(o *MutexProfileOpts, _ *struct{}) error {
p.mutexMu.Lock()
defer p.mutexMu.Unlock()
- // Always set the fraction. Like the block rate above, we use
- // a default rate of 10% for the same reasons.
- fraction := 10
+ // Always set the fraction.
+ fraction := DefaultMutexProfileRate
if o.Fraction != 0 {
fraction = o.Fraction
}
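
Both defaults correspond to the standard Go runtime knobs; a minimal standalone example of enabling them around a workload and dumping the result (the Sleep stands in for real work):

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func main() {
	// Mirror DefaultBlockProfileRate / DefaultMutexProfileRate from the
	// patch above, then disable again once the profiling window is over,
	// as Profile.Block and Profile.Mutex do around collection.
	runtime.SetBlockProfileRate(10)
	defer runtime.SetBlockProfileRate(0)

	old := runtime.SetMutexProfileFraction(10)
	defer runtime.SetMutexProfileFraction(old)

	time.Sleep(100 * time.Millisecond) // stand-in for the profiled workload

	// Dump whatever was sampled during the window.
	pprof.Lookup("block").WriteTo(os.Stdout, 0)
	pprof.Lookup("mutex").WriteTo(os.Stdout, 0)
}
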
diff --git a/pkg/sentry/control/proc.go b/pkg/sentry/control/proc.go
index 221e98a01..6352ea71a 100644
--- a/pkg/sentry/control/proc.go
+++ b/pkg/sentry/control/proc.go
@@ -126,7 +126,7 @@ func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error {
// Wait for completion.
newTG.WaitExited()
- *waitStatus = newTG.ExitStatus().Status()
+ *waitStatus = uint32(newTG.ExitStatus())
return nil
}
@@ -223,7 +223,7 @@ func (proc *Proc) execAsync(args *ExecArgs) (*kernel.ThreadGroup, kernel.ThreadI
_ = fd.Close()
}
}()
- ttyFile, ttyFileVFS2, err := fdimport.Import(ctx, fdTable, args.StdioIsPty, fds)
+ ttyFile, ttyFileVFS2, err := fdimport.Import(ctx, fdTable, args.StdioIsPty, args.KUID, args.KGID, fds)
if err != nil {
return nil, 0, nil, nil, err
}
diff --git a/pkg/sentry/control/state.go b/pkg/sentry/control/state.go
index 62eaca965..4c83b8e8e 100644
--- a/pkg/sentry/control/state.go
+++ b/pkg/sentry/control/state.go
@@ -17,6 +17,7 @@ package control
import (
"errors"
+ "gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/state"
@@ -67,7 +68,7 @@ func (s *State) Save(o *SaveOpts, _ *struct{}) error {
log.Warningf("Save failed: exiting...")
s.Kernel.SetSaveError(err)
}
- s.Kernel.Kill(kernel.ExitStatus{})
+ s.Kernel.Kill(linux.WaitStatusExit(0))
},
}
return saveOpts.Save(s.Kernel.SupervisorContext(), s.Kernel, s.Watchdog)
diff --git a/pkg/sentry/control/usage.go b/pkg/sentry/control/usage.go
new file mode 100644
index 000000000..cc78d3f45
--- /dev/null
+++ b/pkg/sentry/control/usage.go
@@ -0,0 +1,183 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package control
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/usage"
+ "gvisor.dev/gvisor/pkg/urpc"
+)
+
+// Usage includes usage-related RPC stubs.
+type Usage struct {
+ Kernel *kernel.Kernel
+}
+
+// MemoryUsageOpts contains usage options.
+type MemoryUsageOpts struct {
+ // Full indicates that a full accounting should be done. If Full is not
+ // specified, then a partial accounting will be done, and Unknown will
+ // contain a majority of memory. See Collect for more information.
+ Full bool `json:"Full"`
+}
+
+// MemoryUsage is a memory usage structure.
+type MemoryUsage struct {
+ Unknown uint64 `json:"Unknown"`
+ System uint64 `json:"System"`
+ Anonymous uint64 `json:"Anonymous"`
+ PageCache uint64 `json:"PageCache"`
+ Mapped uint64 `json:"Mapped"`
+ Tmpfs uint64 `json:"Tmpfs"`
+ Ramdiskfs uint64 `json:"Ramdiskfs"`
+ Total uint64 `json:"Total"`
+}
+
+// MemoryUsageFileOpts contains usage file options.
+type MemoryUsageFileOpts struct {
+ // Version is used to ensure both sides agree on the format of the
+ // shared memory buffer.
+ Version uint64 `json:"Version"`
+}
+
+// MemoryUsageFile contains the file handle to the usage file.
+type MemoryUsageFile struct {
+ urpc.FilePayload
+}
+
+// UsageFD returns the file that tracks the memory usage of the application.
+func (u *Usage) UsageFD(opts *MemoryUsageFileOpts, out *MemoryUsageFile) error {
+ // Only support version 1 for now.
+ if opts.Version != 1 {
+ return fmt.Errorf("unsupported version requested: %d", opts.Version)
+ }
+
+ mf := u.Kernel.MemoryFile()
+ *out = MemoryUsageFile{
+ FilePayload: urpc.FilePayload{
+ Files: []*os.File{
+ usage.MemoryAccounting.File,
+ mf.File(),
+ },
+ },
+ }
+
+ return nil
+}
+
+// Collect returns memory used by the sandboxed application.
+func (u *Usage) Collect(opts *MemoryUsageOpts, out *MemoryUsage) error {
+ if opts.Full {
+ // Ensure everything is up to date.
+ if err := u.Kernel.MemoryFile().UpdateUsage(); err != nil {
+ return err
+ }
+
+ // Copy out a snapshot.
+ snapshot, total := usage.MemoryAccounting.Copy()
+ *out = MemoryUsage{
+ System: snapshot.System,
+ Anonymous: snapshot.Anonymous,
+ PageCache: snapshot.PageCache,
+ Mapped: snapshot.Mapped,
+ Tmpfs: snapshot.Tmpfs,
+ Ramdiskfs: snapshot.Ramdiskfs,
+ Total: total,
+ }
+ } else {
+ // Get total usage from the MemoryFile implementation.
+ total, err := u.Kernel.MemoryFile().TotalUsage()
+ if err != nil {
+ return err
+ }
+
+ // The memory accounting is guaranteed to be accurate only when
+ // UpdateUsage is called. If UpdateUsage is not called, then only Mapped
+ // will be up-to-date.
+ snapshot, _ := usage.MemoryAccounting.Copy()
+ *out = MemoryUsage{
+ Unknown: total,
+ Mapped: snapshot.Mapped,
+ Total: total + snapshot.Mapped,
+ }
+
+ }
+
+ return nil
+}
+
+// UsageReduceOpts contains options to Usage.Reduce().
+type UsageReduceOpts struct {
+ // If Wait is true, Reduce blocks until all activity initiated by
+ // Usage.Reduce() has completed.
+ Wait bool `json:"wait"`
+}
+
+// UsageReduceOutput contains output from Usage.Reduce().
+type UsageReduceOutput struct{}
+
+// Reduce requests that the sentry attempt to reduce its memory usage.
+func (u *Usage) Reduce(opts *UsageReduceOpts, out *UsageReduceOutput) error {
+ mf := u.Kernel.MemoryFile()
+ mf.StartEvictions()
+ if opts.Wait {
+ mf.WaitForEvictions()
+ }
+ return nil
+}
+
+// MemoryUsageRecord contains the mapping and platform memory file.
+type MemoryUsageRecord struct {
+ mmap uintptr
+ stats *usage.RTMemoryStats
+ mf os.File
+}
+
+// NewMemoryUsageRecord creates a new MemoryUsageRecord from usageFile and
+// platformFile.
+func NewMemoryUsageRecord(usageFile, platformFile os.File) (*MemoryUsageRecord, error) {
+ mmap, _, e := unix.RawSyscall6(unix.SYS_MMAP, 0, usage.RTMemoryStatsSize, unix.PROT_READ, unix.MAP_SHARED, usageFile.Fd(), 0)
+ if e != 0 {
+ return nil, fmt.Errorf("mmap returned %d, want 0", e)
+ }
+
+ m := MemoryUsageRecord{
+ mmap: mmap,
+ stats: usage.RTMemoryStatsPointer(mmap),
+ mf: platformFile,
+ }
+
+ runtime.SetFinalizer(&m, finalizer)
+ return &m, nil
+}
+
+func finalizer(m *MemoryUsageRecord) {
+ unix.RawSyscall(unix.SYS_MUNMAP, m.mmap, usage.RTMemoryStatsSize, 0)
+}
+
+// Fetch fetches the usage info from a MemoryUsageRecord.
+func (m *MemoryUsageRecord) Fetch() (mapped, unknown, total uint64, err error) {
+ var stat unix.Stat_t
+ if err := unix.Fstat(int(m.mf.Fd()), &stat); err != nil {
+ return 0, 0, 0, err
+ }
+ fmem := uint64(stat.Blocks) * 512
+ return m.stats.RTMapped, fmem, m.stats.RTMapped + fmem, nil
+}
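
Fetch relies on st_blocks being reported in 512-byte units, so Blocks*512 measures what the platform memory file actually backs rather than its apparent size. A standalone illustration with a sparse temporary file:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// st_blocks is counted in 512-byte units, so Blocks*512 is the space
	// actually backed by the file, independent of its apparent (possibly
	// sparse) size; the same arithmetic Fetch applies to the platform
	// memory file.
	f, err := os.CreateTemp("", "memfile")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := f.Truncate(1 << 20); err != nil { // 1 MiB apparent size, no blocks written
		panic(err)
	}

	var st unix.Stat_t
	if err := unix.Fstat(int(f.Fd()), &st); err != nil {
		panic(err)
	}
	fmt.Printf("apparent size: %d bytes, backed: %d bytes\n", st.Size, uint64(st.Blocks)*512)
}
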
diff --git a/pkg/sentry/devices/memdev/BUILD b/pkg/sentry/devices/memdev/BUILD
index 4c8604d58..66b9ed523 100644
--- a/pkg/sentry/devices/memdev/BUILD
+++ b/pkg/sentry/devices/memdev/BUILD
@@ -15,6 +15,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/rand",
"//pkg/safemem",
"//pkg/sentry/fsimpl/devtmpfs",
@@ -23,7 +24,6 @@ go_library(
"//pkg/sentry/kernel/auth",
"//pkg/sentry/memmap",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/devices/memdev/full.go b/pkg/sentry/devices/memdev/full.go
index fece3e762..fc702c9f6 100644
--- a/pkg/sentry/devices/memdev/full.go
+++ b/pkg/sentry/devices/memdev/full.go
@@ -16,8 +16,8 @@ package memdev
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -66,12 +66,12 @@ func (fd *fullFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.Rea
// PWrite implements vfs.FileDescriptionImpl.PWrite.
func (fd *fullFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.ENOSPC
+ return 0, linuxerr.ENOSPC
}
// Write implements vfs.FileDescriptionImpl.Write.
func (fd *fullFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.ENOSPC
+ return 0, linuxerr.ENOSPC
}
// Seek implements vfs.FileDescriptionImpl.Seek.
diff --git a/pkg/sentry/devices/quotedev/BUILD b/pkg/sentry/devices/quotedev/BUILD
deleted file mode 100644
index d09214e3e..000000000
--- a/pkg/sentry/devices/quotedev/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "quotedev",
- srcs = ["quotedev.go"],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/fsimpl/devtmpfs",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- ],
-)
diff --git a/pkg/sentry/devices/quotedev/quotedev.go b/pkg/sentry/devices/quotedev/quotedev.go
deleted file mode 100644
index 6114cb724..000000000
--- a/pkg/sentry/devices/quotedev/quotedev.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package quotedev implements a vfs.Device for /dev/gvisor_quote.
-package quotedev
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-const (
- quoteDevMinor = 0
-)
-
-// quoteDevice implements vfs.Device for /dev/gvisor_quote
-//
-// +stateify savable
-type quoteDevice struct{}
-
-// Open implements vfs.Device.Open.
-// TODO(b/157161182): Add support for attestation ioctls.
-func (quoteDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- return nil, syserror.EIO
-}
-
-// Register registers all devices implemented by this package in vfsObj.
-func Register(vfsObj *vfs.VirtualFilesystem) error {
- return vfsObj.RegisterDevice(vfs.CharDevice, linux.UNNAMED_MAJOR, quoteDevMinor, quoteDevice{}, &vfs.RegisterDeviceOptions{
- GroupName: "gvisor_quote",
- })
-}
-
-// CreateDevtmpfsFiles creates device special files in dev representing all
-// devices implemented by this package.
-func CreateDevtmpfsFiles(ctx context.Context, dev *devtmpfs.Accessor) error {
- return dev.CreateDeviceFile(ctx, "gvisor_quote", vfs.CharDevice, linux.UNNAMED_MAJOR, quoteDevMinor, 0666 /* mode */)
-}
diff --git a/pkg/sentry/devices/ttydev/BUILD b/pkg/sentry/devices/ttydev/BUILD
index b4b6ca38a..ab4cd0b33 100644
--- a/pkg/sentry/devices/ttydev/BUILD
+++ b/pkg/sentry/devices/ttydev/BUILD
@@ -9,8 +9,8 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/fsimpl/devtmpfs",
"//pkg/sentry/vfs",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/devices/ttydev/ttydev.go b/pkg/sentry/devices/ttydev/ttydev.go
index a287c65ca..29b79b5d6 100644
--- a/pkg/sentry/devices/ttydev/ttydev.go
+++ b/pkg/sentry/devices/ttydev/ttydev.go
@@ -18,9 +18,9 @@ package ttydev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -36,7 +36,7 @@ type ttyDevice struct{}
// Open implements vfs.Device.Open.
func (ttyDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
// Register registers all devices implemented by this package in vfsObj.
diff --git a/pkg/sentry/devices/tundev/BUILD b/pkg/sentry/devices/tundev/BUILD
index 8b38d574d..60c971030 100644
--- a/pkg/sentry/devices/tundev/BUILD
+++ b/pkg/sentry/devices/tundev/BUILD
@@ -9,6 +9,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/arch",
"//pkg/sentry/fsimpl/devtmpfs",
@@ -16,7 +17,6 @@ go_library(
"//pkg/sentry/kernel",
"//pkg/sentry/socket/netstack",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/tcpip/link/tun",
"//pkg/usermem",
"//pkg/waiter",
diff --git a/pkg/sentry/devices/tundev/tundev.go b/pkg/sentry/devices/tundev/tundev.go
index a12eeb8e7..b4e2a6d91 100644
--- a/pkg/sentry/devices/tundev/tundev.go
+++ b/pkg/sentry/devices/tundev/tundev.go
@@ -18,6 +18,7 @@ package tundev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket/netstack"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/link/tun"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -77,11 +77,11 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg
switch request {
case linux.TUNSETIFF:
if !t.HasCapability(linux.CAP_NET_ADMIN) {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
stack, ok := t.NetworkContext().(*netstack.Stack)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var req linux.IFReq
@@ -104,7 +104,7 @@ func (fd *tunFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArg
return 0, err
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fdimport/BUILD b/pkg/sentry/fdimport/BUILD
index 6b4f8b0ed..563e96e0d 100644
--- a/pkg/sentry/fdimport/BUILD
+++ b/pkg/sentry/fdimport/BUILD
@@ -15,6 +15,7 @@ go_library(
"//pkg/sentry/fs/host",
"//pkg/sentry/fsimpl/host",
"//pkg/sentry/kernel",
+ "//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
],
)
diff --git a/pkg/sentry/fdimport/fdimport.go b/pkg/sentry/fdimport/fdimport.go
index badd5b073..f2b9630eb 100644
--- a/pkg/sentry/fdimport/fdimport.go
+++ b/pkg/sentry/fdimport/fdimport.go
@@ -24,6 +24,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/host"
hostvfs2 "gvisor.dev/gvisor/pkg/sentry/fsimpl/host"
"gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
)
@@ -31,9 +32,9 @@ import (
// sets up TTY for the first 3 FDs in the slice representing stdin, stdout,
// stderr. Used FDs are either closed or released. It's safe for the caller to
// close any remaining files upon return.
-func Import(ctx context.Context, fdTable *kernel.FDTable, console bool, fds []*fd.FD) (*host.TTYFileOperations, *hostvfs2.TTYFileDescription, error) {
+func Import(ctx context.Context, fdTable *kernel.FDTable, console bool, uid auth.KUID, gid auth.KGID, fds []*fd.FD) (*host.TTYFileOperations, *hostvfs2.TTYFileDescription, error) {
if kernel.VFS2Enabled {
- ttyFile, err := importVFS2(ctx, fdTable, console, fds)
+ ttyFile, err := importVFS2(ctx, fdTable, console, uid, gid, fds)
return nil, ttyFile, err
}
ttyFile, err := importFS(ctx, fdTable, console, fds)
@@ -89,7 +90,7 @@ func importFS(ctx context.Context, fdTable *kernel.FDTable, console bool, fds []
return ttyFile.FileOperations.(*host.TTYFileOperations), nil
}
-func importVFS2(ctx context.Context, fdTable *kernel.FDTable, console bool, stdioFDs []*fd.FD) (*hostvfs2.TTYFileDescription, error) {
+func importVFS2(ctx context.Context, fdTable *kernel.FDTable, console bool, uid auth.KUID, gid auth.KGID, stdioFDs []*fd.FD) (*hostvfs2.TTYFileDescription, error) {
k := kernel.KernelFromContext(ctx)
if k == nil {
return nil, fmt.Errorf("cannot find kernel from context")
@@ -103,7 +104,13 @@ func importVFS2(ctx context.Context, fdTable *kernel.FDTable, console bool, stdi
// Import the file as a host TTY file.
if ttyFile == nil {
var err error
- appFile, err = hostvfs2.ImportFD(ctx, k.HostMount(), hostFD.FD(), true /* isTTY */)
+ appFile, err = hostvfs2.NewFD(ctx, k.HostMount(), hostFD.FD(), &hostvfs2.NewFDOptions{
+ Savable: true,
+ IsTTY: true,
+ VirtualOwner: true,
+ UID: uid,
+ GID: gid,
+ })
if err != nil {
return nil, err
}
@@ -121,7 +128,12 @@ func importVFS2(ctx context.Context, fdTable *kernel.FDTable, console bool, stdi
}
} else {
var err error
- appFile, err = hostvfs2.ImportFD(ctx, k.HostMount(), hostFD.FD(), false /* isTTY */)
+ appFile, err = hostvfs2.NewFD(ctx, k.HostMount(), hostFD.FD(), &hostvfs2.NewFDOptions{
+ Savable: true,
+ VirtualOwner: true,
+ UID: uid,
+ GID: gid,
+ })
if err != nil {
return nil, err
}
diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD
index 0dc100f9b..4e573d249 100644
--- a/pkg/sentry/fs/BUILD
+++ b/pkg/sentry/fs/BUILD
@@ -48,6 +48,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/amutex",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/p9",
@@ -67,7 +68,6 @@ go_library(
"//pkg/sentry/usage",
"//pkg/state",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -110,12 +110,12 @@ go_test(
deps = [
":fs",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/fs/ramfs",
"//pkg/sentry/fs/tmpfs",
"//pkg/sentry/kernel/contexttest",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 5aa668873..e48bd4dba 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -161,7 +161,7 @@ func doCopyUp(ctx context.Context, d *Dirent) error {
// then try to take copyMu for writing here, we'd deadlock.
t := d.Inode.overlay.lower.StableAttr.Type
if t != RegularFile && t != Directory && t != Symlink {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Wait to get exclusive access to the upper Inode.
@@ -194,7 +194,7 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx)
if err != nil {
log.Warningf("copy up failed to get lower attributes: %v", err)
- return syserror.EIO
+ return linuxerr.EIO
}
var childUpperInode *Inode
@@ -210,7 +210,7 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
childFile, err := parentUpper.Create(ctx, root, next.name, FileFlags{Read: true, Write: true}, attrs.Perms)
if err != nil {
log.Warningf("copy up failed to create file: %v", err)
- return syserror.EIO
+ return linuxerr.EIO
}
defer childFile.DecRef(ctx)
childUpperInode = childFile.Dirent.Inode
@@ -218,13 +218,13 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
case Directory:
if err := parentUpper.CreateDirectory(ctx, root, next.name, attrs.Perms); err != nil {
log.Warningf("copy up failed to create directory: %v", err)
- return syserror.EIO
+ return linuxerr.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
werr := fmt.Errorf("copy up failed to lookup directory: %v", err)
cleanupUpper(ctx, parentUpper, next.name, werr)
- return syserror.EIO
+ return linuxerr.EIO
}
defer childUpper.DecRef(ctx)
childUpperInode = childUpper.Inode
@@ -234,17 +234,17 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
link, err := childLower.Readlink(ctx)
if err != nil {
log.Warningf("copy up failed to read symlink value: %v", err)
- return syserror.EIO
+ return linuxerr.EIO
}
if err := parentUpper.CreateLink(ctx, root, link, next.name); err != nil {
log.Warningf("copy up failed to create symlink: %v", err)
- return syserror.EIO
+ return linuxerr.EIO
}
childUpper, err := parentUpper.Lookup(ctx, next.name)
if err != nil {
werr := fmt.Errorf("copy up failed to lookup symlink: %v", err)
cleanupUpper(ctx, parentUpper, next.name, werr)
- return syserror.EIO
+ return linuxerr.EIO
}
defer childUpper.DecRef(ctx)
childUpperInode = childUpper.Inode
@@ -258,14 +258,14 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil {
werr := fmt.Errorf("copy up failed to copy up attributes: %v", err)
cleanupUpper(ctx, parentUpper, next.name, werr)
- return syserror.EIO
+ return linuxerr.EIO
}
// Copy the entire file.
if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil {
werr := fmt.Errorf("copy up failed to copy up contents: %v", err)
cleanupUpper(ctx, parentUpper, next.name, werr)
- return syserror.EIO
+ return linuxerr.EIO
}
lowerMappable := next.Inode.overlay.lower.Mappable()
@@ -273,7 +273,7 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
if lowerMappable != nil && upperMappable == nil {
werr := fmt.Errorf("copy up failed: cannot ensure memory mapping coherence")
cleanupUpper(ctx, parentUpper, next.name, werr)
- return syserror.EIO
+ return linuxerr.EIO
}
// Propagate memory mappings to the upper Inode.
@@ -410,7 +410,7 @@ func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error
return err
}
lowerXattr, err := lower.ListXattr(ctx, linux.XATTR_SIZE_MAX)
- if err != nil && err != syserror.EOPNOTSUPP {
+ if err != nil && !linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
return err
}
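
The xattr hunk above is representative of the syserror-to-linuxerr migration throughout this change: direct == comparisons become linuxerr.Equals, which matches on the underlying errno. A minimal sketch of the idiom, assuming the gvisor module is on the module path; listXattr is a stand-in for lower.ListXattr, not part of the change.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

func main() {
	// Filesystems without xattr support report EOPNOTSUPP, which copy-up
	// treats as "no xattrs to copy" rather than a failure.
	listXattr := func() ([]string, error) { return nil, linuxerr.EOPNOTSUPP }

	names, err := listXattr()
	if err != nil && !linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
		panic(err) // Real failure.
	}
	fmt.Println("xattrs to copy:", names)
}
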
diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD
index 23a3a9a2d..7baf26b24 100644
--- a/pkg/sentry/fs/dev/BUILD
+++ b/pkg/sentry/fs/dev/BUILD
@@ -18,6 +18,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/rand",
"//pkg/safemem",
@@ -33,7 +34,6 @@ go_library(
"//pkg/sentry/mm",
"//pkg/sentry/pgalloc",
"//pkg/sentry/socket/netstack",
- "//pkg/syserror",
"//pkg/tcpip/link/tun",
"//pkg/usermem",
"//pkg/waiter",
diff --git a/pkg/sentry/fs/dev/full.go b/pkg/sentry/fs/dev/full.go
index deb9c6ad8..6f0c1fc68 100644
--- a/pkg/sentry/fs/dev/full.go
+++ b/pkg/sentry/fs/dev/full.go
@@ -17,9 +17,9 @@ package dev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -77,5 +77,5 @@ var _ fs.FileOperations = (*fullFileOperations)(nil)
// Write implements FileOperations.Write.
func (*fullFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.ENOSPC
+ return 0, linuxerr.ENOSPC
}
diff --git a/pkg/sentry/fs/dev/net_tun.go b/pkg/sentry/fs/dev/net_tun.go
index 77e8d222a..1abf11142 100644
--- a/pkg/sentry/fs/dev/net_tun.go
+++ b/pkg/sentry/fs/dev/net_tun.go
@@ -17,6 +17,7 @@ package dev
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -24,7 +25,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket/netstack"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/link/tun"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -98,11 +98,11 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
switch request {
case linux.TUNSETIFF:
if !t.HasCapability(linux.CAP_NET_ADMIN) {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
stack, ok := t.NetworkContext().(*netstack.Stack)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var req linux.IFReq
@@ -125,7 +125,7 @@ func (n *netTunFileOperations) Ioctl(ctx context.Context, file *fs.File, io user
return 0, err
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fs/dirent.go b/pkg/sentry/fs/dirent.go
index 9d5d40954..d300a32e0 100644
--- a/pkg/sentry/fs/dirent.go
+++ b/pkg/sentry/fs/dirent.go
@@ -22,12 +22,12 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
type globalDirentMap struct {
@@ -487,11 +487,11 @@ func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnl
// Slow path: load the InodeOperations into memory. Since this is a hot path and the lookup may be
// expensive, if possible release the lock and re-acquire it.
if walkMayUnlock {
- d.mu.Unlock()
+ d.mu.Unlock() // +checklocksforce: results in an inconsistent block.
}
c, err := d.Inode.Lookup(ctx, name)
if walkMayUnlock {
- d.mu.Lock()
+ d.mu.Lock() // +checklocksforce: see above.
}
// No dice.
if err != nil {
@@ -593,21 +593,27 @@ func (d *Dirent) exists(ctx context.Context, root *Dirent, name string) bool {
// lockDirectory should be called for any operation that changes this `d`s
// children (creating or removing them).
-func (d *Dirent) lockDirectory() func() {
+// +checklocksacquire:d.dirMu
+// +checklocksacquire:d.mu
+func (d *Dirent) lockDirectory() {
renameMu.RLock()
d.dirMu.Lock()
d.mu.Lock()
- return func() {
- d.mu.Unlock()
- d.dirMu.Unlock()
- renameMu.RUnlock()
- }
+}
+
+// unlockDirectory is the reverse of lockDirectory.
+// +checklocksrelease:d.dirMu
+// +checklocksrelease:d.mu
+func (d *Dirent) unlockDirectory() {
+ d.mu.Unlock()
+ d.dirMu.Unlock()
+ renameMu.RUnlock() // +checklocksforce: see lockDirectory.
}
// Create creates a new regular file in this directory.
func (d *Dirent) Create(ctx context.Context, root *Dirent, name string, flags FileFlags, perms FilePermissions) (*File, error) {
- unlock := d.lockDirectory()
- defer unlock()
+ d.lockDirectory()
+ defer d.unlockDirectory()
// Does something already exist?
if d.exists(ctx, root, name) {
@@ -669,8 +675,8 @@ func (d *Dirent) finishCreate(ctx context.Context, child *Dirent, name string) {
// genericCreate executes create if name does not exist. Removes a negative Dirent at name if
// create succeeds.
func (d *Dirent) genericCreate(ctx context.Context, root *Dirent, name string, create func() error) error {
- unlock := d.lockDirectory()
- defer unlock()
+ d.lockDirectory()
+ defer d.unlockDirectory()
// Does something already exist?
if d.exists(ctx, root, name) {
@@ -857,7 +863,7 @@ func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent,
// Once we have written entries for "." and "..", future errors from
// IterateDir will be hidden.
if !IsDir(d.Inode.StableAttr) {
- return 0, syserror.ENOTDIR
+ return 0, linuxerr.ENOTDIR
}
// This is a special case for lseek(fd, 0, SEEK_END).
@@ -956,14 +962,14 @@ func (d *Dirent) isMountPointLocked() bool {
func (d *Dirent) mount(ctx context.Context, inode *Inode) (newChild *Dirent, err error) {
// Did we race with deletion?
if atomic.LoadInt32(&d.deleted) != 0 {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
// Refuse to mount a symlink.
//
// See Linux equivalent in fs/namespace.c:do_add_mount.
if IsSymlink(inode.StableAttr) {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
// Dirent that'll replace d.
@@ -991,7 +997,7 @@ func (d *Dirent) mount(ctx context.Context, inode *Inode) (newChild *Dirent, err
func (d *Dirent) unmount(ctx context.Context, replacement *Dirent) error {
// Did we race with deletion?
if atomic.LoadInt32(&d.deleted) != 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// Remount our former child in its place.
@@ -1020,8 +1026,8 @@ func (d *Dirent) Remove(ctx context.Context, root *Dirent, name string, dirPath
panic("Dirent.Remove: root must not be nil")
}
- unlock := d.lockDirectory()
- defer unlock()
+ d.lockDirectory()
+ defer d.unlockDirectory()
// Try to walk to the node.
child, err := d.walk(ctx, root, name, false /* may unlock */)
@@ -1081,8 +1087,8 @@ func (d *Dirent) RemoveDirectory(ctx context.Context, root *Dirent, name string)
panic("Dirent.Remove: root must not be nil")
}
- unlock := d.lockDirectory()
- defer unlock()
+ d.lockDirectory()
+ defer d.unlockDirectory()
// Check for dots.
if name == "." {
@@ -1258,17 +1264,15 @@ func (d *Dirent) dropExtendedReference() {
d.Inode.MountSource.fscache.Remove(d)
}
-// lockForRename takes locks on oldParent and newParent as required by Rename
-// and returns a function that will unlock the locks taken. The returned
-// function must be called even if a non-nil error is returned.
-func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName string) (func(), error) {
+// lockForRename takes locks on oldParent and newParent as required by Rename.
+// On return, unlockForRename must always be called, even with an error.
+// +checklocksacquire:oldParent.mu
+// +checklocksacquire:newParent.mu
+func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName string) error {
renameMu.Lock()
if oldParent == newParent {
oldParent.mu.Lock()
- return func() {
- oldParent.mu.Unlock()
- renameMu.Unlock()
- }, nil
+ return nil // +checklocksforce: only one lock exists.
}
// Renaming between directories is a bit subtle:
@@ -1296,11 +1300,7 @@ func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName
// itself.
err = unix.EINVAL
}
- return func() {
- newParent.mu.Unlock()
- oldParent.mu.Unlock()
- renameMu.Unlock()
- }, err
+ return err
}
child = p
}
@@ -1309,17 +1309,27 @@ func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName
// have no relationship; in either case we can do this:
newParent.mu.Lock()
oldParent.mu.Lock()
- return func() {
+ return nil
+}
+
+// unlockForRename is the opposite of lockForRename.
+// +checklocksrelease:oldParent.mu
+// +checklocksrelease:newParent.mu
+func unlockForRename(oldParent, newParent *Dirent) {
+ if oldParent == newParent {
oldParent.mu.Unlock()
- newParent.mu.Unlock()
- renameMu.Unlock()
- }, nil
+ renameMu.Unlock() // +checklocksforce: only one lock exists.
+ return
+ }
+ newParent.mu.Unlock()
+ oldParent.mu.Unlock()
+ renameMu.Unlock() // +checklocksforce: not tracked.
}
func (d *Dirent) checkSticky(ctx context.Context, victim *Dirent) error {
uattr, err := d.Inode.UnstableAttr(ctx)
if err != nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if !uattr.Perms.Sticky {
return nil
@@ -1332,7 +1342,7 @@ func (d *Dirent) checkSticky(ctx context.Context, victim *Dirent) error {
vuattr, err := victim.Inode.UnstableAttr(ctx)
if err != nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if vuattr.Owner.UID == creds.EffectiveKUID {
return nil
@@ -1340,7 +1350,7 @@ func (d *Dirent) checkSticky(ctx context.Context, victim *Dirent) error {
if victim.Inode.CheckCapability(ctx, linux.CAP_FOWNER) {
return nil
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// MayDelete determines whether `name`, a child of `d`, can be deleted or
@@ -1352,8 +1362,8 @@ func (d *Dirent) MayDelete(ctx context.Context, root *Dirent, name string) error
return err
}
- unlock := d.lockDirectory()
- defer unlock()
+ d.lockDirectory()
+ defer d.unlockDirectory()
victim, err := d.walk(ctx, root, name, true /* may unlock */)
if err != nil {
@@ -1374,7 +1384,7 @@ func (d *Dirent) mayDelete(ctx context.Context, victim *Dirent) error {
}
if victim.IsRoot() {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
return nil
@@ -1391,8 +1401,8 @@ func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string
}
// Acquire global renameMu lock, and mu locks on oldParent/newParent.
- unlock, err := lockForRename(oldParent, oldName, newParent, newName)
- defer unlock()
+ err := lockForRename(oldParent, oldName, newParent, newName)
+ defer unlockForRename(oldParent, newParent)
if err != nil {
return err
}
@@ -1439,7 +1449,7 @@ func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string
// replaced is the dirent that is being overwritten by rename.
replaced, err := newParent.walk(ctx, root, newName, false /* may unlock */)
if err != nil {
- if err != syserror.ENOENT {
+ if !linuxerr.Equals(linuxerr.ENOENT, err) {
return err
}
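
The dirent.go changes above replace closure-returning lock helpers with explicitly annotated lock/unlock pairs so the checklocks analyzer can follow the lock state. A minimal sketch of the pattern with a hypothetical type and the standard sync.Mutex; whether the analyzer actually runs depends on the build setup.

package main

import (
	"fmt"
	"sync"
)

type node struct {
	mu sync.Mutex
	n  int
}

// lock is the analogue of lockDirectory: the annotation tells the analyzer
// that the receiver's mutex is held on return.
// +checklocksacquire:d.mu
func (d *node) lock() {
	d.mu.Lock()
}

// unlock is the analogue of unlockDirectory.
// +checklocksrelease:d.mu
func (d *node) unlock() {
	d.mu.Unlock()
}

func (d *node) bump() {
	d.lock()
	defer d.unlock()
	d.n++
}

func main() {
	d := &node{}
	d.bump()
	fmt.Println(d.n)
}
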
diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD
index 2120f2bad..9f1fe5160 100644
--- a/pkg/sentry/fs/fdpipe/BUILD
+++ b/pkg/sentry/fs/fdpipe/BUILD
@@ -13,6 +13,7 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/fdnotifier",
"//pkg/log",
@@ -21,7 +22,6 @@ go_library(
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -38,12 +38,13 @@ go_test(
library = ":fdpipe",
deps = [
"//pkg/context",
+ "//pkg/errors",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/fdnotifier",
"//pkg/hostarch",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
- "//pkg/syserror",
"//pkg/usermem",
"@com_github_google_uuid//:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
diff --git a/pkg/sentry/fs/fdpipe/pipe.go b/pkg/sentry/fs/fdpipe/pipe.go
index 757b7d511..4370cce33 100644
--- a/pkg/sentry/fs/fdpipe/pipe.go
+++ b/pkg/sentry/fs/fdpipe/pipe.go
@@ -20,6 +20,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/log"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -141,7 +141,7 @@ func (p *pipeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IO
n, err := dst.CopyOutFrom(ctx, safemem.FromIOReader{secio.FullReader{p.file}})
total := int64(bufN) + n
if err != nil && isBlockError(err) {
- return total, syserror.ErrWouldBlock
+ return total, linuxerr.ErrWouldBlock
}
return total, err
}
@@ -150,15 +150,15 @@ func (p *pipeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IO
func (p *pipeOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
n, err := src.CopyInTo(ctx, safemem.FromIOWriter{p.file})
if err != nil && isBlockError(err) {
- return n, syserror.ErrWouldBlock
+ return n, linuxerr.ErrWouldBlock
}
return n, err
}
// isBlockError unwraps os errors and checks if they are caused by EAGAIN or
-// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock.
+// EWOULDBLOCK. This is so they can be transformed into linuxerr.ErrWouldBlock.
func isBlockError(err error) bool {
- if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK {
+ if linuxerr.Equals(linuxerr.EAGAIN, err) || linuxerr.Equals(linuxerr.EWOULDBLOCK, err) {
return true
}
if pe, ok := err.(*os.PathError); ok {
diff --git a/pkg/sentry/fs/fdpipe/pipe_opener.go b/pkg/sentry/fs/fdpipe/pipe_opener.go
index adda19168..e91e1b5cb 100644
--- a/pkg/sentry/fs/fdpipe/pipe_opener.go
+++ b/pkg/sentry/fs/fdpipe/pipe_opener.go
@@ -21,9 +21,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// NonBlockingOpener is a generic host file opener used to retry opening host
@@ -40,7 +40,7 @@ func Open(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (fs
p := &pipeOpenState{}
canceled := false
for {
- if file, err := p.TryOpen(ctx, opener, flags); err != syserror.ErrWouldBlock {
+ if file, err := p.TryOpen(ctx, opener, flags); err != linuxerr.ErrWouldBlock {
return file, err
}
@@ -51,7 +51,7 @@ func Open(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (fs
if p.hostFile != nil {
p.hostFile.Close()
}
- return nil, syserror.ErrInterrupted
+ return nil, linuxerr.ErrInterrupted
}
cancel := ctx.SleepStart()
@@ -106,13 +106,13 @@ func (p *pipeOpenState) TryOpen(ctx context.Context, opener NonBlockingOpener, f
}
return newPipeOperations(ctx, opener, flags, f, nil)
- // Handle opening O_WRONLY blocking: convert ENXIO to syserror.ErrWouldBlock.
+ // Handle opening O_WRONLY blocking: convert ENXIO to linuxerr.ErrWouldBlock.
// See TryOpenWriteOnly for more details.
case flags.Write:
return p.TryOpenWriteOnly(ctx, opener)
default:
- // Handle opening O_RDONLY blocking: convert EOF from read to syserror.ErrWouldBlock.
+ // Handle opening O_RDONLY blocking: convert EOF from read to linuxerr.ErrWouldBlock.
// See TryOpenReadOnly for more details.
return p.TryOpenReadOnly(ctx, opener)
}
@@ -120,7 +120,7 @@ func (p *pipeOpenState) TryOpen(ctx context.Context, opener NonBlockingOpener, f
// TryOpenReadOnly tries to open a host pipe read only but only returns a fs.File when
// there is a coordinating writer. Call TryOpenReadOnly repeatedly on the same pipeOpenState
-// until syserror.ErrWouldBlock is no longer returned.
+// until linuxerr.ErrWouldBlock is no longer returned.
//
// How it works:
//
@@ -150,7 +150,7 @@ func (p *pipeOpenState) TryOpenReadOnly(ctx context.Context, opener NonBlockingO
if n == 0 {
// EOF means that we're not ready yet.
if rerr == nil || rerr == io.EOF {
- return nil, syserror.ErrWouldBlock
+ return nil, linuxerr.ErrWouldBlock
}
// Any error that is not EWOULDBLOCK also means we're not
// ready yet, and probably never will be ready. In this
@@ -175,16 +175,16 @@ func (p *pipeOpenState) TryOpenReadOnly(ctx context.Context, opener NonBlockingO
// TryOpenWriteOnly tries to open a host pipe write only but only returns a fs.File when
// there is a coordinating reader. Call TryOpenWriteOnly repeatedly on the same pipeOpenState
-// until syserror.ErrWouldBlock is no longer returned.
+// until linuxerr.ErrWouldBlock is no longer returned.
//
// How it works:
//
// Opening a pipe write only will return ENXIO until readers are available. Converts the ENXIO
-// to an syserror.ErrWouldBlock, to tell callers to retry.
+// to a linuxerr.ErrWouldBlock, to tell callers to retry.
func (*pipeOpenState) TryOpenWriteOnly(ctx context.Context, opener NonBlockingOpener) (*pipeOperations, error) {
hostFile, err := opener.NonBlockingOpen(ctx, fs.PermMask{Write: true})
if unwrapError(err) == unix.ENXIO {
- return nil, syserror.ErrWouldBlock
+ return nil, linuxerr.ErrWouldBlock
}
if err != nil {
return nil, err
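
Open above is the caller of the TryOpen* helpers; it simply loops until an attempt stops returning linuxerr.ErrWouldBlock. A minimal sketch of that retry shape with a stubbed attempt; the countdown and timer sleep are illustrative only, since the real loop sleeps on the task context and returns ErrInterrupted when canceled.

package main

import (
	"fmt"
	"time"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// tryOpen stands in for pipeOpenState.TryOpen: it keeps failing with
// ErrWouldBlock until a peer shows up (simulated with a countdown).
func tryOpen(remaining *int) (string, error) {
	if *remaining > 0 {
		*remaining--
		return "", linuxerr.ErrWouldBlock
	}
	return "pipe ready", nil
}

func main() {
	remaining := 3
	for {
		res, err := tryOpen(&remaining)
		if err != linuxerr.ErrWouldBlock {
			fmt.Println(res, err)
			return
		}
		time.Sleep(time.Millisecond)
	}
}
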
diff --git a/pkg/sentry/fs/fdpipe/pipe_opener_test.go b/pkg/sentry/fs/fdpipe/pipe_opener_test.go
index 7b3ff191f..e1587288e 100644
--- a/pkg/sentry/fs/fdpipe/pipe_opener_test.go
+++ b/pkg/sentry/fs/fdpipe/pipe_opener_test.go
@@ -25,12 +25,11 @@ import (
"github.com/google/uuid"
"golang.org/x/sys/unix"
-
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -146,18 +145,18 @@ func TestTryOpen(t *testing.T) {
err: unix.ENOENT,
},
{
- desc: "Blocking Write only returns with syserror.ErrWouldBlock",
+ desc: "Blocking Write only returns with linuxerr.ErrWouldBlock",
makePipe: true,
flags: fs.FileFlags{Write: true},
expectFile: false,
- err: syserror.ErrWouldBlock,
+ err: linuxerr.ErrWouldBlock,
},
{
- desc: "Blocking Read only returns with syserror.ErrWouldBlock",
+ desc: "Blocking Read only returns with linuxerr.ErrWouldBlock",
makePipe: true,
flags: fs.FileFlags{Read: true},
expectFile: false,
- err: syserror.ErrWouldBlock,
+ err: linuxerr.ErrWouldBlock,
},
} {
name := pipename()
@@ -316,7 +315,7 @@ func TestCopiedReadAheadBuffer(t *testing.T) {
// another writer comes along. This means we can open the same pipe write only
// with no problems + write to it, given that opener.Open already tried to open
// the pipe RDONLY and succeeded, which we know happened if TryOpen returns
- // syserror.ErrwouldBlock.
+ // linuxerr.ErrWouldBlock.
//
// This simulates the open(RDONLY) <-> open(WRONLY)+write race we care about, but
// does not cause our test to be racy (which would be terrible).
@@ -328,8 +327,8 @@ func TestCopiedReadAheadBuffer(t *testing.T) {
pipeOps.Release(ctx)
t.Fatalf("open(%s, %o) got file, want nil", name, unix.O_RDONLY)
}
- if err != syserror.ErrWouldBlock {
- t.Fatalf("open(%s, %o) got error %v, want %v", name, unix.O_RDONLY, err, syserror.ErrWouldBlock)
+ if err != linuxerr.ErrWouldBlock {
+ t.Fatalf("open(%s, %o) got error %v, want %v", name, unix.O_RDONLY, err, linuxerr.ErrWouldBlock)
}
// Then open the same pipe write only and write some bytes to it. The next
@@ -515,8 +514,8 @@ func assertReaderHungup(t *testing.T, desc string, reader io.Reader) bool {
}
func assertWriterHungup(t *testing.T, desc string, writer io.Writer) bool {
- if _, err := writer.Write([]byte("hello")); unwrapError(err) != unix.EPIPE {
- t.Errorf("%s: write to self after hangup got error %v, want %v", desc, err, unix.EPIPE)
+ if _, err := writer.Write([]byte("hello")); !linuxerr.Equals(linuxerr.EPIPE, unwrapError(err)) {
+ t.Errorf("%s: write to self after hangup got error %v, want %v", desc, err, linuxerr.EPIPE)
return false
}
return true
diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go
index ab0e9dac7..63900e766 100644
--- a/pkg/sentry/fs/fdpipe/pipe_test.go
+++ b/pkg/sentry/fs/fdpipe/pipe_test.go
@@ -21,14 +21,14 @@ import (
"testing"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/errors"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
func singlePipeFD() (int, error) {
@@ -209,17 +209,17 @@ func TestPipeRequest(t *testing.T) {
{
desc: "ReadDir on pipe returns ENOTDIR",
context: &ReadDir{},
- err: unix.ENOTDIR,
+ err: linuxerr.ENOTDIR,
},
{
desc: "Fsync on pipe returns EINVAL",
context: &Fsync{},
- err: unix.EINVAL,
+ err: linuxerr.EINVAL,
},
{
desc: "Seek on pipe returns ESPIPE",
context: &Seek{},
- err: unix.ESPIPE,
+ err: linuxerr.ESPIPE,
},
{
desc: "Readv on pipe from empty buffer returns nil",
@@ -237,7 +237,7 @@ func TestPipeRequest(t *testing.T) {
context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))},
flags: fs.FileFlags{Read: true},
keepOpenPartner: true,
- err: syserror.ErrWouldBlock,
+ err: linuxerr.ErrWouldBlock,
},
{
desc: "Writev on pipe from empty buffer returns nil",
@@ -248,7 +248,7 @@ func TestPipeRequest(t *testing.T) {
desc: "Writev on pipe from non-empty buffer and closed partner returns EPIPE",
context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))},
flags: fs.FileFlags{Write: true},
- err: unix.EPIPE,
+ err: linuxerr.EPIPE,
},
{
desc: "Writev on pipe from non-empty buffer and open partner succeeds",
@@ -307,7 +307,11 @@ func TestPipeRequest(t *testing.T) {
t.Errorf("%s: unknown request type %T", test.desc, test.context)
}
- if unwrapError(err) != test.err {
+ if linuxErr, ok := test.err.(*errors.Error); ok {
+ if !linuxerr.Equals(linuxErr, unwrapError(err)) {
+ t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
+ }
+ } else if test.err != unwrapError(err) {
t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
}
}
@@ -405,8 +409,8 @@ func TestPipeReadsAccumulate(t *testing.T) {
n, err := p.Read(ctx, file, iov, 0)
total := n
iov = iov.DropFirst64(n)
- if err != syserror.ErrWouldBlock {
- t.Fatalf("Readv got error %v, want %v", err, syserror.ErrWouldBlock)
+ if err != linuxerr.ErrWouldBlock {
+ t.Fatalf("Readv got error %v, want %v", err, linuxerr.ErrWouldBlock)
}
// Write a few more bytes to allow us to read more/accumulate.
@@ -474,8 +478,8 @@ func TestPipeWritesAccumulate(t *testing.T) {
}
iov := usermem.BytesIOSequence(writeBuffer)
n, err := p.Write(ctx, file, iov, 0)
- if err != syserror.ErrWouldBlock {
- t.Fatalf("Writev got error %v, want %v", err, syserror.ErrWouldBlock)
+ if err != linuxerr.ErrWouldBlock {
+ t.Fatalf("Writev got error %v, want %v", err, linuxerr.ErrWouldBlock)
}
if n != int64(pipeSize) {
t.Fatalf("Writev partial write, got: %v, want %v", n, pipeSize)
diff --git a/pkg/sentry/fs/file.go b/pkg/sentry/fs/file.go
index 57f904801..df04f044d 100644
--- a/pkg/sentry/fs/file.go
+++ b/pkg/sentry/fs/file.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/amutex"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
@@ -27,7 +28,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -195,10 +195,10 @@ func (f *File) EventUnregister(e *waiter.Entry) {
// offset to the value returned by f.FileOperations.Seek if the operation
// is successful.
//
-// Returns syserror.ErrInterrupted if seeking was interrupted.
+// Returns linuxerr.ErrInterrupted if seeking was interrupted.
func (f *File) Seek(ctx context.Context, whence SeekWhence, offset int64) (int64, error) {
if !f.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -217,10 +217,10 @@ func (f *File) Seek(ctx context.Context, whence SeekWhence, offset int64) (int64
// Readdir unconditionally updates the access time on the File's Inode,
// see fs/readdir.c:iterate_dir.
//
-// Returns syserror.ErrInterrupted if reading was interrupted.
+// Returns linuxerr.ErrInterrupted if reading was interrupted.
func (f *File) Readdir(ctx context.Context, serializer DentrySerializer) error {
if !f.mu.Lock(ctx) {
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -232,13 +232,13 @@ func (f *File) Readdir(ctx context.Context, serializer DentrySerializer) error {
// Readv calls f.FileOperations.Read with f as the File, advancing the file
// offset if f.FileOperations.Read returns bytes read > 0.
//
-// Returns syserror.ErrInterrupted if reading was interrupted.
+// Returns linuxerr.ErrInterrupted if reading was interrupted.
func (f *File) Readv(ctx context.Context, dst usermem.IOSequence) (int64, error) {
start := fsmetric.StartReadWait()
defer fsmetric.FinishReadWait(fsmetric.ReadWait, start)
if !f.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
fsmetric.Reads.Increment()
@@ -260,7 +260,7 @@ func (f *File) Preadv(ctx context.Context, dst usermem.IOSequence, offset int64)
defer fsmetric.FinishReadWait(fsmetric.ReadWait, start)
if !f.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
fsmetric.Reads.Increment()
@@ -276,10 +276,10 @@ func (f *File) Preadv(ctx context.Context, dst usermem.IOSequence, offset int64)
// unavoidably racy for network file systems. Writev also truncates src
// to avoid overrunning the current file size limit if necessary.
//
-// Returns syserror.ErrInterrupted if writing was interrupted.
+// Returns linuxerr.ErrInterrupted if writing was interrupted.
func (f *File) Writev(ctx context.Context, src usermem.IOSequence) (int64, error) {
if !f.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
unlockAppendMu := f.Dirent.Inode.lockAppendMu(f.Flags().Append)
// Handle append mode.
@@ -297,7 +297,7 @@ func (f *File) Writev(ctx context.Context, src usermem.IOSequence) (int64, error
case ok && limit == 0:
unlockAppendMu()
f.mu.Unlock()
- return 0, syserror.ErrExceedsFileSizeLimit
+ return 0, linuxerr.ErrExceedsFileSizeLimit
case ok:
src = src.TakeFirst64(limit)
}
@@ -335,7 +335,7 @@ func (f *File) Pwritev(ctx context.Context, src usermem.IOSequence, offset int64
limit, ok := f.checkLimit(ctx, offset)
switch {
case ok && limit == 0:
- return 0, syserror.ErrExceedsFileSizeLimit
+ return 0, linuxerr.ErrExceedsFileSizeLimit
case ok:
src = src.TakeFirst64(limit)
}
@@ -352,7 +352,7 @@ func (f *File) offsetForAppend(ctx context.Context, offset *int64) error {
if err != nil {
// This is an odd error, we treat it as evidence that
// something is terribly wrong with the filesystem.
- return syserror.EIO
+ return linuxerr.EIO
}
// Update the offset.
@@ -381,10 +381,10 @@ func (f *File) checkLimit(ctx context.Context, offset int64) (int64, bool) {
// Fsync calls f.FileOperations.Fsync with f as the File.
//
-// Returns syserror.ErrInterrupted if syncing was interrupted.
+// Returns linuxerr.ErrInterrupted if syncing was interrupted.
func (f *File) Fsync(ctx context.Context, start int64, end int64, syncType SyncType) error {
if !f.mu.Lock(ctx) {
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -393,10 +393,10 @@ func (f *File) Fsync(ctx context.Context, start int64, end int64, syncType SyncT
// Flush calls f.FileOperations.Flush with f as the File.
//
-// Returns syserror.ErrInterrupted if syncing was interrupted.
+// Returns linuxerr.ErrInterrupted if syncing was interrupted.
func (f *File) Flush(ctx context.Context) error {
if !f.mu.Lock(ctx) {
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -405,10 +405,10 @@ func (f *File) Flush(ctx context.Context) error {
// ConfigureMMap calls f.FileOperations.ConfigureMMap with f as the File.
//
-// Returns syserror.ErrInterrupted if interrupted.
+// Returns linuxerr.ErrInterrupted if interrupted.
func (f *File) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
if !f.mu.Lock(ctx) {
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -417,10 +417,10 @@ func (f *File) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
// UnstableAttr calls f.FileOperations.UnstableAttr with f as the File.
//
-// Returns syserror.ErrInterrupted if interrupted.
+// Returns linuxerr.ErrInterrupted if interrupted.
func (f *File) UnstableAttr(ctx context.Context) (UnstableAttr, error) {
if !f.mu.Lock(ctx) {
- return UnstableAttr{}, syserror.ErrInterrupted
+ return UnstableAttr{}, linuxerr.ErrInterrupted
}
defer f.mu.Unlock()
@@ -495,7 +495,7 @@ type lockedReader struct {
// Read implements io.Reader.Read.
func (r *lockedReader) Read(buf []byte) (int, error) {
if r.Ctx.Interrupted() {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
n, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), r.Offset)
r.Offset += n
@@ -505,7 +505,7 @@ func (r *lockedReader) Read(buf []byte) (int, error) {
// ReadAt implements io.Reader.ReadAt.
func (r *lockedReader) ReadAt(buf []byte, offset int64) (int, error) {
if r.Ctx.Interrupted() {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
n, err := r.File.FileOperations.Read(r.Ctx, r.File, usermem.BytesIOSequence(buf), offset)
return int(n), err
@@ -530,7 +530,7 @@ type lockedWriter struct {
// Write implements io.Writer.Write.
func (w *lockedWriter) Write(buf []byte) (int, error) {
if w.Ctx.Interrupted() {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
n, err := w.WriteAt(buf, w.Offset)
w.Offset += int64(n)
@@ -549,7 +549,7 @@ func (w *lockedWriter) WriteAt(buf []byte, offset int64) (int, error) {
// contract. Enforce that here.
for written < len(buf) {
if w.Ctx.Interrupted() {
- return written, syserror.ErrInterrupted
+ return written, linuxerr.ErrInterrupted
}
var n int64
n, err = w.File.FileOperations.Write(w.Ctx, w.File, usermem.BytesIOSequence(buf[written:]), offset+int64(written))
diff --git a/pkg/sentry/fs/file_operations.go b/pkg/sentry/fs/file_operations.go
index 6ec721022..ce47c3907 100644
--- a/pkg/sentry/fs/file_operations.go
+++ b/pkg/sentry/fs/file_operations.go
@@ -120,7 +120,7 @@ type FileOperations interface {
// Files with !FileFlags.Pwrite.
//
// If only part of src could be written, Write must return an error
- // indicating why (e.g. syserror.ErrWouldBlock).
+ // indicating why (e.g. linuxerr.ErrWouldBlock).
//
// Write does not check permissions nor flags.
//
diff --git a/pkg/sentry/fs/file_overlay.go b/pkg/sentry/fs/file_overlay.go
index 696613f3a..a27dd0b9a 100644
--- a/pkg/sentry/fs/file_overlay.go
+++ b/pkg/sentry/fs/file_overlay.go
@@ -16,13 +16,14 @@ package fs
import (
"io"
+ "math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -245,7 +246,7 @@ func (f *overlayFileOperations) onTop(ctx context.Context, file *File, fn func(*
// Something very wrong; return a generic filesystem
// error to avoid propagating internals.
f.upperMu.Unlock()
- return syserror.EIO
+ return linuxerr.EIO
}
// Save upper file.
@@ -357,13 +358,16 @@ func (*overlayFileOperations) ConfigureMMap(ctx context.Context, file *File, opt
}
if !o.isMappableLocked() {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
- // FIXME(jamieliu): This is a copy/paste of fsutil.GenericConfigureMMap,
- // which we can't use because the overlay implementation is in package fs,
- // so depending on fs/fsutil would create a circular dependency. Move
- // overlay to fs/overlay.
+ // TODO(gvisor.dev/issue/1624): This is a copy/paste of
+ // fsutil.GenericConfigureMMap, which we can't use because the overlay
+ // implementation is in package fs, so depending on fs/fsutil would create
+ // a circular dependency. VFS2 overlay doesn't have this issue.
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = o
opts.MappingIdentity = file
file.IncRef()
@@ -407,7 +411,7 @@ func (f *overlayFileOperations) Ioctl(ctx context.Context, overlayFile *File, io
// copy up on any ioctl would be too drastic. In the future, it can have a
// list of ioctls that are safe to send to lower and a list that triggers a
// copy up.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
return f.upper.FileOperations.Ioctl(ctx, f.upper, io, args)
}
@@ -417,7 +421,7 @@ func (f *overlayFileOperations) FifoSize(ctx context.Context, overlayFile *File)
err = f.onTop(ctx, overlayFile, func(file *File, ops FileOperations) error {
sz, ok := ops.(FifoSizer)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rv, err = sz.FifoSize(ctx, file)
return err
@@ -432,11 +436,11 @@ func (f *overlayFileOperations) SetFifoSize(size int64) (rv int64, err error) {
if f.upper == nil {
// Named pipes cannot be copied up and changes to the lower are prohibited.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
sz, ok := f.upper.FileOperations.(FifoSizer)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
return sz.SetFifoSize(size)
}
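
Both overlayFileOperations.ConfigureMMap above and fsutil.GenericConfigureMMap below now reject mappings whose end offset would exceed MaxInt64. A minimal sketch of the bound with a hypothetical helper and a plain sentinel error (the real code returns linuxerr.EOVERFLOW); the wrap check is extra caution for this standalone uint64 version.

package main

import (
	"errors"
	"fmt"
	"math"
)

var errOverflow = errors.New("EOVERFLOW")

// checkMmapRange mirrors the bound added above: the end of the mapping must
// stay within int64 so it remains a valid signed file offset.
func checkMmapRange(offset, length uint64) error {
	if end := offset + length; end < offset || end > math.MaxInt64 {
		return errOverflow
	}
	return nil
}

func main() {
	fmt.Println(checkMmapRange(math.MaxInt64-10, 100)) // EOVERFLOW
	fmt.Println(checkMmapRange(0, 4096))               // <nil>
}
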
diff --git a/pkg/sentry/fs/fs.go b/pkg/sentry/fs/fs.go
index 44587bb37..a346c316b 100644
--- a/pkg/sentry/fs/fs.go
+++ b/pkg/sentry/fs/fs.go
@@ -80,23 +80,33 @@ func AsyncBarrier() {
// Async executes a function asynchronously.
//
// Async must not be called recursively.
+// +checklocksignore
func Async(f func()) {
workMu.RLock()
- go func() { // S/R-SAFE: AsyncBarrier must be called.
- defer workMu.RUnlock() // Ensure RUnlock in case of panic.
- f()
- }()
+ go asyncWork(f) // S/R-SAFE: AsyncBarrier must be called.
+}
+
+// +checklocksignore
+func asyncWork(f func()) {
+ // Ensure RUnlock in case of panic.
+ defer workMu.RUnlock()
+ f()
}
// AsyncWithContext is just like Async, except that it calls the asynchronous
// function with the given context as argument. This function exists to avoid
// needing to allocate an extra function on the heap in a hot path.
+// +checklocksignore
func AsyncWithContext(ctx context.Context, f func(context.Context)) {
workMu.RLock()
- go func() { // S/R-SAFE: AsyncBarrier must be called.
- defer workMu.RUnlock() // Ensure RUnlock in case of panic.
- f(ctx)
- }()
+ go asyncWorkWithContext(ctx, f) // S/R-SAFE: AsyncBarrier must be called.
+}
+
+// +checklocksignore
+func asyncWorkWithContext(ctx context.Context, f func(context.Context)) {
+ // Ensure RUnlock in case of panic.
+ defer workMu.RUnlock()
+ f(ctx)
}
// AsyncErrorBarrier waits for all outstanding asynchronous work to complete, or
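
The Async rewrite above exists because the read lock is taken in the caller and released in the spawned goroutine, a split the checklocks analyzer cannot follow, hence +checklocksignore on both halves. A minimal sketch of the shape with standard library types; the barrier body is an assumption for this sketch, since AsyncBarrier's implementation is not part of the hunk.

package main

import (
	"fmt"
	"sync"
)

var workMu sync.RWMutex

// async takes the read lock and hands the release to the goroutine.
// +checklocksignore
func async(f func()) {
	workMu.RLock()
	go asyncWork(f)
}

// +checklocksignore
func asyncWork(f func()) {
	defer workMu.RUnlock() // Ensure RUnlock even if f panics.
	f()
}

// asyncBarrier waits for outstanding async work by taking the write lock
// (assumed implementation).
func asyncBarrier() {
	workMu.Lock()
	workMu.Unlock()
}

func main() {
	async(func() { fmt.Println("background work") })
	asyncBarrier()
}
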
diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD
index 6469cc3a9..1a59800ea 100644
--- a/pkg/sentry/fs/fsutil/BUILD
+++ b/pkg/sentry/fs/fsutil/BUILD
@@ -76,6 +76,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/safemem",
@@ -89,7 +90,6 @@ go_library(
"//pkg/sentry/usage",
"//pkg/state",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -106,13 +106,13 @@ go_test(
library = ":fsutil",
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/safemem",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
"//pkg/sentry/kernel/time",
"//pkg/sentry/memmap",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fs/fsutil/file.go b/pkg/sentry/fs/fsutil/file.go
index dc9efa5df..38e3ed42d 100644
--- a/pkg/sentry/fs/fsutil/file.go
+++ b/pkg/sentry/fs/fsutil/file.go
@@ -16,12 +16,13 @@ package fsutil
import (
"io"
+ "math"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -45,7 +46,7 @@ func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence,
// Does the Inode represents a non-seekable type?
if fs.IsPipe(inode.StableAttr) || fs.IsSocket(inode.StableAttr) {
- return current, syserror.ESPIPE
+ return current, linuxerr.ESPIPE
}
// Does the Inode represent a character device?
@@ -63,12 +64,12 @@ func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence,
switch inode.StableAttr.Type {
case fs.RegularFile, fs.SpecialFile, fs.BlockDevice:
if offset < 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
return offset, nil
case fs.Directory, fs.SpecialDirectory:
if offset != 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
// SEEK_SET to 0 moves the directory "cursor" to the beginning.
if dirCursor != nil {
@@ -76,22 +77,22 @@ func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence,
}
return 0, nil
default:
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
case fs.SeekCurrent:
switch inode.StableAttr.Type {
case fs.RegularFile, fs.SpecialFile, fs.BlockDevice:
if current+offset < 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
return current + offset, nil
case fs.Directory, fs.SpecialDirectory:
if offset != 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
return current, nil
default:
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
case fs.SeekEnd:
switch inode.StableAttr.Type {
@@ -103,14 +104,14 @@ func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence,
}
sz := uattr.Size
if sz+offset < 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
return sz + offset, nil
// FIXME(b/34778850): This is not universally correct.
// Remove SpecialDirectory.
case fs.SpecialDirectory:
if offset != 0 {
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
// SEEK_END to 0 moves the directory "cursor" to the end.
//
@@ -121,12 +122,12 @@ func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence,
// futile (EOF will always be the result).
return fs.FileMaxOffset, nil
default:
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
}
// Not a valid seek request.
- return current, syserror.EINVAL
+ return current, linuxerr.EINVAL
}
// FileGenericSeek implements fs.FileOperations.Seek for files that use a
@@ -152,7 +153,7 @@ type FileNoSeek struct{}
// Seek implements fs.FileOperations.Seek.
func (FileNoSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// FilePipeSeek implements fs.FileOperations.Seek and can be used for files
@@ -161,7 +162,7 @@ type FilePipeSeek struct{}
// Seek implements fs.FileOperations.Seek.
func (FilePipeSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// FileNotDirReaddir implements fs.FileOperations.Readdir for non-directories.
@@ -169,7 +170,7 @@ type FileNotDirReaddir struct{}
// Readdir implements fs.FileOperations.FileNotDirReaddir.
func (FileNotDirReaddir) Readdir(context.Context, *fs.File, fs.DentrySerializer) (int64, error) {
- return 0, syserror.ENOTDIR
+ return 0, linuxerr.ENOTDIR
}
// FileNoFsync implements fs.FileOperations.Fsync for files that don't support
@@ -178,7 +179,7 @@ type FileNoFsync struct{}
// Fsync implements fs.FileOperations.Fsync.
func (FileNoFsync) Fsync(context.Context, *fs.File, int64, int64, fs.SyncType) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// FileNoopFsync implements fs.FileOperations.Fsync for files that don't need
@@ -204,12 +205,15 @@ type FileNoMMap struct{}
// ConfigureMMap implements fs.FileOperations.ConfigureMMap.
func (FileNoMMap) ConfigureMMap(context.Context, *fs.File, *memmap.MMapOpts) error {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
// GenericConfigureMMap implements fs.FileOperations.ConfigureMMap for most
// filesystems that support memory mapping.
func GenericConfigureMMap(file *fs.File, m memmap.Mappable, opts *memmap.MMapOpts) error {
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = m
opts.MappingIdentity = file
file.IncRef()
@@ -222,7 +226,7 @@ type FileNoIoctl struct{}
// Ioctl implements fs.FileOperations.Ioctl.
func (FileNoIoctl) Ioctl(context.Context, *fs.File, usermem.IO, arch.SyscallArguments) (uintptr, error) {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// FileNoSplice implements fs.FileOperations.ReadFrom and
@@ -231,12 +235,12 @@ type FileNoSplice struct{}
// WriteTo implements fs.FileOperations.WriteTo.
func (FileNoSplice) WriteTo(context.Context, *fs.File, io.Writer, int64, bool) (int64, error) {
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// ReadFrom implements fs.FileOperations.ReadFrom.
func (FileNoSplice) ReadFrom(context.Context, *fs.File, io.Reader, int64) (int64, error) {
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// DirFileOperations implements most of fs.FileOperations for directories,
@@ -254,12 +258,12 @@ type DirFileOperations struct {
// Read implements fs.FileOperations.Read
func (*DirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Write implements fs.FileOperations.Write.
func (*DirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// StaticDirFileOperations implements fs.FileOperations for directories with
@@ -345,7 +349,7 @@ func NewFileStaticContentReader(b []byte) FileStaticContentReader {
// Read implements fs.FileOperations.Read.
func (scr *FileStaticContentReader) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset >= int64(len(scr.content)) {
return 0, nil
@@ -367,7 +371,7 @@ type FileNoRead struct{}
// Read implements fs.FileOperations.Read.
func (FileNoRead) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// FileNoWrite implements fs.FileOperations.Write to return EINVAL.
@@ -375,7 +379,7 @@ type FileNoWrite struct{}
// Write implements fs.FileOperations.Write.
func (FileNoWrite) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// FileNoopRead implements fs.FileOperations.Read as a noop.
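
The new EOVERFLOW guard in GenericConfigureMMap above rejects mappings whose end offset cannot be represented as an int64 file offset. Below is a minimal standalone sketch of that bound, written against plain uint64 inputs rather than memmap.MMapOpts and phrased to avoid computing the intermediate sum; the function and variable names are illustrative, not gVisor API.

package main

import (
	"fmt"
	"math"
)

// endOffsetFits reports whether offset+length fits in an int64 file offset.
// Comparing length against math.MaxInt64-offset avoids computing offset+length
// directly, which could itself wrap around in uint64 arithmetic.
func endOffsetFits(offset, length uint64) bool {
	return offset <= math.MaxInt64 && length <= math.MaxInt64-offset
}

func main() {
	fmt.Println(endOffsetFits(math.MaxInt64-4096, 4096)) // true: ends exactly at MaxInt64.
	fmt.Println(endOffsetFits(math.MaxInt64, 1))         // false: end offset would overflow.
}
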
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go
index 23528bf25..37ddb1a3c 100644
--- a/pkg/sentry/fs/fsutil/host_file_mapper.go
+++ b/pkg/sentry/fs/fsutil/host_file_mapper.go
@@ -93,7 +93,8 @@ func NewHostFileMapper() *HostFileMapper {
func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
- for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+ chunkStart := mr.Start &^ chunkMask
+ for {
refs := f.refs[chunkStart]
pgs := pagesInChunk(mr, chunkStart)
if refs+pgs < refs {
@@ -101,6 +102,10 @@ func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
panic(fmt.Sprintf("HostFileMapper.IncRefOn(%v): adding %d page references to chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
}
f.refs[chunkStart] = refs + pgs
+ chunkStart += chunkSize
+ if chunkStart >= mr.End || chunkStart == 0 {
+ break
+ }
}
}
@@ -112,7 +117,8 @@ func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) {
func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
f.refsMu.Lock()
defer f.refsMu.Unlock()
- for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize {
+ chunkStart := mr.Start &^ chunkMask
+ for {
refs := f.refs[chunkStart]
pgs := pagesInChunk(mr, chunkStart)
switch {
@@ -128,6 +134,10 @@ func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) {
case refs < pgs:
panic(fmt.Sprintf("HostFileMapper.DecRefOn(%v): removing %d page references from chunk %#x, which has %d page references", mr, pgs, chunkStart, refs))
}
+ chunkStart += chunkSize
+ if chunkStart >= mr.End || chunkStart == 0 {
+ break
+ }
}
}
@@ -161,7 +171,8 @@ func (f *HostFileMapper) forEachMappingBlockLocked(fr memmap.FileRange, fd int,
if write {
prot |= unix.PROT_WRITE
}
- for chunkStart := fr.Start &^ chunkMask; chunkStart < fr.End; chunkStart += chunkSize {
+ chunkStart := fr.Start &^ chunkMask
+ for {
m, ok := f.mappings[chunkStart]
if !ok {
addr, _, errno := unix.Syscall6(
@@ -201,6 +212,10 @@ func (f *HostFileMapper) forEachMappingBlockLocked(fr memmap.FileRange, fd int,
endOff = fr.End - chunkStart
}
fn(f.unsafeBlockFromChunkMapping(m.addr).TakeFirst64(endOff).DropFirst64(startOff))
+ chunkStart += chunkSize
+ if chunkStart >= fr.End || chunkStart == 0 {
+ break
+ }
}
return nil
}
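
The three loop rewrites above replace the `for chunkStart := ...; chunkStart < end; chunkStart += chunkSize` form with an explicit break on `chunkStart >= end || chunkStart == 0`, so a range reaching the top of the uint64 space cannot wrap past end and spin forever. A self-contained sketch of the pattern follows, with an illustrative chunk size rather than the HostFileMapper constants.

package main

import "fmt"

const (
	chunkSize uint64 = 1 << 24
	chunkMask uint64 = chunkSize - 1
)

// forEachChunk visits every chunk overlapping [start, end). The wraparound
// check (chunkStart == 0 after the increment) terminates the walk when the
// last chunk ends exactly at the top of the uint64 address space, where the
// old `chunkStart < end` condition would never become false.
func forEachChunk(start, end uint64, fn func(chunkStart uint64)) {
	chunkStart := start &^ chunkMask
	for {
		fn(chunkStart)
		chunkStart += chunkSize
		if chunkStart >= end || chunkStart == 0 {
			break
		}
	}
}

func main() {
	last := ^uint64(0) // a range ending at the very top of the address space
	forEachChunk(last-2*chunkSize, last, func(c uint64) {
		fmt.Printf("chunk %#x\n", c)
	})
}
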
diff --git a/pkg/sentry/fs/fsutil/inode.go b/pkg/sentry/fs/fsutil/inode.go
index 85e7e35db..06a994193 100644
--- a/pkg/sentry/fs/fsutil/inode.go
+++ b/pkg/sentry/fs/fsutil/inode.go
@@ -17,12 +17,12 @@ package fsutil
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -166,7 +166,7 @@ func (i *InodeSimpleAttributes) DropLink() {
// StatFS implements fs.InodeOperations.StatFS.
func (i *InodeSimpleAttributes) StatFS(context.Context) (fs.Info, error) {
if i.fsType == 0 {
- return fs.Info{}, syserror.ENOSYS
+ return fs.Info{}, linuxerr.ENOSYS
}
return fs.Info{Type: i.fsType}, nil
}
@@ -218,7 +218,7 @@ func (i *InodeSimpleExtendedAttributes) GetXattr(_ context.Context, _ *fs.Inode,
value, ok := i.xattrs[name]
i.mu.RUnlock()
if !ok {
- return "", syserror.ENOATTR
+ return "", linuxerr.ENOATTR
}
return value, nil
}
@@ -229,17 +229,17 @@ func (i *InodeSimpleExtendedAttributes) SetXattr(_ context.Context, _ *fs.Inode,
defer i.mu.Unlock()
if i.xattrs == nil {
if flags&linux.XATTR_REPLACE != 0 {
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
i.xattrs = make(map[string]string)
}
_, ok := i.xattrs[name]
if ok && flags&linux.XATTR_CREATE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if !ok && flags&linux.XATTR_REPLACE != 0 {
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
i.xattrs[name] = value
@@ -265,7 +265,7 @@ func (i *InodeSimpleExtendedAttributes) RemoveXattr(_ context.Context, _ *fs.Ino
delete(i.xattrs, name)
return nil
}
- return syserror.ENOATTR
+ return linuxerr.ENOATTR
}
// staticFile is a file with static contents. It is returned by
@@ -293,7 +293,7 @@ type InodeNoStatFS struct{}
// StatFS implements fs.InodeOperations.StatFS.
func (InodeNoStatFS) StatFS(context.Context) (fs.Info, error) {
- return fs.Info{}, syserror.ENOSYS
+ return fs.Info{}, linuxerr.ENOSYS
}
// InodeStaticFileGetter implements GetFile for a file with static contents.
@@ -331,52 +331,52 @@ type InodeNotDirectory struct{}
// Lookup implements fs.InodeOperations.Lookup.
func (InodeNotDirectory) Lookup(context.Context, *fs.Inode, string) (*fs.Dirent, error) {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// Create implements fs.InodeOperations.Create.
func (InodeNotDirectory) Create(context.Context, *fs.Inode, string, fs.FileFlags, fs.FilePermissions) (*fs.File, error) {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// CreateLink implements fs.InodeOperations.CreateLink.
func (InodeNotDirectory) CreateLink(context.Context, *fs.Inode, string, string) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// CreateHardLink implements fs.InodeOperations.CreateHardLink.
func (InodeNotDirectory) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// CreateDirectory implements fs.InodeOperations.CreateDirectory.
func (InodeNotDirectory) CreateDirectory(context.Context, *fs.Inode, string, fs.FilePermissions) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Bind implements fs.InodeOperations.Bind.
func (InodeNotDirectory) Bind(context.Context, *fs.Inode, string, transport.BoundEndpoint, fs.FilePermissions) (*fs.Dirent, error) {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// CreateFifo implements fs.InodeOperations.CreateFifo.
func (InodeNotDirectory) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Remove implements fs.InodeOperations.Remove.
func (InodeNotDirectory) Remove(context.Context, *fs.Inode, string) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// RemoveDirectory implements fs.InodeOperations.RemoveDirectory.
func (InodeNotDirectory) RemoveDirectory(context.Context, *fs.Inode, string) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Rename implements fs.InodeOperations.Rename.
func (InodeNotDirectory) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// InodeNotSocket can be used by Inodes that are not sockets.
@@ -392,7 +392,7 @@ type InodeNotTruncatable struct{}
// Truncate implements fs.InodeOperations.Truncate.
func (InodeNotTruncatable) Truncate(context.Context, *fs.Inode, int64) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// InodeIsDirTruncate implements fs.InodeOperations.Truncate for directories.
@@ -400,7 +400,7 @@ type InodeIsDirTruncate struct{}
// Truncate implements fs.InodeOperations.Truncate.
func (InodeIsDirTruncate) Truncate(context.Context, *fs.Inode, int64) error {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
// InodeNoopTruncate implements fs.InodeOperations.Truncate as a noop.
@@ -416,7 +416,7 @@ type InodeNotRenameable struct{}
// Rename implements fs.InodeOperations.Rename.
func (InodeNotRenameable) Rename(context.Context, *fs.Inode, *fs.Inode, string, *fs.Inode, string, bool) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// InodeNotOpenable can be used by Inodes that cannot be opened.
@@ -424,7 +424,7 @@ type InodeNotOpenable struct{}
// GetFile implements fs.InodeOperations.GetFile.
func (InodeNotOpenable) GetFile(context.Context, *fs.Dirent, fs.FileFlags) (*fs.File, error) {
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
// InodeNotVirtual can be used by Inodes that are not virtual.
@@ -448,12 +448,12 @@ type InodeNotSymlink struct{}
// Readlink implements fs.InodeOperations.Readlink.
func (InodeNotSymlink) Readlink(context.Context, *fs.Inode) (string, error) {
- return "", syserror.ENOLINK
+ return "", linuxerr.ENOLINK
}
// Getlink implements fs.InodeOperations.Getlink.
func (InodeNotSymlink) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
- return nil, syserror.ENOLINK
+ return nil, linuxerr.ENOLINK
}
// InodeNoExtendedAttributes can be used by Inodes that do not support
@@ -462,22 +462,22 @@ type InodeNoExtendedAttributes struct{}
// GetXattr implements fs.InodeOperations.GetXattr.
func (InodeNoExtendedAttributes) GetXattr(context.Context, *fs.Inode, string, uint64) (string, error) {
- return "", syserror.EOPNOTSUPP
+ return "", linuxerr.EOPNOTSUPP
}
// SetXattr implements fs.InodeOperations.SetXattr.
func (InodeNoExtendedAttributes) SetXattr(context.Context, *fs.Inode, string, string, uint32) error {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
// ListXattr implements fs.InodeOperations.ListXattr.
func (InodeNoExtendedAttributes) ListXattr(context.Context, *fs.Inode, uint64) (map[string]struct{}, error) {
- return nil, syserror.EOPNOTSUPP
+ return nil, linuxerr.EOPNOTSUPP
}
// RemoveXattr implements fs.InodeOperations.RemoveXattr.
func (InodeNoExtendedAttributes) RemoveXattr(context.Context, *fs.Inode, string) error {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
// InodeNoopRelease implements fs.InodeOperations.Release as a noop.
@@ -512,7 +512,7 @@ type InodeNotAllocatable struct{}
// Allocate implements fs.InodeOperations.Allocate.
func (InodeNotAllocatable) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
// InodeNoopAllocate implements fs.InodeOperations.Allocate as a noop.
@@ -528,5 +528,5 @@ type InodeIsDirAllocate struct{}
// Allocate implements fs.InodeOperations.Allocate.
func (InodeIsDirAllocate) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go
index e107c3096..25e76d9f2 100644
--- a/pkg/sentry/fs/fsutil/inode_cached_test.go
+++ b/pkg/sentry/fs/fsutil/inode_cached_test.go
@@ -20,13 +20,13 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -244,7 +244,7 @@ func (*sliceBackingFile) FD() int {
}
func (f *sliceBackingFile) Allocate(ctx context.Context, offset int64, length int64) error {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
type noopMappingSpace struct{}
diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD
index 94cb05246..ee2f287d9 100644
--- a/pkg/sentry/fs/gofer/BUILD
+++ b/pkg/sentry/fs/gofer/BUILD
@@ -26,6 +26,8 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/hostarch",
"//pkg/log",
@@ -47,7 +49,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/unet",
"//pkg/usermem",
"//pkg/waiter",
@@ -62,10 +63,10 @@ go_test(
library = ":gofer",
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/p9",
"//pkg/p9/p9test",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
- "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/fs/gofer/file.go b/pkg/sentry/fs/gofer/file.go
index 73d80d9b5..62a517cd7 100644
--- a/pkg/sentry/fs/gofer/file.go
+++ b/pkg/sentry/fs/gofer/file.go
@@ -20,6 +20,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/p9"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -226,7 +226,7 @@ func (f *fileOperations) maybeSync(ctx context.Context, file *fs.File, offset, n
func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
if fs.IsDir(file.Dirent.Inode.StableAttr) {
// Not all remote file systems enforce this, so this client does.
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
var (
@@ -294,7 +294,7 @@ func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IO
if fs.IsDir(file.Dirent.Inode.StableAttr) {
// Not all remote file systems enforce this, so this client does.
f.incrementReadCounters(start)
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
if f.inodeOperations.session().cachePolicy.useCachingInodeOps(file.Dirent.Inode) {
diff --git a/pkg/sentry/fs/gofer/gofer_test.go b/pkg/sentry/fs/gofer/gofer_test.go
index 546ee7d04..4924debeb 100644
--- a/pkg/sentry/fs/gofer/gofer_test.go
+++ b/pkg/sentry/fs/gofer/gofer_test.go
@@ -19,8 +19,8 @@ import (
"testing"
"time"
- "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/p9/p9test"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
@@ -97,7 +97,7 @@ func TestLookup(t *testing.T) {
},
{
name: "mock Walk fails (function fails)",
- want: unix.ENOENT,
+ want: linuxerr.ENOENT,
},
}
@@ -123,7 +123,7 @@ func TestLookup(t *testing.T) {
var newInodeOperations fs.InodeOperations
if dirent != nil {
if dirent.IsNegative() {
- err = unix.ENOENT
+ err = linuxerr.ENOENT
} else {
newInodeOperations = dirent.Inode.InodeOperations
}
@@ -131,9 +131,11 @@ func TestLookup(t *testing.T) {
// Check return values.
if err != test.want {
+ t.Logf("err: %v %T", err, err)
t.Errorf("Lookup got err %v, want %v", err, test.want)
}
if err == nil && newInodeOperations == nil {
+ t.Logf("err: %v %T", err, err)
t.Errorf("Lookup got non-nil err and non-nil node, wanted at least one non-nil")
}
})
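
Several hunks in this change replace direct sentinel comparisons such as `err == syserror.ENOENT` with `linuxerr.Equals(linuxerr.ENOENT, err)`. The sketch below is a standard-library analogue of that difference, identity comparison versus an unwrap-aware check, and assumes nothing about the linuxerr API beyond the call shape shown in the diff.

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// lookup simulates a lower layer that wraps a sentinel error with context.
func lookup() error {
	return fmt.Errorf("walkGetAttr failed: %w", fs.ErrNotExist)
}

func main() {
	err := lookup()
	fmt.Println(err == fs.ErrNotExist)          // false: identity comparison misses the wrapped sentinel.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: unwrap-aware comparison, the role an Equals helper plays.
}
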
diff --git a/pkg/sentry/fs/gofer/inode.go b/pkg/sentry/fs/gofer/inode.go
index da3178527..c3856094f 100644
--- a/pkg/sentry/fs/gofer/inode.go
+++ b/pkg/sentry/fs/gofer/inode.go
@@ -20,6 +20,8 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ gErr "gvisor.dev/gvisor/pkg/errors"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
@@ -31,7 +33,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/host"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// inodeOperations implements fs.InodeOperations.
@@ -476,7 +477,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
switch d.Inode.StableAttr.Type {
case fs.Socket:
if i.session().overrides != nil {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
return i.getFileSocket(ctx, d, flags)
case fs.Pipe:
@@ -676,7 +677,7 @@ func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string
// Getlink implements fs.InodeOperations.Getlink.
func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
if !fs.IsSymlink(i.fileState.sattr) {
- return nil, syserror.ENOLINK
+ return nil, linuxerr.ENOLINK
}
return nil, fs.ErrResolveViaReadlink
}
@@ -714,16 +715,16 @@ func (i *inodeOperations) configureMMap(file *fs.File, opts *memmap.MMapOpts) er
if i.fileState.hostMappable != nil {
return fsutil.GenericConfigureMMap(file, i.fileState.hostMappable, opts)
}
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
func init() {
- syserror.AddErrorUnwrapper(func(err error) (unix.Errno, bool) {
+ linuxerr.AddErrorUnwrapper(func(err error) (*gErr.Error, bool) {
if _, ok := err.(p9.ErrSocket); ok {
// Treat as an I/O error.
- return unix.EIO, true
+ return linuxerr.EIO, true
}
- return 0, false
+ return nil, false
})
}
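
The init hook above re-registers the p9.ErrSocket translation so it yields the sentry's *errors.Error sentinel (EIO) instead of a raw unix.Errno. A small standalone sketch of the registry pattern it relies on is below: a list of unwrappers consulted in order until one claims the error. All names here are hypothetical stand-ins, not the linuxerr implementation.

package main

import (
	"errors"
	"fmt"
)

// errSocket stands in for p9.ErrSocket: a transport-level failure type.
type errSocket struct{ underlying error }

func (e errSocket) Error() string { return "socket error: " + e.underlying.Error() }

var errIO = errors.New("I/O error") // stands in for the EIO sentinel

// unwrappers is consulted in registration order; the first function that
// returns ok=true decides the translated error.
var unwrappers []func(error) (error, bool)

func addErrorUnwrapper(f func(error) (error, bool)) { unwrappers = append(unwrappers, f) }

func translate(err error) error {
	for _, f := range unwrappers {
		if translated, ok := f(err); ok {
			return translated
		}
	}
	return err
}

func main() {
	addErrorUnwrapper(func(err error) (error, bool) {
		if _, ok := err.(errSocket); ok {
			return errIO, true // treat transport failures as I/O errors
		}
		return nil, false
	})
	fmt.Println(translate(errSocket{underlying: errors.New("connection reset")}))
	fmt.Println(translate(errors.New("unrelated")))
}
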
diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go
index e2af1d2ae..19f91f010 100644
--- a/pkg/sentry/fs/gofer/inode_state.go
+++ b/pkg/sentry/fs/gofer/inode_state.go
@@ -112,13 +112,6 @@ func (i *inodeFileState) loadLoading(_ struct{}) {
// +checklocks:i.loading
func (i *inodeFileState) afterLoad() {
load := func() (err error) {
- // See comment on i.loading().
- defer func() {
- if err == nil {
- i.loading.Unlock()
- }
- }()
-
// Manually restore the p9.File.
name, ok := i.s.inodeMappings[i.sattr.InodeID]
if !ok {
@@ -167,6 +160,9 @@ func (i *inodeFileState) afterLoad() {
i.savedUAttr = nil
}
+ // See comment on i.loading(). This only unlocks on the
+ // non-error path.
+ i.loading.Unlock() // +checklocksforce: per comment.
return nil
}
diff --git a/pkg/sentry/fs/gofer/path.go b/pkg/sentry/fs/gofer/path.go
index 940838a44..2f8769f1e 100644
--- a/pkg/sentry/fs/gofer/path.go
+++ b/pkg/sentry/fs/gofer/path.go
@@ -18,13 +18,13 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sentry/device"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
)
// maxFilenameLen is the maximum length of a filename. This is dictated by 9P's
@@ -43,10 +43,11 @@ func changeType(mode p9.FileMode, newType p9.FileMode) p9.FileMode {
// policy.
func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {
if len(name) > maxFilenameLen {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
- cp := i.session().cachePolicy
+ s := i.session()
+ cp := s.cachePolicy
if cp.cacheReaddir() {
// Check to see if we have readdirCache that indicates the
// child does not exist. Avoid holding readdirMu longer than
@@ -58,7 +59,7 @@ func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string
if cp.cacheNegativeDirents() {
return fs.NewNegativeDirent(name), nil
}
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
i.readdirMu.Unlock()
}
@@ -66,18 +67,18 @@ func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string
// Get a p9.File for name.
qids, newFile, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name})
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
if cp.cacheNegativeDirents() {
// Return a negative Dirent. It will stay cached until something
// is created over it.
return fs.NewNegativeDirent(name), nil
}
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
return nil, err
}
- if i.session().overrides != nil {
+ if s.overrides != nil {
// Check if file belongs to an internal named pipe. Note that it doesn't need
// to check for sockets because it's done in newInodeOperations below.
deviceKey := device.MultiDeviceKey{
@@ -85,13 +86,13 @@ func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string
SecondaryDevice: i.session().connID,
Inode: qids[0].Path,
}
- unlock := i.session().overrides.lock()
- if pipeInode := i.session().overrides.getPipe(deviceKey); pipeInode != nil {
- unlock()
+ s.overrides.lock()
+ if pipeInode := s.overrides.getPipe(deviceKey); pipeInode != nil {
+ s.overrides.unlock()
pipeInode.IncRef()
return fs.NewDirent(ctx, pipeInode, name), nil
}
- unlock()
+ s.overrides.unlock()
}
// Construct the Inode operations.
@@ -106,7 +107,7 @@ func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string
// Ownership is currently ignored.
func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) {
if len(name) > maxFilenameLen {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
// Create replaces the directory fid with the newly created/opened
@@ -167,7 +168,7 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string
hostFile.Close()
}
unopened.close(ctx)
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
qid := qids[0]
@@ -195,7 +196,7 @@ func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string
// CreateLink uses Create to create a symlink between oldname and newname.
func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error {
if len(newname) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
owner := fs.FileOwnerFromContext(ctx)
@@ -209,29 +210,32 @@ func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname
// CreateHardLink implements InodeOperations.CreateHardLink.
func (i *inodeOperations) CreateHardLink(ctx context.Context, inode *fs.Inode, target *fs.Inode, newName string) error {
if len(newName) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
targetOpts, ok := target.InodeOperations.(*inodeOperations)
if !ok {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := i.fileState.file.link(ctx, &targetOpts.fileState.file, newName); err != nil {
return err
}
- if i.session().cachePolicy.cacheUAttrs(inode) {
+
+ s := i.session()
+ if s.cachePolicy.cacheUAttrs(inode) {
// Increase link count.
targetOpts.cachingInodeOps.IncLinks(ctx)
}
+
i.touchModificationAndStatusChangeTime(ctx, inode)
return nil
}
// CreateDirectory uses Create to create a directory with the given name under inodeOperations.
-func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s string, perm fs.FilePermissions) error {
- if len(s) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error {
+ if len(name) > maxFilenameLen {
+ return linuxerr.ENAMETOOLONG
}
// If the parent directory has setgid enabled, change the new directory's
@@ -246,16 +250,18 @@ func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s
perm.SetGID = true
}
- if _, err := i.fileState.file.mkdir(ctx, s, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
+ if _, err := i.fileState.file.mkdir(ctx, name, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
return err
}
- if i.session().cachePolicy.cacheUAttrs(dir) {
+
+ s := i.session()
+ if s.cachePolicy.cacheUAttrs(dir) {
// Increase link count.
//
// N.B. This will update the modification time.
i.cachingInodeOps.IncLinks(ctx)
}
- if i.session().cachePolicy.cacheReaddir() {
+ if s.cachePolicy.cacheReaddir() {
// Invalidate readdir cache.
i.markDirectoryDirty()
}
@@ -265,16 +271,17 @@ func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s
// Bind implements InodeOperations.Bind.
func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) {
if len(name) > maxFilenameLen {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
- if i.session().overrides == nil {
- return nil, syserror.EOPNOTSUPP
+ s := i.session()
+ if s.overrides == nil {
+ return nil, linuxerr.EOPNOTSUPP
}
// Stabilize the override map while creation is in progress.
- unlock := i.session().overrides.lock()
- defer unlock()
+ s.overrides.lock()
+ defer s.overrides.unlock()
sattr, iops, err := i.createEndpointFile(ctx, dir, name, perm, p9.ModeSocket)
if err != nil {
@@ -283,22 +290,23 @@ func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string,
// Construct the positive Dirent.
childDir := fs.NewDirent(ctx, fs.NewInode(ctx, iops, dir.MountSource, sattr), name)
- i.session().overrides.addBoundEndpoint(iops.fileState.key, childDir, ep)
+ s.overrides.addBoundEndpoint(iops.fileState.key, childDir, ep)
return childDir, nil
}
// CreateFifo implements fs.InodeOperations.CreateFifo.
func (i *inodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error {
if len(name) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
owner := fs.FileOwnerFromContext(ctx)
mode := p9.FileMode(perm.LinuxMode()) | p9.ModeNamedPipe
// N.B. FIFOs use major/minor numbers 0.
+ s := i.session()
if _, err := i.fileState.file.mknod(ctx, name, mode, 0, 0, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
- if i.session().overrides == nil || err != syserror.EPERM {
+ if s.overrides == nil || !linuxerr.Equals(linuxerr.EPERM, err) {
return err
}
// If gofer doesn't support mknod, check if we can create an internal fifo.
@@ -310,13 +318,14 @@ func (i *inodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name st
}
func (i *inodeOperations) createInternalFifo(ctx context.Context, dir *fs.Inode, name string, owner fs.FileOwner, perm fs.FilePermissions) error {
- if i.session().overrides == nil {
- return syserror.EPERM
+ s := i.session()
+ if s.overrides == nil {
+ return linuxerr.EPERM
}
// Stabilize the override map while creation is in progress.
- unlock := i.session().overrides.lock()
- defer unlock()
+ s.overrides.lock()
+ defer s.overrides.unlock()
sattr, fileOps, err := i.createEndpointFile(ctx, dir, name, perm, p9.ModeNamedPipe)
if err != nil {
@@ -335,7 +344,7 @@ func (i *inodeOperations) createInternalFifo(ctx context.Context, dir *fs.Inode,
// Construct the positive Dirent.
childDir := fs.NewDirent(ctx, fs.NewInode(ctx, iops, dir.MountSource, sattr), name)
- i.session().overrides.addPipe(fileOps.fileState.key, childDir, inode)
+ s.overrides.addPipe(fileOps.fileState.key, childDir, inode)
return nil
}
@@ -382,11 +391,12 @@ func (i *inodeOperations) createEndpointFile(ctx context.Context, dir *fs.Inode,
// Remove implements InodeOperations.Remove.
func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error {
if len(name) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
+ s := i.session()
var key *device.MultiDeviceKey
- if i.session().overrides != nil {
+ if s.overrides != nil {
// Find out if file being deleted is a socket or pipe that needs to be
// removed from endpoint map.
if d, err := i.Lookup(ctx, dir, name); err == nil {
@@ -401,8 +411,8 @@ func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string
}
// Stabilize the override map while deletion is in progress.
- unlock := i.session().overrides.lock()
- defer unlock()
+ s.overrides.lock()
+ defer s.overrides.unlock()
}
}
}
@@ -411,7 +421,7 @@ func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string
return err
}
if key != nil {
- i.session().overrides.remove(ctx, *key)
+ s.overrides.remove(ctx, *key)
}
i.touchModificationAndStatusChangeTime(ctx, dir)
@@ -421,18 +431,20 @@ func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string
// RemoveDirectory implements InodeOperations.RemoveDirectory.
func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {
if len(name) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
// 0x200 = AT_REMOVEDIR.
if err := i.fileState.file.unlinkAt(ctx, name, 0x200); err != nil {
return err
}
- if i.session().cachePolicy.cacheUAttrs(dir) {
+
+ s := i.session()
+ if s.cachePolicy.cacheUAttrs(dir) {
// Decrease link count and update atime.
i.cachingInodeOps.DecLinks(ctx)
}
- if i.session().cachePolicy.cacheReaddir() {
+ if s.cachePolicy.cacheReaddir() {
// Invalidate readdir cache.
i.markDirectoryDirty()
}
@@ -442,12 +454,12 @@ func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, na
// Rename renames this node.
func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {
if len(newName) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
// Don't allow renames across different mounts.
if newParent.MountSource != oldParent.MountSource {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
// Unwrap the new parent to a *inodeOperations.
@@ -462,12 +474,13 @@ func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent
}
// Is the renamed entity a directory? Fix link counts.
+ s := i.session()
if fs.IsDir(i.fileState.sattr) {
// Update cached state.
- if i.session().cachePolicy.cacheUAttrs(oldParent) {
+ if s.cachePolicy.cacheUAttrs(oldParent) {
oldParentInodeOperations.cachingInodeOps.DecLinks(ctx)
}
- if i.session().cachePolicy.cacheUAttrs(newParent) {
+ if s.cachePolicy.cacheUAttrs(newParent) {
// Only IncLinks if there is a new addition to
// newParent. If this is replacement, then the total
// count remains the same.
@@ -476,7 +489,7 @@ func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent
}
}
}
- if i.session().cachePolicy.cacheReaddir() {
+ if s.cachePolicy.cacheReaddir() {
// Mark old directory dirty.
oldParentInodeOperations.markDirectoryDirty()
if oldParent != newParent {
@@ -486,17 +499,18 @@ func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent
}
// Rename always updates ctime.
- if i.session().cachePolicy.cacheUAttrs(inode) {
+ if s.cachePolicy.cacheUAttrs(inode) {
i.cachingInodeOps.TouchStatusChangeTime(ctx)
}
return nil
}
func (i *inodeOperations) touchModificationAndStatusChangeTime(ctx context.Context, inode *fs.Inode) {
- if i.session().cachePolicy.cacheUAttrs(inode) {
+ s := i.session()
+ if s.cachePolicy.cacheUAttrs(inode) {
i.cachingInodeOps.TouchModificationAndStatusChangeTime(ctx)
}
- if i.session().cachePolicy.cacheReaddir() {
+ if s.cachePolicy.cacheReaddir() {
// Invalidate readdir cache.
i.markDirectoryDirty()
}
diff --git a/pkg/sentry/fs/gofer/session.go b/pkg/sentry/fs/gofer/session.go
index 7cf3522ff..b7debeecb 100644
--- a/pkg/sentry/fs/gofer/session.go
+++ b/pkg/sentry/fs/gofer/session.go
@@ -98,9 +98,14 @@ func (e *overrideMaps) remove(ctx context.Context, key device.MultiDeviceKey) {
// lock blocks other addition and removal operations from happening while
// the backing file is being created or deleted.
-func (e *overrideMaps) lock() func() {
+// +checklocksacquire:e.mu
+func (e *overrideMaps) lock() {
e.mu.Lock()
- return func() { e.mu.Unlock() }
+}
+
+// +checklocksrelease:e.mu
+func (e *overrideMaps) unlock() {
+ e.mu.Unlock()
}
// getBoundEndpoint returns the bound endpoint mapped to the given key.
@@ -366,8 +371,8 @@ func newOverrideMaps() *overrideMaps {
// fillKeyMap populates key and dirent maps upon restore from saved pathmap.
func (s *session) fillKeyMap(ctx context.Context) error {
- unlock := s.overrides.lock()
- defer unlock()
+ s.overrides.lock()
+ defer s.overrides.unlock()
for ep, dirPath := range s.overrides.pathMap {
_, file, err := s.attach.walk(ctx, splitAbsolutePath(dirPath))
@@ -394,8 +399,8 @@ func (s *session) fillKeyMap(ctx context.Context) error {
// fillPathMap populates paths for overrides from dirents in direntMap
// before save.
func (s *session) fillPathMap(ctx context.Context) error {
- unlock := s.overrides.lock()
- defer unlock()
+ s.overrides.lock()
+ defer s.overrides.unlock()
for _, endpoint := range s.overrides.keyMap {
mountRoot := endpoint.dirent.MountRoot()
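
The lock()/unlock() split above, replacing the returned unlock closure, lets call sites pair acquisitions and releases explicitly under the +checklocksacquire/+checklocksrelease annotations shown in the diff. A minimal sketch of that shape follows, using the standard library mutex and an illustrative type; the annotation comments are meaningful to gVisor's checklocks analyzer, not to the standard Go toolchain.

package main

import "sync"

// overrideRegistry stands in for the session's override maps; only the
// locking shape matters here.
type overrideRegistry struct {
	mu      sync.Mutex
	entries map[string]string // guarded by mu
}

// +checklocksacquire:r.mu
func (r *overrideRegistry) lock() {
	r.mu.Lock()
}

// +checklocksrelease:r.mu
func (r *overrideRegistry) unlock() {
	r.mu.Unlock()
}

// add shows the acquire/release pairing the analyzer can verify at each call site.
func (r *overrideRegistry) add(key, value string) {
	r.lock()
	defer r.unlock()
	if r.entries == nil {
		r.entries = make(map[string]string)
	}
	r.entries[key] = value
}

func main() {
	r := &overrideRegistry{}
	r.add("pipe", "/tmp/fifo")
}
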
diff --git a/pkg/sentry/fs/gofer/socket.go b/pkg/sentry/fs/gofer/socket.go
index 8a1c69ac2..1fd8a0910 100644
--- a/pkg/sentry/fs/gofer/socket.go
+++ b/pkg/sentry/fs/gofer/socket.go
@@ -32,10 +32,11 @@ func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) transport.
return nil
}
- if i.session().overrides != nil {
- unlock := i.session().overrides.lock()
- defer unlock()
- ep := i.session().overrides.getBoundEndpoint(i.fileState.key)
+ s := i.session()
+ if s.overrides != nil {
+ s.overrides.lock()
+ defer s.overrides.unlock()
+ ep := s.overrides.getBoundEndpoint(i.fileState.key)
if ep != nil {
return ep
}
diff --git a/pkg/sentry/fs/host/BUILD b/pkg/sentry/fs/host/BUILD
index 3c45f6cc5..921612e9c 100644
--- a/pkg/sentry/fs/host/BUILD
+++ b/pkg/sentry/fs/host/BUILD
@@ -28,9 +28,9 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/fdnotifier",
- "//pkg/iovec",
"//pkg/log",
"//pkg/marshal/primitive",
"//pkg/refs",
@@ -40,6 +40,7 @@ go_library(
"//pkg/sentry/device",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
+ "//pkg/sentry/hostfd",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/kernel/time",
@@ -51,7 +52,6 @@ go_library(
"//pkg/sentry/uniqueid",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/unet",
"//pkg/usermem",
diff --git a/pkg/sentry/fs/host/file.go b/pkg/sentry/fs/host/file.go
index 07bd078b7..1d0d95634 100644
--- a/pkg/sentry/fs/host/file.go
+++ b/pkg/sentry/fs/host/file.go
@@ -19,6 +19,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/log"
@@ -27,7 +28,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -200,7 +200,7 @@ func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.I
writer := fd.NewReadWriter(f.iops.fileState.FD())
n, err := src.CopyInTo(ctx, safemem.FromIOWriter{writer})
if isBlockError(err) {
- err = syserror.ErrWouldBlock
+ err = linuxerr.ErrWouldBlock
}
return n, err
}
@@ -231,7 +231,7 @@ func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IO
if n != 0 {
err = nil
} else {
- err = syserror.ErrWouldBlock
+ err = linuxerr.ErrWouldBlock
}
}
return n, err
@@ -268,7 +268,7 @@ func (f *fileOperations) Flush(context.Context, *fs.File) error {
// ConfigureMMap implements fs.FileOperations.ConfigureMMap.
func (f *fileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error {
if !canMap(file.Dirent.Inode) {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
return fsutil.GenericConfigureMMap(file, f.iops.cachingInodeOps, opts)
}
diff --git a/pkg/sentry/fs/host/host.go b/pkg/sentry/fs/host/host.go
index 081ba1dd8..9f6dbd7e9 100644
--- a/pkg/sentry/fs/host/host.go
+++ b/pkg/sentry/fs/host/host.go
@@ -17,8 +17,8 @@ package host
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// filesystem is a host filesystem.
@@ -40,7 +40,7 @@ func (*filesystem) Name() string {
// Mount returns an error. Mounting hostfs is not allowed.
func (*filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, dataObj interface{}) (*fs.Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// AllowUserMount prohibits users from using mount(2) with this file system.
diff --git a/pkg/sentry/fs/host/inode.go b/pkg/sentry/fs/host/inode.go
index e299b532c..92d58e3e9 100644
--- a/pkg/sentry/fs/host/inode.go
+++ b/pkg/sentry/fs/host/inode.go
@@ -17,6 +17,7 @@ package host
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/secio"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -113,7 +113,7 @@ func (i *inodeFileState) SetMaskedAttributes(ctx context.Context, mask fs.AttrMa
return nil
}
if mask.UID || mask.GID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if mask.Perms {
if err := unix.Fchmod(i.FD(), uint32(attr.Perms.LinuxMode())); err != nil {
@@ -219,53 +219,53 @@ func (i *inodeOperations) Release(context.Context) {
// Lookup implements fs.InodeOperations.Lookup.
func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
// Create implements fs.InodeOperations.Create.
func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// CreateDirectory implements fs.InodeOperations.CreateDirectory.
func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// CreateLink implements fs.InodeOperations.CreateLink.
func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// CreateHardLink implements fs.InodeOperations.CreateHardLink.
func (*inodeOperations) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// CreateFifo implements fs.InodeOperations.CreateFifo.
func (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Remove implements fs.InodeOperations.Remove.
func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// RemoveDirectory implements fs.InodeOperations.RemoveDirectory.
func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Rename implements fs.InodeOperations.Rename.
func (i *inodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Bind implements fs.InodeOperations.Bind.
func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) {
- return nil, syserror.EOPNOTSUPP
+ return nil, linuxerr.EOPNOTSUPP
}
// BoundEndpoint implements fs.InodeOperations.BoundEndpoint.
@@ -276,7 +276,7 @@ func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) transport.
// GetFile implements fs.InodeOperations.GetFile.
func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
if fs.IsSocket(d.Inode.StableAttr) {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
return newFile(ctx, d, flags, i), nil
@@ -313,7 +313,7 @@ func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermM
// SetOwner implements fs.InodeOperations.SetOwner.
func (i *inodeOperations) SetOwner(context.Context, *fs.Inode, fs.FileOwner) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// SetPermissions implements fs.InodeOperations.SetPermissions.
@@ -392,14 +392,14 @@ func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string
// Getlink implements fs.InodeOperations.Getlink.
func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
if !fs.IsSymlink(i.fileState.sattr) {
- return nil, syserror.ENOLINK
+ return nil, linuxerr.ENOLINK
}
return nil, fs.ErrResolveViaReadlink
}
// StatFS implements fs.InodeOperations.StatFS.
func (i *inodeOperations) StatFS(context.Context) (fs.Info, error) {
- return fs.Info{}, syserror.ENOSYS
+ return fs.Info{}, linuxerr.ENOSYS
}
// AddLink implements fs.InodeOperations.AddLink.
diff --git a/pkg/sentry/fs/host/socket.go b/pkg/sentry/fs/host/socket.go
index 46a2dc47d..54c421775 100644
--- a/pkg/sentry/fs/host/socket.go
+++ b/pkg/sentry/fs/host/socket.go
@@ -21,6 +21,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/refs"
@@ -31,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/unet"
"gvisor.dev/gvisor/pkg/waiter"
@@ -211,9 +211,9 @@ func (c *ConnectedEndpoint) Send(ctx context.Context, data [][]byte, controlMess
if n < totalLen && err == nil {
// The host only returns a short write if it would otherwise
// block (and only for stream sockets).
- err = syserror.EAGAIN
+ err = linuxerr.EAGAIN
}
- if n > 0 && err != syserror.EAGAIN {
+ if n > 0 && !linuxerr.Equals(linuxerr.EAGAIN, err) {
// The caller may need to block to send more data, but
// otherwise there isn't anything that can be done about an
// error with a partial write.
diff --git a/pkg/sentry/fs/host/socket_iovec.go b/pkg/sentry/fs/host/socket_iovec.go
index 7380d75e7..d98e3c6d1 100644
--- a/pkg/sentry/fs/host/socket_iovec.go
+++ b/pkg/sentry/fs/host/socket_iovec.go
@@ -16,8 +16,8 @@ package host
import (
"golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/iovec"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/sentry/hostfd"
)
// LINT.IfChange
@@ -66,13 +66,13 @@ func buildIovec(bufs [][]byte, maxlen int64, truncate bool) (length int64, iovec
if length > maxlen {
if truncate {
stopLen = maxlen
- err = syserror.EAGAIN
+ err = linuxerr.EAGAIN
} else {
- return 0, nil, nil, syserror.EMSGSIZE
+ return 0, nil, nil, linuxerr.EMSGSIZE
}
}
- if iovsRequired > iovec.MaxIovs {
+ if iovsRequired > hostfd.MaxSendRecvMsgIov {
// The kernel will reject our call if we pass this many iovs.
// Use a single intermediate buffer instead.
b := make([]byte, stopLen)
diff --git a/pkg/sentry/fs/host/tty.go b/pkg/sentry/fs/host/tty.go
index 1183727ab..4e561c5ed 100644
--- a/pkg/sentry/fs/host/tty.go
+++ b/pkg/sentry/fs/host/tty.go
@@ -17,13 +17,13 @@ package host
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -126,7 +126,7 @@ func (t *TTYFileOperations) Release(ctx context.Context) {
func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
task := kernel.TaskFromContext(ctx)
if task == nil {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// Ignore arg[0]. This is the real FD:
@@ -167,7 +167,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO
pidns := kernel.PIDNamespaceFromContext(ctx)
if pidns == nil {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
t.mu.Lock()
@@ -191,8 +191,8 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO
if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
// drivers/tty/tty_io.c:tiocspgrp() converts -EIO from
// tty_check_change() to -ENOTTY.
- if err == syserror.EIO {
- return 0, syserror.ENOTTY
+ if linuxerr.Equals(linuxerr.EIO, err) {
+ return 0, linuxerr.ENOTTY
}
return 0, err
}
@@ -200,7 +200,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO
// Check that calling task's process group is in the TTY
// session.
if task.ThreadGroup().Session() != t.session {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
var pgIDP primitive.Int32
@@ -211,19 +211,19 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO
// pgID must be non-negative.
if pgID < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Process group with pgID must exist in this PID namespace.
pidns := task.PIDNamespace()
pg := pidns.ProcessGroupWithID(pgID)
if pg == nil {
- return 0, syserror.ESRCH
+ return 0, linuxerr.ESRCH
}
// Check that new process group is in the TTY session.
if pg.Session() != t.session {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
t.fgProcessGroup = pg
@@ -283,7 +283,7 @@ func (t *TTYFileOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO
unimpl.EmitUnimplementedEvent(ctx)
fallthrough
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
@@ -326,7 +326,7 @@ func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) e
// If the signal is SIGTTIN, then we are attempting to read
// from the TTY. Don't send the signal and return EIO.
if sig == linux.SIGTTIN {
- return syserror.EIO
+ return linuxerr.EIO
}
// Otherwise, we are writing or changing terminal state. This is allowed.
@@ -335,7 +335,7 @@ func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) e
// If the process group is an orphan, return EIO.
if pg.IsOrphan() {
- return syserror.EIO
+ return linuxerr.EIO
}
// Otherwise, send the signal to the process group and return ERESTARTSYS.
@@ -348,7 +348,7 @@ func (t *TTYFileOperations) checkChange(ctx context.Context, sig linux.Signal) e
//
// Linux ignores the result of kill_pgrp().
_ = pg.SendSignal(kernel.SignalInfoPriv(sig))
- return syserror.ERESTARTSYS
+ return linuxerr.ERESTARTSYS
}
// LINT.ThenChange(../../fsimpl/host/tty.go)
diff --git a/pkg/sentry/fs/host/util.go b/pkg/sentry/fs/host/util.go
index ab74724a3..f2a33cc14 100644
--- a/pkg/sentry/fs/host/util.go
+++ b/pkg/sentry/fs/host/util.go
@@ -19,12 +19,12 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/device"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
func nodeType(s *unix.Stat_t) fs.InodeType {
@@ -96,9 +96,9 @@ type dirInfo struct {
// LINT.IfChange
// isBlockError unwraps os errors and checks if they are caused by EAGAIN or
-// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock.
+// EWOULDBLOCK. This is so they can be transformed into linuxerr.ErrWouldBlock.
func isBlockError(err error) bool {
- if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK {
+ if linuxerr.Equals(linuxerr.EAGAIN, err) || linuxerr.Equals(linuxerr.EWOULDBLOCK, err) {
return true
}
if pe, ok := err.(*os.PathError); ok {
diff --git a/pkg/sentry/fs/host/util_amd64_unsafe.go b/pkg/sentry/fs/host/util_amd64_unsafe.go
index 21782f1da..e90629f4e 100644
--- a/pkg/sentry/fs/host/util_amd64_unsafe.go
+++ b/pkg/sentry/fs/host/util_amd64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package host
diff --git a/pkg/sentry/fs/host/util_arm64_unsafe.go b/pkg/sentry/fs/host/util_arm64_unsafe.go
index ed8f5242a..9fbb93726 100644
--- a/pkg/sentry/fs/host/util_arm64_unsafe.go
+++ b/pkg/sentry/fs/host/util_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package host
diff --git a/pkg/sentry/fs/inode.go b/pkg/sentry/fs/inode.go
index 41a3c2047..2c6b9e9db 100644
--- a/pkg/sentry/fs/inode.go
+++ b/pkg/sentry/fs/inode.go
@@ -17,6 +17,7 @@ package fs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Inode is a file system object that can be simultaneously referenced by different
@@ -298,7 +298,7 @@ func (i *Inode) RemoveXattr(ctx context.Context, d *Dirent, name string) error {
func (i *Inode) CheckPermission(ctx context.Context, p PermMask) error {
// First check the outer-most mounted filesystem.
if p.Write && i.MountSource.Flags.ReadOnly {
- return syserror.EROFS
+ return linuxerr.EROFS
}
if i.overlay != nil {
@@ -312,7 +312,7 @@ func (i *Inode) CheckPermission(ctx context.Context, p PermMask) error {
// we should not attempt to modify the writable layer if it
// is mounted read-only.
if p.Write && overlayUpperMountSource(i.MountSource).Flags.ReadOnly {
- return syserror.EROFS
+ return linuxerr.EROFS
}
}
@@ -324,7 +324,7 @@ func (i *Inode) check(ctx context.Context, p PermMask) error {
return overlayCheck(ctx, i.overlay, p)
}
if !i.InodeOperations.Check(ctx, i, p) {
- return syserror.EACCES
+ return linuxerr.EACCES
}
return nil
}
@@ -356,7 +356,7 @@ func (i *Inode) SetTimestamps(ctx context.Context, d *Dirent, ts TimeSpec) error
// Truncate calls i.InodeOperations.Truncate with i as the Inode.
func (i *Inode) Truncate(ctx context.Context, d *Dirent, size int64) error {
if IsDir(i.StableAttr) {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if i.overlay != nil {
diff --git a/pkg/sentry/fs/inode_operations.go b/pkg/sentry/fs/inode_operations.go
index 2bbfb72ef..0f8022906 100644
--- a/pkg/sentry/fs/inode_operations.go
+++ b/pkg/sentry/fs/inode_operations.go
@@ -66,7 +66,7 @@ type InodeOperations interface {
//
// * A nil Dirent and a non-nil error. If the reason that Lookup failed
// was because the name does not exist under Inode, then must return
- // syserror.ENOENT.
+ // linuxerr.ENOENT.
//
// * If name does not exist under dir and the file system wishes this
// fact to be cached, a non-nil Dirent containing a nil Inode and a
@@ -283,7 +283,7 @@ type InodeOperations interface {
//
// Any error returned from Getlink other than ErrResolveViaReadlink
// indicates the caller's inability to traverse this Inode as a link
- // (e.g. syserror.ENOLINK indicates that the Inode is not a link,
+ // (e.g. linuxerr.ENOLINK indicates that the Inode is not a link,
// syscall.EPERM indicates that traversing the link is not allowed, etc).
Getlink(context.Context, *Inode) (*Dirent, error)
diff --git a/pkg/sentry/fs/inode_overlay.go b/pkg/sentry/fs/inode_overlay.go
index e97afc626..21ad7fa69 100644
--- a/pkg/sentry/fs/inode_overlay.go
+++ b/pkg/sentry/fs/inode_overlay.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
)
func overlayHasWhiteout(ctx context.Context, parent *Inode, name string) bool {
@@ -71,7 +71,7 @@ func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name
// A file could have been created over a whiteout, so we need to
// check if something exists in the upper file system first.
child, err := parent.upper.Lookup(ctx, name)
- if err != nil && err != syserror.ENOENT {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
// We encountered an error that an overlay cannot handle,
// we must propagate it to the caller.
parent.copyMu.RUnlock()
@@ -102,7 +102,7 @@ func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name
// Upper fs is not OK with a negative Dirent
// being cached in the Dirent tree, so don't
// return one.
- return nil, false, syserror.ENOENT
+ return nil, false, linuxerr.ENOENT
}
entry, err := newOverlayEntry(ctx, upperInode, nil, false)
if err != nil {
@@ -125,7 +125,7 @@ func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name
// Check the lower file system.
child, err := parent.lower.Lookup(ctx, name)
// Same song and dance as above.
- if err != nil && err != syserror.ENOENT {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
// Don't leak resources.
if upperInode != nil {
upperInode.DecRef(ctx)
@@ -164,7 +164,7 @@ func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name
if negativeUpperChild {
return NewNegativeDirent(name), false, nil
}
- return nil, false, syserror.ENOENT
+ return nil, false, linuxerr.ENOENT
}
// Did we find a lower Inode? Remember this because we may decide we don't
@@ -343,7 +343,7 @@ func overlayRemove(ctx context.Context, o *overlayEntry, parent *Dirent, child *
return err
}
if ser.Written() != 0 {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
}
if child.Inode.overlay.upper != nil {
@@ -374,7 +374,7 @@ func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, rena
// Maybe some day we can allow the more complicated case of
// non-overlay X overlay renames, but that's not necessary right now.
if renamed.Inode.overlay == nil || newParent.Inode.overlay == nil || oldParent.Inode.overlay == nil {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if replacement {
@@ -396,7 +396,7 @@ func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, rena
// newName has been removed out from under us. That's fine;
// filesystems where that can happen must handle stale
// 'replaced'.
- if err != nil && err != syserror.ENOENT {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
return err
}
if err == nil {
@@ -420,7 +420,7 @@ func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, rena
// need to bother checking for them.
if len(children) > 0 {
replaced.DecRef(ctx)
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
}
@@ -552,7 +552,7 @@ func overlayGetXattr(ctx context.Context, o *overlayEntry, name string, size uin
// Don't forward the value of the extended attribute if it would
// unexpectedly change the behavior of a wrapping overlay layer.
if isXattrOverlay(name) {
- return "", syserror.ENODATA
+ return "", linuxerr.ENODATA
}
o.copyMu.RLock()
@@ -568,7 +568,7 @@ func overlayGetXattr(ctx context.Context, o *overlayEntry, name string, size uin
func overlaySetXattr(ctx context.Context, o *overlayEntry, d *Dirent, name, value string, flags uint32) error {
// Don't allow changes to overlay xattrs through a setxattr syscall.
if isXattrOverlay(name) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := copyUp(ctx, d); err != nil {
@@ -600,7 +600,7 @@ func overlayListXattr(ctx context.Context, o *overlayEntry, size uint64) (map[st
func overlayRemoveXattr(ctx context.Context, o *overlayEntry, d *Dirent, name string) error {
// Don't allow changes to overlay xattrs through a removexattr syscall.
if isXattrOverlay(name) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := copyUp(ctx, d); err != nil {
@@ -687,7 +687,7 @@ func overlayGetlink(ctx context.Context, o *overlayEntry) (*Dirent, error) {
dirent.DecRef(ctx)
// Claim that the path is not accessible.
- err = syserror.EACCES
+ err = linuxerr.EACCES
log.Warningf("Getlink not supported in overlay for %q", name)
}
return nil, err
diff --git a/pkg/sentry/fs/inode_overlay_test.go b/pkg/sentry/fs/inode_overlay_test.go
index aa9851b26..a3800d700 100644
--- a/pkg/sentry/fs/inode_overlay_test.go
+++ b/pkg/sentry/fs/inode_overlay_test.go
@@ -18,11 +18,11 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
)
func TestLookup(t *testing.T) {
@@ -191,11 +191,11 @@ func TestLookup(t *testing.T) {
} {
t.Run(test.desc, func(t *testing.T) {
dirent, err := test.dir.Lookup(ctx, test.name)
- if test.found && (err == syserror.ENOENT || dirent.IsNegative()) {
+ if test.found && (linuxerr.Equals(linuxerr.ENOENT, err) || dirent.IsNegative()) {
t.Fatalf("lookup %q expected to find positive dirent, got dirent %v err %v", test.name, dirent, err)
}
if !test.found {
- if err != syserror.ENOENT && !dirent.IsNegative() {
+ if !linuxerr.Equals(linuxerr.ENOENT, err) && !dirent.IsNegative() {
t.Errorf("lookup %q expected to return ENOENT or negative dirent, got dirent %v err %v", test.name, dirent, err)
}
// Nothing more to check.
@@ -389,7 +389,7 @@ func (d *dir) GetXattr(_ context.Context, _ *fs.Inode, name string, _ uint64) (s
return "y", nil
}
}
- return "", syserror.ENOATTR
+ return "", linuxerr.ENOATTR
}
// GetFile implements InodeOperations.GetFile.
diff --git a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go
index 1b83643db..51cd6cd37 100644
--- a/pkg/sentry/fs/inotify.go
+++ b/pkg/sentry/fs/inotify.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -116,23 +116,23 @@ func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask {
// Seek implements FileOperations.Seek.
func (*Inotify) Seek(context.Context, *File, SeekWhence, int64) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Readdir implements FileOperations.Readdir.
func (*Inotify) Readdir(context.Context, *File, DentrySerializer) (int64, error) {
- return 0, syserror.ENOTDIR
+ return 0, linuxerr.ENOTDIR
}
// Write implements FileOperations.Write.
func (*Inotify) Write(context.Context, *File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Read implements FileOperations.Read.
func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ int64) (int64, error) {
if dst.NumBytes() < inotifyEventBaseSize {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
i.evMu.Lock()
@@ -140,7 +140,7 @@ func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ i
if i.events.Empty() {
// Nothing to read yet, tell caller to block.
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
var writeLen int64
@@ -156,7 +156,7 @@ func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ i
// write some events out.
return writeLen, nil
}
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Linux always dequeues an available event as long as there's enough
@@ -178,17 +178,17 @@ func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ i
// WriteTo implements FileOperations.WriteTo.
func (*Inotify) WriteTo(context.Context, *File, io.Writer, int64, bool) (int64, error) {
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// Fsync implements FileOperations.Fsync.
func (*Inotify) Fsync(context.Context, *File, int64, int64, SyncType) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// ReadFrom implements FileOperations.ReadFrom.
func (*Inotify) ReadFrom(context.Context, *File, io.Reader, int64) (int64, error) {
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// Flush implements FileOperations.Flush.
@@ -198,7 +198,7 @@ func (*Inotify) Flush(context.Context, *File) error {
// ConfigureMMap implements FileOperations.ConfigureMMap.
func (*Inotify) ConfigureMMap(context.Context, *File, *memmap.MMapOpts) error {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
// UnstableAttr implements FileOperations.UnstableAttr.
@@ -222,7 +222,7 @@ func (i *Inotify) Ioctl(ctx context.Context, _ *File, io usermem.IO, args arch.S
return 0, err
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
@@ -329,7 +329,7 @@ func (i *Inotify) RmWatch(ctx context.Context, wd int32) error {
watch, ok := i.watches[wd]
if !ok {
i.mu.Unlock()
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Remove the watch from this instance.
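
Read above now reports an empty event queue with linuxerr.ErrWouldBlock. A minimal sketch of how that sentinel is consumed, with a hypothetical readOnce helper standing in for Inotify.Read:

    package main

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/errors/linuxerr"
    )

    // readOnce is a hypothetical non-blocking read, standing in for Inotify.Read.
    func readOnce(queueEmpty bool) (int64, error) {
    	if queueEmpty {
    		return 0, linuxerr.ErrWouldBlock
    	}
    	return 16, nil
    }

    func main() {
    	if _, err := readOnce(true); err == linuxerr.ErrWouldBlock {
    		// ErrWouldBlock is an internal sentinel: the syscall layer blocks
    		// on the file's wait queue and retries, or converts it to EAGAIN
    		// for non-blocking files. It never reaches userspace as-is.
    		fmt.Println("queue empty; wait for readiness and retry")
    	}
    }
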
diff --git a/pkg/sentry/fs/mock.go b/pkg/sentry/fs/mock.go
index 1d6ea5736..fba7b961b 100644
--- a/pkg/sentry/fs/mock.go
+++ b/pkg/sentry/fs/mock.go
@@ -16,7 +16,7 @@ package fs
import (
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// MockInodeOperations implements InodeOperations for testing Inodes.
@@ -109,7 +109,7 @@ func (n *MockInodeOperations) SetPermissions(context.Context, *Inode, FilePermis
// SetOwner implements fs.InodeOperations.SetOwner.
func (*MockInodeOperations) SetOwner(context.Context, *Inode, FileOwner) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// SetTimestamps implements fs.InodeOperations.SetTimestamps.
@@ -172,5 +172,5 @@ func (n *MockInodeOperations) RemoveDirectory(context.Context, *Inode, string) e
// Getlink implements fs.InodeOperations.Getlink.
func (n *MockInodeOperations) Getlink(context.Context, *Inode) (*Dirent, error) {
- return nil, syserror.ENOLINK
+ return nil, linuxerr.ENOLINK
}
diff --git a/pkg/sentry/fs/mounts.go b/pkg/sentry/fs/mounts.go
index 243098a09..10146af4e 100644
--- a/pkg/sentry/fs/mounts.go
+++ b/pkg/sentry/fs/mounts.go
@@ -20,10 +20,10 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// DefaultTraversalLimit provides a sensible default traversal limit that may
@@ -281,7 +281,7 @@ func (mns *MountNamespace) withMountLocked(node *Dirent, fn func() error) error
// Linux allows mounting over the root (?). It comes with a strange set
// of semantics. We'll just not do this for now.
if node.parent == nil {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// For both mount and unmount, we take this lock so we can swap out the
@@ -357,7 +357,7 @@ func (mns *MountNamespace) Unmount(ctx context.Context, node *Dirent, detachOnly
orig, ok := mns.mounts[node]
if !ok {
// node is not a mount point.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if orig.previous == nil {
@@ -380,7 +380,7 @@ func (mns *MountNamespace) Unmount(ctx context.Context, node *Dirent, detachOnly
if refs := m.DirentRefs(); refs < 2 {
panic(fmt.Sprintf("have %d refs on unmount, expect 2 or more", refs))
} else if refs != 2 {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
@@ -497,7 +497,7 @@ func (mns *MountNamespace) FindLink(ctx context.Context, root, wd *Dirent, path
if current != root {
if !IsDir(current.Inode.StableAttr) {
current.DecRef(ctx) // Drop reference from above.
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := current.Inode.CheckPermission(ctx, PermMask{Execute: true}); err != nil {
current.DecRef(ctx) // Drop reference from above.
@@ -566,8 +566,8 @@ func (mns *MountNamespace) resolve(ctx context.Context, root, node *Dirent, rema
// Resolve the path.
target, err := node.Inode.Getlink(ctx)
- switch err {
- case nil:
+ switch {
+ case err == nil:
// Make sure we didn't exhaust the traversal budget.
if *remainingTraversals == 0 {
target.DecRef(ctx)
@@ -577,11 +577,11 @@ func (mns *MountNamespace) resolve(ctx context.Context, root, node *Dirent, rema
node.DecRef(ctx) // Drop the original reference.
return target, nil
- case unix.ENOLINK:
+ case linuxerr.Equals(linuxerr.ENOLINK, err):
// Not a symlink.
return node, nil
- case ErrResolveViaReadlink:
+ case err == ErrResolveViaReadlink:
defer node.DecRef(ctx) // See above.
// First, check if we should traverse.
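
resolve's error handling above changes from a value switch to a tagless switch because linuxerr.Equals is a predicate call, not a comparable case value. A minimal sketch of the resulting shape, with classify as a hypothetical reduction of that switch:

    package main

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/errors/linuxerr"
    )

    // classify shows why a tagless switch is needed: it can mix a nil check,
    // a predicate call, and a default arm in one construct.
    func classify(err error) string {
    	switch {
    	case err == nil:
    		return "followed symlink"
    	case linuxerr.Equals(linuxerr.ENOLINK, err):
    		return "not a symlink; use the node as-is"
    	default:
    		return "propagate the error"
    	}
    }

    func main() {
    	fmt.Println(classify(nil))
    	fmt.Println(classify(linuxerr.ENOLINK))
    	fmt.Println(classify(linuxerr.EACCES))
    }
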
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index f96f5a3e5..7e72e47b5 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -19,11 +19,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// The virtual filesystem implements an overlay configuration. For a high-level
@@ -218,7 +218,7 @@ func newOverlayEntry(ctx context.Context, upper *Inode, lower *Inode, lowerExist
// We don't support copying up from character devices,
// named pipes, or anything weird (like proc files).
log.Warningf("%s not supported in lower filesytem", lower.StableAttr.Type)
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
}
return &overlayEntry{
diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD
index 7af7e0b45..bc75ae505 100644
--- a/pkg/sentry/fs/proc/BUILD
+++ b/pkg/sentry/fs/proc/BUILD
@@ -30,6 +30,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/sentry/fs",
@@ -49,7 +50,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/usage",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/tcpip/header",
"//pkg/tcpip/network/ipv4",
"//pkg/usermem",
diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go
index 24426b225..75dc5d204 100644
--- a/pkg/sentry/fs/proc/exec_args.go
+++ b/pkg/sentry/fs/proc/exec_args.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -104,10 +104,10 @@ var _ fs.FileOperations = (*execArgFile)(nil)
// Read reads the exec arg from the process's address space.
func (f *execArgFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
- m, err := getTaskMM(f.t)
+ m, err := getTaskMMIncRef(f.t)
if err != nil {
return 0, err
}
diff --git a/pkg/sentry/fs/proc/fds.go b/pkg/sentry/fs/proc/fds.go
index e90da225a..e68bb46c0 100644
--- a/pkg/sentry/fs/proc/fds.go
+++ b/pkg/sentry/fs/proc/fds.go
@@ -20,12 +20,12 @@ import (
"strconv"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -37,7 +37,7 @@ func walkDescriptors(t *kernel.Task, p string, toInode func(*fs.File, kernel.FDF
n, err := strconv.ParseUint(p, 10, 64)
if err != nil {
// Not found.
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
var file *fs.File
@@ -48,7 +48,7 @@ func walkDescriptors(t *kernel.Task, p string, toInode func(*fs.File, kernel.FDF
}
})
if file == nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
return toInode(file, fdFlags), nil
}
diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go
index 91c35eea9..187e9a921 100644
--- a/pkg/sentry/fs/proc/net.go
+++ b/pkg/sentry/fs/proc/net.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -34,7 +35,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/unix"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/header"
)
@@ -291,7 +291,7 @@ func (n *netSnmp) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s
continue
}
if err := n.s.Statistics(stat, line.prefix); err != nil {
- if err == syserror.EOPNOTSUPP {
+ if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
log.Infof("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err)
} else {
log.Warningf("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err)
diff --git a/pkg/sentry/fs/proc/proc.go b/pkg/sentry/fs/proc/proc.go
index 2f2a9f920..b9629c598 100644
--- a/pkg/sentry/fs/proc/proc.go
+++ b/pkg/sentry/fs/proc/proc.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package proc implements a partial in-memory file system for profs.
+// Package proc implements a partial in-memory file system for procfs.
package proc
import (
@@ -21,13 +21,13 @@ import (
"strconv"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -124,13 +124,13 @@ func (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
if t := kernel.TaskFromContext(ctx); t != nil {
tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
if tgid == 0 {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
return strconv.FormatUint(uint64(tgid), 10), nil
}
// Who is reading this link?
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
// threadSelf is more magical than "self" link.
@@ -148,13 +148,13 @@ func (s *threadSelf) Readlink(ctx context.Context, inode *fs.Inode) (string, err
tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
tid := s.pidns.IDOfTask(t)
if tid == 0 || tgid == 0 {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
return fmt.Sprintf("%d/task/%d", tgid, tid), nil
}
// Who is reading this link?
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
// Lookup loads an Inode at name into a Dirent.
diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD
index 713b81e08..90bd32345 100644
--- a/pkg/sentry/fs/proc/seqfile/BUILD
+++ b/pkg/sentry/fs/proc/seqfile/BUILD
@@ -9,13 +9,13 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/fs/proc/device",
"//pkg/sentry/kernel/time",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fs/proc/seqfile/seqfile.go b/pkg/sentry/fs/proc/seqfile/seqfile.go
index b01688b1d..77270814e 100644
--- a/pkg/sentry/fs/proc/seqfile/seqfile.go
+++ b/pkg/sentry/fs/proc/seqfile/seqfile.go
@@ -20,13 +20,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/device"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -204,7 +204,7 @@ var _ fs.FileOperations = (*seqFileOperations)(nil)
// Write implements fs.FileOperations.Write.
func (*seqFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EACCES
+ return 0, linuxerr.EACCES
}
// Read implements fs.FileOperations.Read.
diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go
index 4893af56b..71f37d582 100644
--- a/pkg/sentry/fs/proc/sys_net.go
+++ b/pkg/sentry/fs/proc/sys_net.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -592,7 +592,7 @@ func (pf *portRangeFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSe
// Port numbers must be uint16s.
if ports[0] < 0 || ports[1] < 0 || ports[0] > math.MaxUint16 || ports[1] > math.MaxUint16 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if err := pf.inode.stack.SetPortRange(uint16(ports[0]), uint16(ports[1])); err != nil {
diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go
index ae5ed25f9..03f2a882d 100644
--- a/pkg/sentry/fs/proc/task.go
+++ b/pkg/sentry/fs/proc/task.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -34,19 +35,32 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
// LINT.IfChange
-// getTaskMM returns t's MemoryManager. If getTaskMM succeeds, the MemoryManager's
-// users count is incremented, and must be decremented by the caller when it is
-// no longer in use.
-func getTaskMM(t *kernel.Task) (*mm.MemoryManager, error) {
+// getTaskMM gets the kernel task's MemoryManager. No additional reference is
+// taken on mm here. This is safe because MemoryManager.destroy is required to
+// leave the MemoryManager in a state where it's still usable as a
+// DynamicBytesSource.
+func getTaskMM(t *kernel.Task) *mm.MemoryManager {
+ var tmm *mm.MemoryManager
+ t.WithMuLocked(func(t *kernel.Task) {
+ if mm := t.MemoryManager(); mm != nil {
+ tmm = mm
+ }
+ })
+ return tmm
+}
+
+// getTaskMMIncRef returns t's MemoryManager. If getTaskMMIncRef succeeds, the
+// MemoryManager's users count is incremented, and must be decremented by the
+// caller when it is no longer in use.
+func getTaskMMIncRef(t *kernel.Task) (*mm.MemoryManager, error) {
if t.ExitState() == kernel.TaskExitDead {
- return nil, syserror.ESRCH
+ return nil, linuxerr.ESRCH
}
var m *mm.MemoryManager
t.WithMuLocked(func(t *kernel.Task) {
@@ -61,9 +75,9 @@ func getTaskMM(t *kernel.Task) (*mm.MemoryManager, error) {
func checkTaskState(t *kernel.Task) error {
switch t.ExitState() {
case kernel.TaskExitZombie:
- return syserror.EACCES
+ return linuxerr.EACCES
case kernel.TaskExitDead:
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
return nil
}
@@ -181,7 +195,7 @@ func (f *subtasksFile) Readdir(ctx context.Context, file *fs.File, ser fs.Dentry
tasks := f.t.ThreadGroup().MemberIDs(f.pidns)
if len(tasks) == 0 {
- return offset, syserror.ENOENT
+ return offset, linuxerr.ENOENT
}
if offset == 0 {
@@ -233,15 +247,15 @@ var _ fs.FileOperations = (*subtasksFile)(nil)
func (s *subtasks) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) {
tid, err := strconv.ParseUint(p, 10, 32)
if err != nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
task := s.p.pidns.TaskWithID(kernel.ThreadID(tid))
if task == nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
if task.ThreadGroup() != s.t.ThreadGroup() {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
td := s.p.newTaskDir(ctx, task, dir.MountSource, false)
@@ -269,28 +283,25 @@ func (e *exe) executable() (file fsbridge.File, err error) {
if err := checkTaskState(e.t); err != nil {
return nil, err
}
- e.t.WithMuLocked(func(t *kernel.Task) {
- mm := t.MemoryManager()
- if mm == nil {
- err = syserror.EACCES
- return
- }
+ mm := getTaskMM(e.t)
+ if mm == nil {
+ return nil, linuxerr.EACCES
+ }
- // The MemoryManager may be destroyed, in which case
- // MemoryManager.destroy will simply set the executable to nil
- // (with locks held).
- file = mm.Executable()
- if file == nil {
- err = syserror.ESRCH
- }
- })
+ // The MemoryManager may be destroyed, in which case
+ // MemoryManager.destroy will simply set the executable to nil
+ // (with locks held).
+ file = mm.Executable()
+ if file == nil {
+ err = linuxerr.ESRCH
+ }
return
}
// Readlink implements fs.InodeOperations.
func (e *exe) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
if !kernel.ContextCanTrace(ctx, e.t, false) {
- return "", syserror.EACCES
+ return "", linuxerr.EACCES
}
// Pull out the executable for /proc/TID/exe.
@@ -323,7 +334,7 @@ func newCwd(ctx context.Context, t *kernel.Task, msrc *fs.MountSource) *fs.Inode
// Readlink implements fs.InodeOperations.
func (e *cwd) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
if !kernel.ContextCanTrace(ctx, e.t, false) {
- return "", syserror.EACCES
+ return "", linuxerr.EACCES
}
if err := checkTaskState(e.t); err != nil {
return "", err
@@ -331,14 +342,14 @@ func (e *cwd) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
cwd := e.t.FSContext().WorkingDirectory()
if cwd == nil {
// It could have raced with process deletion.
- return "", syserror.ESRCH
+ return "", linuxerr.ESRCH
}
defer cwd.DecRef(ctx)
root := fs.RootFromContext(ctx)
if root == nil {
// It could have raced with process deletion.
- return "", syserror.ESRCH
+ return "", linuxerr.ESRCH
}
defer root.DecRef(ctx)
@@ -380,7 +391,7 @@ func (n *namespaceSymlink) Readlink(ctx context.Context, inode *fs.Inode) (strin
// Getlink implements fs.InodeOperations.Getlink.
func (n *namespaceSymlink) Getlink(ctx context.Context, inode *fs.Inode) (*fs.Dirent, error) {
if !kernel.ContextCanTrace(ctx, n.t, false) {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
if err := checkTaskState(n.t); err != nil {
return nil, err
@@ -448,7 +459,7 @@ func (m *memData) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileF
// Permission to read this file is governed by PTRACE_MODE_ATTACH_FSCREDS
// Since we don't implement setfsuid/setfsgid we can just use PTRACE_MODE_ATTACH
if !kernel.ContextCanTrace(ctx, m.t, true) {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
if err := checkTaskState(m.t); err != nil {
return nil, err
@@ -463,7 +474,7 @@ func (m *memDataFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
if dst.NumBytes() == 0 {
return 0, nil
}
- mm, err := getTaskMM(m.t)
+ mm, err := getTaskMMIncRef(m.t)
if err != nil {
return 0, nil
}
@@ -473,12 +484,12 @@ func (m *memDataFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequen
n, readErr := mm.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
if n > 0 {
if _, err := dst.CopyOut(ctx, buf[:n]); err != nil {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return int64(n), nil
}
if readErr != nil {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
return 0, nil
}
@@ -494,22 +505,9 @@ func newMaps(ctx context.Context, t *kernel.Task, msrc *fs.MountSource) *fs.Inod
return newProcInode(ctx, seqfile.NewSeqFile(ctx, &mapsData{t}), msrc, fs.SpecialFile, t)
}
-func (md *mapsData) mm() *mm.MemoryManager {
- var tmm *mm.MemoryManager
- md.t.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- // No additional reference is taken on mm here. This is safe
- // because MemoryManager.destroy is required to leave the
- // MemoryManager in a state where it's still usable as a SeqSource.
- tmm = mm
- }
- })
- return tmm
-}
-
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (md *mapsData) NeedsUpdate(generation int64) bool {
- if mm := md.mm(); mm != nil {
+ if mm := getTaskMM(md.t); mm != nil {
return mm.NeedsUpdate(generation)
}
return true
@@ -517,7 +515,7 @@ func (md *mapsData) NeedsUpdate(generation int64) bool {
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (md *mapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
- if mm := md.mm(); mm != nil {
+ if mm := getTaskMM(md.t); mm != nil {
return mm.ReadMapsSeqFileData(ctx, h)
}
return []seqfile.SeqData{}, 0
@@ -534,22 +532,9 @@ func newSmaps(ctx context.Context, t *kernel.Task, msrc *fs.MountSource) *fs.Ino
return newProcInode(ctx, seqfile.NewSeqFile(ctx, &smapsData{t}), msrc, fs.SpecialFile, t)
}
-func (sd *smapsData) mm() *mm.MemoryManager {
- var tmm *mm.MemoryManager
- sd.t.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- // No additional reference is taken on mm here. This is safe
- // because MemoryManager.destroy is required to leave the
- // MemoryManager in a state where it's still usable as a SeqSource.
- tmm = mm
- }
- })
- return tmm
-}
-
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (sd *smapsData) NeedsUpdate(generation int64) bool {
- if mm := sd.mm(); mm != nil {
+ if mm := getTaskMM(sd.t); mm != nil {
return mm.NeedsUpdate(generation)
}
return true
@@ -557,7 +542,7 @@ func (sd *smapsData) NeedsUpdate(generation int64) bool {
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (sd *smapsData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
- if mm := sd.mm(); mm != nil {
+ if mm := getTaskMM(sd.t); mm != nil {
return mm.ReadSmapsSeqFileData(ctx, h)
}
return []seqfile.SeqData{}, 0
@@ -627,12 +612,10 @@ func (s *taskStatData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle)
fmt.Fprintf(&buf, "%d ", linux.ClockTFromDuration(s.t.StartTime().Sub(s.t.Kernel().Timekeeper().BootTime())))
var vss, rss uint64
- s.t.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- }
- })
+ if mm := getTaskMM(s.t); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ }
fmt.Fprintf(&buf, "%d %d ", vss, rss/hostarch.PageSize)
// rsslim.
@@ -677,12 +660,10 @@ func (s *statmData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([
}
var vss, rss uint64
- s.t.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- }
- })
+ if mm := getTaskMM(s.t); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ }
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize)
@@ -734,12 +715,13 @@ func (s *statusData) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) (
if fdTable := t.FDTable(); fdTable != nil {
fds = fdTable.CurrentMaxFDs()
}
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- data = mm.VirtualDataSize()
- }
})
+
+ if mm := getTaskMM(s.t); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ data = mm.VirtualDataSize()
+ }
fmt.Fprintf(&buf, "FDSize:\t%d\n", fds)
fmt.Fprintf(&buf, "VmSize:\t%d kB\n", vss>>10)
fmt.Fprintf(&buf, "VmRSS:\t%d kB\n", rss>>10)
@@ -867,7 +849,7 @@ var _ fs.FileOperations = (*commFile)(nil)
// Read implements fs.FileOperations.Read.
func (f *commFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
buf := []byte(f.t.Name() + "\n")
@@ -922,10 +904,10 @@ type auxvecFile struct {
// Read implements fs.FileOperations.Read.
func (f *auxvecFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
- m, err := getTaskMM(f.t)
+ m, err := getTaskMMIncRef(f.t)
if err != nil {
return 0, err
}
@@ -1003,7 +985,7 @@ func (o *oomScoreAdj) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.F
// Read implements fs.FileOperations.Read.
func (f *oomScoreAdjFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if f.t.ExitState() == kernel.TaskExitDead {
- return 0, syserror.ESRCH
+ return 0, linuxerr.ESRCH
}
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d\n", f.t.OOMScoreAdj())
@@ -1030,7 +1012,7 @@ func (f *oomScoreAdjFile) Write(ctx context.Context, _ *fs.File, src usermem.IOS
}
if f.t.ExitState() == kernel.TaskExitDead {
- return 0, syserror.ESRCH
+ return 0, linuxerr.ESRCH
}
if err := f.t.SetOOMScoreAdj(v); err != nil {
return 0, err
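
The split above leaves getTaskMM for borrow-only readers (the seqfile sources) and getTaskMMIncRef for readers that copy from the task's address space. A minimal sketch of the intended call sites, written as if inside this package; taskVSS and readTaskMem are hypothetical, and mm.DecUsers is assumed to be the matching release for the incremented users count:

    package proc

    import (
    	"gvisor.dev/gvisor/pkg/context"
    	"gvisor.dev/gvisor/pkg/hostarch"
    	"gvisor.dev/gvisor/pkg/sentry/kernel"
    	"gvisor.dev/gvisor/pkg/usermem"
    )

    // taskVSS borrows the MemoryManager without pinning it, as the seqfile
    // sources above do; no release is required.
    func taskVSS(t *kernel.Task) uint64 {
    	if mm := getTaskMM(t); mm != nil {
    		return mm.VirtualMemorySize()
    	}
    	return 0
    }

    // readTaskMem pins the MemoryManager for the duration of the copy, as
    // memDataFile.Read does, and releases it when done (DecUsers is the
    // assumed release call from pkg/sentry/mm).
    func readTaskMem(ctx context.Context, t *kernel.Task, addr hostarch.Addr, buf []byte) (int, error) {
    	m, err := getTaskMMIncRef(t)
    	if err != nil {
    		return 0, err
    	}
    	defer m.DecUsers(ctx)
    	return m.CopyIn(ctx, addr, buf, usermem.IOOpts{IgnorePermissions: true})
    }
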
diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go
index 30d5ad4cf..fcdc1e7bd 100644
--- a/pkg/sentry/fs/proc/uid_gid_map.go
+++ b/pkg/sentry/fs/proc/uid_gid_map.go
@@ -21,12 +21,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -108,7 +108,7 @@ const maxIDMapLines = 5
// Read implements fs.FileOperations.Read.
func (imfo *idMapFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var entries []auth.IDMapEntry
if imfo.iops.gids {
@@ -134,7 +134,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u
// the file ..." - user_namespaces(7)
srclen := src.NumBytes()
if srclen >= hostarch.PageSize || offset != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
b := make([]byte, srclen)
if _, err := src.CopyIn(ctx, b); err != nil {
@@ -154,7 +154,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u
}
lines := bytes.SplitN(b, []byte("\n"), maxIDMapLines+1)
if len(lines) > maxIDMapLines {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
entries := make([]auth.IDMapEntry, len(lines))
@@ -162,7 +162,7 @@ func (imfo *idMapFileOperations) Write(ctx context.Context, file *fs.File, src u
var e auth.IDMapEntry
_, err := fmt.Sscan(string(l), &e.FirstID, &e.FirstParentID, &e.Length)
if err != nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
entries[i] = e
}
diff --git a/pkg/sentry/fs/proc/uptime.go b/pkg/sentry/fs/proc/uptime.go
index c0f6fb802..ac896f963 100644
--- a/pkg/sentry/fs/proc/uptime.go
+++ b/pkg/sentry/fs/proc/uptime.go
@@ -20,10 +20,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -74,7 +74,7 @@ type uptimeFile struct {
// Read implements fs.FileOperations.Read.
func (f *uptimeFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
now := ktime.NowFromContext(ctx)
diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD
index 4a3d9636b..bfff010c5 100644
--- a/pkg/sentry/fs/ramfs/BUILD
+++ b/pkg/sentry/fs/ramfs/BUILD
@@ -14,13 +14,13 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/socket/unix/transport",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
],
diff --git a/pkg/sentry/fs/ramfs/dir.go b/pkg/sentry/fs/ramfs/dir.go
index 19990f9db..b1fadee7a 100644
--- a/pkg/sentry/fs/ramfs/dir.go
+++ b/pkg/sentry/fs/ramfs/dir.go
@@ -21,11 +21,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// CreateOps represents operations to create different file types.
@@ -178,7 +178,7 @@ func (d *Dir) Children() ([]string, map[string]fs.DentAttr) {
func (d *Dir) removeChildLocked(ctx context.Context, name string) (*fs.Inode, error) {
inode, ok := d.children[name]
if !ok {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
delete(d.children, name)
@@ -208,7 +208,7 @@ func (d *Dir) removeChildLocked(ctx context.Context, name string) (*fs.Inode, er
// Remove removes the named non-directory.
func (d *Dir) Remove(ctx context.Context, _ *fs.Inode, name string) error {
if len(name) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
d.mu.Lock()
@@ -226,7 +226,7 @@ func (d *Dir) Remove(ctx context.Context, _ *fs.Inode, name string) error {
// RemoveDirectory removes the named directory.
func (d *Dir) RemoveDirectory(ctx context.Context, _ *fs.Inode, name string) error {
if len(name) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
d.mu.Lock()
@@ -240,7 +240,7 @@ func (d *Dir) RemoveDirectory(ctx context.Context, _ *fs.Inode, name string) err
if ok, err := hasChildren(ctx, childInode); err != nil {
return err
} else if ok {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
// Child was empty. Proceed with removal.
@@ -259,7 +259,7 @@ func (d *Dir) RemoveDirectory(ctx context.Context, _ *fs.Inode, name string) err
// with a reference.
func (d *Dir) Lookup(ctx context.Context, _ *fs.Inode, p string) (*fs.Dirent, error) {
if len(p) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
d.mu.Lock()
@@ -283,16 +283,16 @@ func (d *Dir) walkLocked(ctx context.Context, p string) (*fs.Inode, error) {
return inode, nil
}
- // fs.InodeOperations.Lookup returns syserror.ENOENT if p
+ // fs.InodeOperations.Lookup returns linuxerr.ENOENT if p
// does not exist.
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
// createInodeOperationsCommon creates a new child node at this dir by calling
// makeInodeOperations. It is the common logic for creating a new child.
func (d *Dir) createInodeOperationsCommon(ctx context.Context, name string, makeInodeOperations func() (*fs.Inode, error)) (*fs.Inode, error) {
if len(name) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
d.mu.Lock()
@@ -311,7 +311,7 @@ func (d *Dir) createInodeOperationsCommon(ctx context.Context, name string, make
// Create creates a new Inode with the given name and returns its File.
func (d *Dir) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perms fs.FilePermissions) (*fs.File, error) {
if d.CreateOps == nil || d.CreateOps.NewFile == nil {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
inode, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) {
@@ -333,7 +333,7 @@ func (d *Dir) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.F
// CreateLink returns a new link.
func (d *Dir) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error {
if d.CreateOps == nil || d.CreateOps.NewSymlink == nil {
- return syserror.EACCES
+ return linuxerr.EACCES
}
_, err := d.createInodeOperationsCommon(ctx, newname, func() (*fs.Inode, error) {
return d.NewSymlink(ctx, dir, oldname)
@@ -344,7 +344,7 @@ func (d *Dir) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname st
// CreateHardLink creates a new hard link.
func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error {
if len(name) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
d.mu.Lock()
@@ -362,7 +362,7 @@ func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inod
// CreateDirectory returns a new subdirectory.
func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error {
if d.CreateOps == nil || d.CreateOps.NewDir == nil {
- return syserror.EACCES
+ return linuxerr.EACCES
}
_, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) {
return d.NewDir(ctx, dir, perms)
@@ -373,7 +373,7 @@ func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, p
// Bind implements fs.InodeOperations.Bind.
func (d *Dir) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport.BoundEndpoint, perms fs.FilePermissions) (*fs.Dirent, error) {
if d.CreateOps == nil || d.CreateOps.NewBoundEndpoint == nil {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
inode, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) {
return d.NewBoundEndpoint(ctx, dir, ep, perms)
@@ -392,7 +392,7 @@ func (d *Dir) Bind(ctx context.Context, dir *fs.Inode, name string, ep transport
// CreateFifo implements fs.InodeOperations.CreateFifo.
func (d *Dir) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error {
if d.CreateOps == nil || d.CreateOps.NewFifo == nil {
- return syserror.EACCES
+ return linuxerr.EACCES
}
_, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) {
return d.NewFifo(ctx, dir, perms)
@@ -496,14 +496,14 @@ func hasChildren(ctx context.Context, inode *fs.Inode) (bool, error) {
func Rename(ctx context.Context, oldParent fs.InodeOperations, oldName string, newParent fs.InodeOperations, newName string, replacement bool) error {
op, ok := oldParent.(*Dir)
if !ok {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
np, ok := newParent.(*Dir)
if !ok {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if len(newName) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
np.mu.Lock()
@@ -521,7 +521,7 @@ func Rename(ctx context.Context, oldParent fs.InodeOperations, oldName string, n
if ok, err := hasChildren(ctx, replaced); err != nil {
return err
} else if ok {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
}
diff --git a/pkg/sentry/fs/ramfs/socket.go b/pkg/sentry/fs/ramfs/socket.go
index d0c565879..dc9d27bb3 100644
--- a/pkg/sentry/fs/ramfs/socket.go
+++ b/pkg/sentry/fs/ramfs/socket.go
@@ -17,10 +17,10 @@ package ramfs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -64,7 +64,7 @@ func (s *Socket) BoundEndpoint(*fs.Inode, string) transport.BoundEndpoint {
// GetFile implements fs.FileOperations.GetFile.
func (s *Socket) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
// +stateify savable
diff --git a/pkg/sentry/fs/splice.go b/pkg/sentry/fs/splice.go
index 33da82868..266140f6f 100644
--- a/pkg/sentry/fs/splice.go
+++ b/pkg/sentry/fs/splice.go
@@ -19,7 +19,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// Splice moves data to this file, directly from another.
@@ -28,7 +28,7 @@ import (
func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64, error) {
// Verify basic file flag permissions.
if !dst.Flags().Write || !src.Flags().Read {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Check whether or not the objects being spliced are stream-oriented
@@ -54,26 +54,26 @@ func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64,
case dst.UniqueID < src.UniqueID:
// Acquire dst first.
if !dst.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
if !src.mu.Lock(ctx) {
dst.mu.Unlock()
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
case dst.UniqueID > src.UniqueID:
// Acquire src first.
if !src.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
if !dst.mu.Lock(ctx) {
src.mu.Unlock()
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
case dst.UniqueID == src.UniqueID:
// Acquire only one lock; it's the same file. This is a
// bit of an edge case, but presumably it's possible.
if !dst.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
srcLock = false // Only need one unlock.
}
@@ -83,13 +83,13 @@ func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64,
case dstLock:
// Acquire only dst.
if !dst.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
opts.DstStart = dst.offset // Safe: locked.
case srcLock:
// Acquire only src.
if !src.mu.Lock(ctx) {
- return 0, syserror.ErrInterrupted
+ return 0, linuxerr.ErrInterrupted
}
opts.SrcStart = src.offset // Safe: locked.
}
@@ -107,7 +107,7 @@ func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64,
limit, ok := dst.checkLimit(ctx, opts.DstStart)
switch {
case ok && limit == 0:
- err = syserror.ErrExceedsFileSizeLimit
+ err = linuxerr.ErrExceedsFileSizeLimit
case ok && limit < opts.Length:
opts.Length = limit // Cap the write.
}
@@ -139,7 +139,7 @@ func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64,
// Attempt to do a WriteTo; this is likely the most efficient.
n, err := src.FileOperations.WriteTo(ctx, src, w, opts.Length, opts.Dup)
- if n == 0 && err == syserror.ENOSYS && !opts.Dup {
+ if n == 0 && linuxerr.Equals(linuxerr.ENOSYS, err) && !opts.Dup {
// Attempt as a ReadFrom. If WriteTo is unsupported, a ReadFrom may also be
// more efficient than a copy if buffers are cached or readily
// available. (It's unlikely that they can actually be donated).
@@ -151,7 +151,7 @@ func Splice(ctx context.Context, dst *File, src *File, opts SpliceOpts) (int64,
// if we block at some point, we could lose data. If the source is
// not a pipe then reading is not destructive; if the destination
// is a regular file, then it is guaranteed not to block writing.
- if n == 0 && err == syserror.ENOSYS && !opts.Dup && (!dstPipe || !srcPipe) {
+ if n == 0 && linuxerr.Equals(linuxerr.ENOSYS, err) && !opts.Dup && (!dstPipe || !srcPipe) {
// Fallback to an in-kernel copy.
n, err = io.Copy(w, &io.LimitedReader{
R: r,
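
The lock dance above orders acquisition by File.UniqueID so two splices running in opposite directions between the same pair of files cannot deadlock. A standalone sketch of that ordering rule, reduced to plain sync.Mutex with hypothetical types:

    package main

    import "sync"

    type file struct {
    	id uint64
    	mu sync.Mutex
    }

    // lockPair always takes the lock of the lower-ID file first, so concurrent
    // callers agree on the order and cannot each hold one lock while waiting
    // on the other. It returns the matching unlock function.
    func lockPair(a, b *file) func() {
    	if a.id == b.id { // same file: one lock is enough
    		a.mu.Lock()
    		return a.mu.Unlock
    	}
    	first, second := a, b
    	if b.id < a.id {
    		first, second = b, a
    	}
    	first.mu.Lock()
    	second.mu.Lock()
    	return func() {
    		second.mu.Unlock()
    		first.mu.Unlock()
    	}
    }

    func main() {
    	src := &file{id: 1}
    	dst := &file{id: 2}
    	unlock := lockPair(dst, src)
    	defer unlock()
    	// ... move bytes between src and dst here ...
    }
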
diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD
index c7977a217..e61115932 100644
--- a/pkg/sentry/fs/timerfd/BUILD
+++ b/pkg/sentry/fs/timerfd/BUILD
@@ -8,12 +8,12 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/kernel/time",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go
index c8ebe256c..1c8518d71 100644
--- a/pkg/sentry/fs/timerfd/timerfd.go
+++ b/pkg/sentry/fs/timerfd/timerfd.go
@@ -20,12 +20,12 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -121,7 +121,7 @@ func (t *TimerOperations) EventUnregister(e *waiter.Entry) {
func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
const sizeofUint64 = 8
if dst.NumBytes() < sizeofUint64 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if val := atomic.SwapUint64(&t.val, 0); val != 0 {
var buf [sizeofUint64]byte
@@ -133,12 +133,12 @@ func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.I
}
return sizeofUint64, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Write implements fs.FileOperations.Write.
func (t *TimerOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Notify implements ktime.TimerListener.Notify.
diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD
index 90398376a..511fffb43 100644
--- a/pkg/sentry/fs/tmpfs/BUILD
+++ b/pkg/sentry/fs/tmpfs/BUILD
@@ -15,6 +15,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/safemem",
"//pkg/sentry/device",
@@ -30,7 +31,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/usage",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go
index 7faa822f0..1974523bf 100644
--- a/pkg/sentry/fs/tmpfs/inode_file.go
+++ b/pkg/sentry/fs/tmpfs/inode_file.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -31,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -150,7 +150,7 @@ func (*fileInodeOperations) Rename(ctx context.Context, inode *fs.Inode, oldPare
// GetFile implements fs.InodeOperations.GetFile.
func (f *fileInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
if fs.IsSocket(d.Inode.StableAttr) {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
if flags.Write {
@@ -217,7 +217,7 @@ func (f *fileInodeOperations) Truncate(ctx context.Context, _ *fs.Inode, size in
fallthrough
case oldSize > size && f.seals&linux.F_SEAL_SHRINK != 0: // Shrink sealed
f.dataMu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
if oldSize != size {
@@ -278,7 +278,7 @@ func (f *fileInodeOperations) Allocate(ctx context.Context, _ *fs.Inode, offset,
// Check if current seals allow growth.
if f.seals&linux.F_SEAL_GROW != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
f.attr.Size = newSize
@@ -455,13 +455,13 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes()))
if end == math.MaxInt64 {
// Overflow.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check if seals prevent either file growth or all writes.
switch {
case rw.f.seals&linux.F_SEAL_WRITE != 0: // Write sealed
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
case end > rw.f.attr.Size && rw.f.seals&linux.F_SEAL_GROW != 0: // Grow sealed
// When growth is sealed, Linux effectively allows writes which would
// normally grow the file to partially succeed up to the current EOF,
@@ -482,7 +482,7 @@ func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error)
}
if end <= rw.offset {
// Truncation would result in no data being written.
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
}
@@ -550,7 +550,7 @@ func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingS
// Reject writable mapping if F_SEAL_WRITE is set.
if f.seals&linux.F_SEAL_WRITE != 0 && writable {
- return syserror.EPERM
+ return linuxerr.EPERM
}
f.mappings.AddMapping(ms, ar, offset, writable)
@@ -655,7 +655,7 @@ func GetSeals(inode *fs.Inode) (uint32, error) {
return f.seals, nil
}
// Not a memfd inode.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// AddSeals adds new file seals to a memfd inode.
@@ -668,13 +668,13 @@ func AddSeals(inode *fs.Inode, val uint32) error {
if f.seals&linux.F_SEAL_SEAL != 0 {
// Seal applied which prevents addition of any new seals.
- return syserror.EPERM
+ return linuxerr.EPERM
}
// F_SEAL_WRITE can only be added if there are no active writable maps.
if f.seals&linux.F_SEAL_WRITE == 0 && val&linux.F_SEAL_WRITE != 0 {
if f.writableMappingPages > 0 {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
@@ -683,5 +683,5 @@ func AddSeals(inode *fs.Inode, val uint32) error {
return nil
}
// Not a memfd inode.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
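
The tmpfs hunks above enforce the memfd seal rules with the new errno values. A minimal sketch of the same checks as plain flag arithmetic, reusing only the linux ABI constants and linuxerr from this tree; addSeals here is a hypothetical reduction of the fileInodeOperations logic:

    package main

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/abi/linux"
    	"gvisor.dev/gvisor/pkg/errors/linuxerr"
    )

    // addSeals applies the two checks AddSeals performs: F_SEAL_SEAL freezes
    // the seal set itself, and F_SEAL_WRITE cannot be added while writable
    // mappings exist.
    func addSeals(current, val uint32, writableMappingPages uint64) (uint32, error) {
    	if current&linux.F_SEAL_SEAL != 0 {
    		return current, linuxerr.EPERM // seal set is frozen
    	}
    	if current&linux.F_SEAL_WRITE == 0 && val&linux.F_SEAL_WRITE != 0 && writableMappingPages > 0 {
    		return current, linuxerr.EBUSY // active writable maps block F_SEAL_WRITE
    	}
    	return current | val, nil
    }

    func main() {
    	seals, err := addSeals(0, linux.F_SEAL_GROW, 0)
    	fmt.Println(seals, err)
    }
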
diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go
index 6aa8ff331..9a835b556 100644
--- a/pkg/sentry/fs/tmpfs/tmpfs.go
+++ b/pkg/sentry/fs/tmpfs/tmpfs.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
var fsInfo = fs.Info{
@@ -49,7 +49,7 @@ var fsInfo = fs.Info{
func rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string, replacement bool) error {
// Don't allow renames across different mounts.
if newParent.MountSource != oldParent.MountSource {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
op := oldParent.InodeOperations.(*Dir)
diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD
index 86ada820e..9e9dc06f3 100644
--- a/pkg/sentry/fs/tty/BUILD
+++ b/pkg/sentry/fs/tty/BUILD
@@ -17,6 +17,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/marshal/primitive",
"//pkg/refs",
@@ -30,7 +31,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/unimpl",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go
index 13c9dbe7d..5716e2ee9 100644
--- a/pkg/sentry/fs/tty/dir.go
+++ b/pkg/sentry/fs/tty/dir.go
@@ -22,13 +22,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -154,12 +154,12 @@ func (d *dirInodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name str
n, err := strconv.ParseUint(name, 10, 32)
if err != nil {
// Not found.
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
s, ok := d.replicas[uint32(n)]
if !ok {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
s.IncRef()
@@ -170,54 +170,54 @@ func (d *dirInodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name str
//
// Creation is never allowed.
func (d *dirInodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
// CreateDirectory implements fs.InodeOperations.CreateDirectory.
//
// Creation is never allowed.
func (d *dirInodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// CreateLink implements fs.InodeOperations.CreateLink.
//
// Creation is never allowed.
func (d *dirInodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// CreateHardLink implements fs.InodeOperations.CreateHardLink.
//
// Creation is never allowed.
func (d *dirInodeOperations) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// CreateFifo implements fs.InodeOperations.CreateFifo.
//
// Creation is never allowed.
func (d *dirInodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// Remove implements fs.InodeOperations.Remove.
//
// Removal is never allowed.
func (d *dirInodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// RemoveDirectory implements fs.InodeOperations.RemoveDirectory.
//
// Removal is never allowed.
func (d *dirInodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Bind implements fs.InodeOperations.Bind.
func (d *dirInodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data transport.BoundEndpoint, perm fs.FilePermissions) (*fs.Dirent, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// GetFile implements fs.InodeOperations.GetFile.
@@ -234,7 +234,7 @@ func (d *dirInodeOperations) allocateTerminal(ctx context.Context) (*Terminal, e
n := d.next
if n == math.MaxUint32 {
- return nil, syserror.ENOMEM
+ return nil, linuxerr.ENOMEM
}
if _, ok := d.replicas[n]; ok {
@@ -334,10 +334,10 @@ func (df *dirFileOperations) Readdir(ctx context.Context, file *fs.File, seriali
// Read implements FileOperations.Read.
func (df *dirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Write implements FileOperations.Write.
func (df *dirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
diff --git a/pkg/sentry/fs/tty/fs.go b/pkg/sentry/fs/tty/fs.go
index 13f4901db..0e5916380 100644
--- a/pkg/sentry/fs/tty/fs.go
+++ b/pkg/sentry/fs/tty/fs.go
@@ -16,9 +16,9 @@ package tty
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/device"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ptsDevice is the pseudo-filesystem device.
@@ -64,7 +64,7 @@ func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSou
// No options are supported.
if data != "" {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
return newDir(ctx, fs.NewMountSource(ctx, &superOperations{}, f, flags)), nil
diff --git a/pkg/sentry/fs/tty/line_discipline.go b/pkg/sentry/fs/tty/line_discipline.go
index 3ba02c218..f9fca6d8e 100644
--- a/pkg/sentry/fs/tty/line_discipline.go
+++ b/pkg/sentry/fs/tty/line_discipline.go
@@ -20,10 +20,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -193,7 +193,7 @@ func (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSeque
}
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {
@@ -207,7 +207,7 @@ func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequ
l.replicaWaiter.Notify(waiter.ReadableEvents)
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, args arch.SyscallArguments) error {
@@ -228,7 +228,7 @@ func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequ
}
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {
@@ -242,7 +242,7 @@ func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSeq
l.masterWaiter.Notify(waiter.ReadableEvents)
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// transformer is a helper interface to make it easier to stateify queue.
diff --git a/pkg/sentry/fs/tty/master.go b/pkg/sentry/fs/tty/master.go
index 1cf869b62..88d6703a8 100644
--- a/pkg/sentry/fs/tty/master.go
+++ b/pkg/sentry/fs/tty/master.go
@@ -17,13 +17,13 @@ package tty
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -157,7 +157,7 @@ func (mf *masterFileOperations) Ioctl(ctx context.Context, file *fs.File, io use
t := kernel.TaskFromContext(ctx)
if t == nil {
// ioctl(2) may only be called from a task goroutine.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
switch cmd := args[1].Uint(); cmd {
@@ -201,7 +201,7 @@ func (mf *masterFileOperations) Ioctl(ctx context.Context, file *fs.File, io use
return mf.t.setForegroundProcessGroup(ctx, args, true /* isMaster */)
default:
maybeEmitUnimplementedEvent(ctx, cmd)
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fs/tty/queue.go b/pkg/sentry/fs/tty/queue.go
index 11d6c15d0..25d3c887e 100644
--- a/pkg/sentry/fs/tty/queue.go
+++ b/pkg/sentry/fs/tty/queue.go
@@ -17,12 +17,12 @@ package tty
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -110,7 +110,7 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl
defer q.mu.Unlock()
if !q.readable {
- return 0, false, syserror.ErrWouldBlock
+ return 0, false, linuxerr.ErrWouldBlock
}
if dst.NumBytes() > canonMaxBytes {
@@ -155,7 +155,7 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip
room := waitBufMaxBytes - q.waitBufLen
// If out of room, return EAGAIN.
if room == 0 && copyLen > 0 {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Cap the size of the wait buffer.
if copyLen > room {
diff --git a/pkg/sentry/fs/tty/replica.go b/pkg/sentry/fs/tty/replica.go
index 0e3eea3bd..ca5bc7535 100644
--- a/pkg/sentry/fs/tty/replica.go
+++ b/pkg/sentry/fs/tty/replica.go
@@ -17,12 +17,12 @@ package tty
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -142,7 +142,7 @@ func (sf *replicaFileOperations) Ioctl(ctx context.Context, file *fs.File, io us
t := kernel.TaskFromContext(ctx)
if t == nil {
// ioctl(2) may only be called from a task goroutine.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
switch cmd := args[1].Uint(); cmd {
@@ -179,7 +179,7 @@ func (sf *replicaFileOperations) Ioctl(ctx context.Context, file *fs.File, io us
return sf.si.t.setForegroundProcessGroup(ctx, args, false /* isMaster */)
default:
maybeEmitUnimplementedEvent(ctx, cmd)
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fs/user/BUILD b/pkg/sentry/fs/user/BUILD
index 66e949c95..23b5508fd 100644
--- a/pkg/sentry/fs/user/BUILD
+++ b/pkg/sentry/fs/user/BUILD
@@ -12,13 +12,13 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/log",
"//pkg/sentry/fs",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fs/user/path.go b/pkg/sentry/fs/user/path.go
index 124bc95ed..67a9adfd7 100644
--- a/pkg/sentry/fs/user/path.go
+++ b/pkg/sentry/fs/user/path.go
@@ -21,13 +21,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ResolveExecutablePath resolves the given executable name given the working
@@ -80,7 +80,7 @@ func resolve(ctx context.Context, mns *fs.MountNamespace, paths []string, name s
root := fs.RootFromContext(ctx)
if root == nil {
// Caller has no root. Don't bother traversing anything.
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
defer root.DecRef(ctx)
for _, p := range paths {
@@ -93,7 +93,7 @@ func resolve(ctx context.Context, mns *fs.MountNamespace, paths []string, name s
binPath := path.Join(p, name)
traversals := uint(linux.MaxSymlinkTraversals)
d, err := mns.FindInode(ctx, root, nil, binPath, &traversals)
- if err == syserror.ENOENT || err == syserror.EACCES {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.EACCES, err) {
// Didn't find it here.
continue
}
@@ -116,7 +116,7 @@ func resolve(ctx context.Context, mns *fs.MountNamespace, paths []string, name s
}
// Couldn't find it.
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
func resolveVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNamespace, paths []string, name string) (string, error) {
@@ -142,7 +142,7 @@ func resolveVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNam
Flags: linux.O_RDONLY,
}
dentry, err := root.Mount().Filesystem().VirtualFilesystem().OpenAt(ctx, creds, pop, opts)
- if err == syserror.ENOENT || err == syserror.EACCES {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.EACCES, err) {
// Didn't find it here.
continue
}
@@ -155,7 +155,7 @@ func resolveVFS2(ctx context.Context, creds *auth.Credentials, mns *vfs.MountNam
}
// Couldn't find it.
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
// getPath returns the PATH as a slice of strings given the environment
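For reference, the comparison change in resolve and resolveVFS2 above reduces to the pattern below; linuxerr.Equals reports whether an error matches the given linuxerr code, replacing direct equality against syserror sentinel values (sketch only, lifted from the hunks above):

```go
// Before: direct equality against syserror sentinel values.
//	if err == syserror.ENOENT || err == syserror.EACCES {
//		continue
//	}
// After: the linuxerr comparison helper, as used in the hunks above.
if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.EACCES, err) {
	// Didn't find it in this PATH entry; try the next one.
	continue
}
```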
diff --git a/pkg/sentry/fsbridge/BUILD b/pkg/sentry/fsbridge/BUILD
index 6c798f0bd..4631db2bb 100644
--- a/pkg/sentry/fsbridge/BUILD
+++ b/pkg/sentry/fsbridge/BUILD
@@ -13,12 +13,12 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/sentry/fs",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/memmap",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fsbridge/fs.go b/pkg/sentry/fsbridge/fs.go
index 9785fd62a..527bde181 100644
--- a/pkg/sentry/fsbridge/fs.go
+++ b/pkg/sentry/fsbridge/fs.go
@@ -20,10 +20,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -127,7 +127,7 @@ func (l *fsLookup) OpenPath(ctx context.Context, path string, opts vfs.OpenOptio
defer d.DecRef(ctx)
if !resolveFinal && fs.IsSymlink(d.Inode.StableAttr) {
- return nil, syserror.ELOOP
+ return nil, linuxerr.ELOOP
}
fsPerm := openOptionsToPermMask(&opts)
@@ -138,13 +138,13 @@ func (l *fsLookup) OpenPath(ctx context.Context, path string, opts vfs.OpenOptio
// If they claim it's a directory, then make sure.
if strings.HasSuffix(path, "/") {
if d.Inode.StableAttr.Type != fs.Directory {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
}
if opts.FileExec && d.Inode.StableAttr.Type != fs.RegularFile {
ctx.Infof("%q is not a regular file: %v", path, d.Inode.StableAttr.Type)
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
f, err := d.Inode.GetFile(ctx, d, flagsToFileFlags(opts.Flags))
diff --git a/pkg/sentry/fsimpl/cgroupfs/BUILD b/pkg/sentry/fsimpl/cgroupfs/BUILD
index 37efb641a..e5fdcc776 100644
--- a/pkg/sentry/fsimpl/cgroupfs/BUILD
+++ b/pkg/sentry/fsimpl/cgroupfs/BUILD
@@ -31,6 +31,8 @@ go_library(
"//pkg/abi/linux",
"//pkg/context",
"//pkg/coverage",
+ "//pkg/errors/linuxerr",
+ "//pkg/fspath",
"//pkg/log",
"//pkg/refs",
"//pkg/refsvfs2",
@@ -42,7 +44,6 @@ go_library(
"//pkg/sentry/usage",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fsimpl/cgroupfs/base.go b/pkg/sentry/fsimpl/cgroupfs/base.go
index fe9871bdd..71bb0a9c8 100644
--- a/pkg/sentry/fsimpl/cgroupfs/base.go
+++ b/pkg/sentry/fsimpl/cgroupfs/base.go
@@ -23,10 +23,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -88,7 +88,6 @@ type controller interface {
// +stateify savable
type cgroupInode struct {
dir
- fs *filesystem
// ts is the list of tasks in this cgroup. The kernel is responsible for
// removing tasks from this list before they're destroyed, so any tasks on
@@ -102,9 +101,10 @@ var _ kernel.CgroupImpl = (*cgroupInode)(nil)
func (fs *filesystem) newCgroupInode(ctx context.Context, creds *auth.Credentials) kernfs.Inode {
c := &cgroupInode{
- fs: fs,
- ts: make(map[*kernel.Task]struct{}),
+ dir: dir{fs: fs},
+ ts: make(map[*kernel.Task]struct{}),
}
+ c.dir.cgi = c
contents := make(map[string]kernfs.Inode)
contents["cgroup.procs"] = fs.newControllerFile(ctx, creds, &cgroupProcsData{c})
@@ -115,8 +115,7 @@ func (fs *filesystem) newCgroupInode(ctx context.Context, creds *auth.Credential
}
c.dir.InodeAttrs.Init(ctx, creds, linux.UNNAMED_MAJOR, fs.devMinor, fs.NextIno(), linux.ModeDirectory|linux.FileMode(0555))
- c.dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{})
- c.dir.InitRefs()
+ c.dir.OrderedChildren.Init(kernfs.OrderedChildrenOptions{Writable: true})
c.dir.IncLinks(c.dir.OrderedChildren.Populate(contents))
atomic.AddUint64(&fs.numCgroups, 1)
@@ -253,7 +252,7 @@ func parseInt64FromString(ctx context.Context, src usermem.IOSequence, offset in
// Note: This also handles zero-len writes if offset is beyond the end
// of src, or src is empty.
ctx.Warningf("cgroupfs.parseInt64FromString: failed to parse %q: %v", string(buf), err)
- return 0, int64(n), syserror.EINVAL
+ return 0, int64(n), linuxerr.EINVAL
}
return val, int64(n), nil
diff --git a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
index 05d7eb4ce..edc3b50b9 100644
--- a/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
+++ b/pkg/sentry/fsimpl/cgroupfs/cgroupfs.go
@@ -32,7 +32,8 @@
// controllers associated with them.
//
// Since cgroupfs doesn't allow hardlinks, there is a unique mapping between
-// cgroupfs dentries and inodes.
+// cgroupfs dentries and inodes. Thus, cgroupfs inodes don't need to be ref
+// counted; each exists until it is unlinked (which can happen at most once) or
+// the FS is destroyed.
//
// # Synchronization
//
@@ -48,10 +49,11 @@
// Lock order:
//
// kernel.CgroupRegistry.mu
-// cgroupfs.filesystem.mu
-// kernel.TaskSet.mu
-// kernel.Task.mu
-// cgroupfs.filesystem.tasksMu.
+// kernfs.filesystem.mu
+// kernel.TaskSet.mu
+// kernel.Task.mu
+// cgroupfs.filesystem.tasksMu.
+// cgroupfs.dir.OrderedChildren.mu
package cgroupfs
import (
@@ -62,12 +64,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -108,6 +111,7 @@ type FilesystemType struct{}
// +stateify savable
type InternalData struct {
DefaultControlValues map[string]int64
+ InitialCgroupPath string
}
// filesystem implements vfs.FilesystemImpl and kernel.cgroupFS.
@@ -134,6 +138,11 @@ type filesystem struct {
numCgroups uint64 // Protected by atomic ops.
root *kernfs.Dentry
+ // effectiveRoot is the initial cgroup new tasks are created in. Unless
+ // overridden by internal mount options, root == effectiveRoot. If
+ // effectiveRoot != root, an extra reference is held on effectiveRoot for
+ // the lifetime of the filesystem.
+ effectiveRoot *kernfs.Dentry
// tasksMu serializes task membership changes across all cgroups within a
// filesystem.
@@ -167,7 +176,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
maxCachedDentries, err = strconv.ParseUint(str, 10, 64)
if err != nil {
ctx.Warningf("sys.FilesystemType.GetFilesystem: invalid dentry cache limit: dentry_cache_limit=%s", str)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
}
@@ -195,7 +204,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if _, ok := mopts["all"]; ok {
if len(wantControllers) > 0 {
ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: other controllers specified with all: %v", wantControllers)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
delete(mopts, "all")
@@ -209,7 +218,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if len(mopts) != 0 {
ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: unknown options: %v", mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
k := kernel.KernelFromContext(ctx)
@@ -229,6 +238,9 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
fs := vfsfs.Impl().(*filesystem)
ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: mounting new view to hierarchy %v", fs.hierarchyID)
fs.root.IncRef()
+ if fs.effectiveRoot != fs.root {
+ fs.effectiveRoot.IncRef()
+ }
return vfsfs, fs.root.VFSDentry(), nil
}
@@ -245,8 +257,8 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
var defaults map[string]int64
if opts.InternalData != nil {
- ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: default control values: %v", defaults)
defaults = opts.InternalData.(*InternalData).DefaultControlValues
+ ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: default control values: %v", defaults)
}
for _, ty := range wantControllers {
@@ -286,6 +298,14 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
var rootD kernfs.Dentry
rootD.InitRoot(&fs.Filesystem, root)
fs.root = &rootD
+ fs.effectiveRoot = fs.root
+
+ if err := fs.prepareInitialCgroup(ctx, vfsObj, opts); err != nil {
+ ctx.Warningf("cgroupfs.FilesystemType.GetFilesystem: failed to prepare initial cgroup: %v", err)
+ rootD.DecRef(ctx)
+ fs.VFSFilesystem().DecRef(ctx)
+ return nil, nil, err
+ }
// Register controllers. The registry may be modified concurrently, so if we
// get an error, we raced with someone else who registered the same
@@ -294,7 +314,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
ctx.Infof("cgroupfs.FilesystemType.GetFilesystem: failed to register new hierarchy with controllers %v: %v", wantControllers, err)
rootD.DecRef(ctx)
fs.VFSFilesystem().DecRef(ctx)
- return nil, nil, syserror.EBUSY
+ return nil, nil, linuxerr.EBUSY
}
// Move all existing tasks to the root of the new hierarchy.
@@ -303,10 +323,47 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
return fs.VFSFilesystem(), rootD.VFSDentry(), nil
}
+// prepareInitialCgroup creates the initial cgroup according to opts. An initial
+// cgroup is optional, and if not specified, this function is a no-op.
+func (fs *filesystem) prepareInitialCgroup(ctx context.Context, vfsObj *vfs.VirtualFilesystem, opts vfs.GetFilesystemOptions) error {
+ if opts.InternalData == nil {
+ return nil
+ }
+ initPathStr := opts.InternalData.(*InternalData).InitialCgroupPath
+ if initPathStr == "" {
+ return nil
+ }
+ ctx.Debugf("cgroupfs.FilesystemType.GetFilesystem: initial cgroup path: %v", initPathStr)
+ initPath := fspath.Parse(initPathStr)
+ if !initPath.Absolute || !initPath.HasComponents() {
+ ctx.Warningf("cgroupfs.FilesystemType.GetFilesystem: initial cgroup path invalid: %+v", initPath)
+ return linuxerr.EINVAL
+ }
+
+ // Have initial cgroup target, create the tree.
+ cgDir := fs.root.Inode().(*cgroupInode)
+ for pit := initPath.Begin; pit.Ok(); pit = pit.Next() {
+ cgDirI, err := cgDir.NewDir(ctx, pit.String(), vfs.MkdirOptions{})
+ if err != nil {
+ return err
+ }
+ cgDir = cgDirI.(*cgroupInode)
+ }
+
+ // Walk to target dentry.
+ initDentry, err := fs.root.WalkDentryTree(ctx, vfsObj, initPath)
+ if err != nil {
+ ctx.Warningf("cgroupfs.FilesystemType.GetFilesystem: initial cgroup dentry not found: %v", err)
+ return linuxerr.ENOENT
+ }
+ fs.effectiveRoot = initDentry // Reference from WalkDentryTree transferred here.
+ return nil
+}
+
func (fs *filesystem) rootCgroup() kernel.Cgroup {
return kernel.Cgroup{
- Dentry: fs.root,
- CgroupImpl: fs.root.Inode().(kernel.CgroupImpl),
+ Dentry: fs.effectiveRoot,
+ CgroupImpl: fs.effectiveRoot.Inode().(kernel.CgroupImpl),
}
}
@@ -320,6 +377,10 @@ func (fs *filesystem) Release(ctx context.Context) {
r.Unregister(fs.hierarchyID)
}
+ if fs.root != fs.effectiveRoot {
+ fs.effectiveRoot.DecRef(ctx)
+ }
+
fs.Filesystem.VFSFilesystem().VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
fs.Filesystem.Release(ctx)
}
@@ -346,15 +407,18 @@ func (*implStatFS) StatFS(context.Context, *vfs.Filesystem) (linux.Statfs, error
//
// +stateify savable
type dir struct {
- dirRefs
+ kernfs.InodeNoopRefCount
kernfs.InodeAlwaysValid
kernfs.InodeAttrs
kernfs.InodeNotSymlink
- kernfs.InodeDirectoryNoNewChildren // TODO(b/183137098): Implement mkdir.
+ kernfs.InodeDirectoryNoNewChildren
kernfs.OrderedChildren
implStatFS
locks vfs.FileLocks
+
+ fs *filesystem // Immutable.
+ cgi *cgroupInode // Immutable.
}
// Keep implements kernfs.Inode.Keep.
@@ -364,7 +428,7 @@ func (*dir) Keep() bool {
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*dir) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Open implements kernfs.Inode.Open.
@@ -378,14 +442,100 @@ func (d *dir) Open(ctx context.Context, rp *vfs.ResolvingPath, kd *kernfs.Dentry
return fd.VFSFileDescription(), nil
}
-// DecRef implements kernfs.Inode.DecRef.
-func (d *dir) DecRef(ctx context.Context) {
- d.dirRefs.DecRef(func() { d.Destroy(ctx) })
+// NewDir implements kernfs.Inode.NewDir.
+func (d *dir) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (kernfs.Inode, error) {
+ // "Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable."
+ // -- Linux, kernel/cgroup.c:cgroup_mkdir().
+ if strings.Contains(name, "\n") {
+ return nil, linuxerr.EINVAL
+ }
+ return d.OrderedChildren.Inserter(name, func() kernfs.Inode {
+ d.IncLinks(1)
+ return d.fs.newCgroupInode(ctx, auth.CredentialsFromContext(ctx))
+ })
}
-// StatFS implements kernfs.Inode.StatFS.
-func (d *dir) StatFS(ctx context.Context, fs *vfs.Filesystem) (linux.Statfs, error) {
- return vfs.GenericStatFS(linux.CGROUP_SUPER_MAGIC), nil
+// Rename implements kernfs.Inode.Rename. Cgroupfs only allows renaming of
+// cgroup directories, and the rename may only change the name within the same
+// parent. See linux, kernel/cgroup.c:cgroup_rename().
+func (d *dir) Rename(ctx context.Context, oldname, newname string, child, dst kernfs.Inode) error {
+ if _, ok := child.(*cgroupInode); !ok {
+ // Not a cgroup directory. Control files are backed by different types.
+ return linuxerr.ENOTDIR
+ }
+
+ dstCGInode, ok := dst.(*cgroupInode)
+ if !ok {
+ // Not a cgroup inode, so definitely can't be *this* inode.
+ return linuxerr.EIO
+ }
+ // Note: We're intentionally comparing addresses, since two distinct dirs
+ // could plausibly have identical contents, but would still occupy different
+ // locations in memory.
+ if d != &dstCGInode.dir {
+ // Destination dir is a different cgroup inode. Cross-directory renames
+ // aren't allowed.
+ return linuxerr.EIO
+ }
+
+ // Rename moves oldname to newname within d. Proceed.
+ return d.OrderedChildren.Rename(ctx, oldname, newname, child, dst)
+}
+
+// Unlink implements kernfs.Inode.Unlink. Cgroupfs disallows unlink, as the only
+// files in the filesystem are control files, which can't be deleted.
+func (d *dir) Unlink(ctx context.Context, name string, child kernfs.Inode) error {
+ return linuxerr.EPERM
+}
+
+// hasChildrenLocked returns whether the cgroup dir contains any objects that
+// prevent it from being deleted.
+func (d *dir) hasChildrenLocked() bool {
+ // Subdirs take a link on the parent, so checks if there are any direct
+ // children cgroups. Exclude the dir's self link and the link from ".".
+ if d.InodeAttrs.Links()-2 > 0 {
+ return true
+ }
+ return len(d.cgi.ts) > 0
+}
+
+// HasChildren implements kernfs.Inode.HasChildren.
+//
+// The empty check for a cgroupfs directory is unlike that for a regular
+// directory, since a cgroupfs directory always has control files. A cgroupfs
+// directory can be deleted only if the cgroup contains no tasks and has no
+// sub-cgroups.
+func (d *dir) HasChildren() bool {
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
+ return d.hasChildrenLocked()
+}
+
+// RmDir implements kernfs.Inode.RmDir.
+func (d *dir) RmDir(ctx context.Context, name string, child kernfs.Inode) error {
+ // Unlike a normal directory, we need to recheck emptiness here, since
+ // vfs/kernfs can't stop tasks from entering or leaving the cgroup.
+ d.fs.tasksMu.RLock()
+ defer d.fs.tasksMu.RUnlock()
+
+ cgi, ok := child.(*cgroupInode)
+ if !ok {
+ return linuxerr.ENOTDIR
+ }
+ if cgi.dir.hasChildrenLocked() {
+ return linuxerr.ENOTEMPTY
+ }
+
+ // Disallow deletion of the effective root cgroup.
+ if cgi == d.fs.effectiveRoot.Inode().(*cgroupInode) {
+ ctx.Warningf("Cannot delete initial cgroup for new tasks %q", d.fs.effectiveRoot.FSLocalPath())
+ return linuxerr.EBUSY
+ }
+
+ err := d.OrderedChildren.RmDir(ctx, name, child)
+ if err == nil {
+ d.InodeAttrs.DecLinks()
+ }
+ return err
}
// controllerFile represents a generic control file that appears within a cgroup
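The new InternalData.InitialCgroupPath field, together with prepareInitialCgroup and effectiveRoot above, lets an internal mount start new tasks in a sub-cgroup rather than at the hierarchy root. A minimal sketch of how a caller might supply these options; the InternalData fields come from this change, while the direct GetFilesystem call, credentials, and path are assumed for illustration:

```go
// Sketch only; runsc would normally go through the VFS mount path instead of
// calling GetFilesystem directly.
data := &cgroupfs.InternalData{
	DefaultControlValues: map[string]int64{
		"memory.limit_in_bytes": 1 << 30, // 1 GiB
	},
	// Must be an absolute path with at least one component; otherwise
	// prepareInitialCgroup rejects the mount with EINVAL.
	InitialCgroupPath: "/runsc/sandbox",
}
vfsfs, rootD, err := cgroupfs.FilesystemType{}.GetFilesystem(ctx, vfsObj, creds, "cgroup",
	vfs.GetFilesystemOptions{Data: "memory", InternalData: data})
if err != nil {
	return err
}
// vfsfs and rootD would then be wrapped into a vfs.Mount by the caller.
```

With such a mount, fs.effectiveRoot points at /runsc/sandbox instead of the root, rootCgroup() places new tasks there, and RmDir refuses to delete that directory (EBUSY) for as long as the filesystem exists.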
diff --git a/pkg/sentry/fsimpl/cgroupfs/memory.go b/pkg/sentry/fsimpl/cgroupfs/memory.go
index 485c98376..d880c9bc4 100644
--- a/pkg/sentry/fsimpl/cgroupfs/memory.go
+++ b/pkg/sentry/fsimpl/cgroupfs/memory.go
@@ -31,22 +31,34 @@ import (
type memoryController struct {
controllerCommon
- limitBytes int64
+ limitBytes int64
+ softLimitBytes int64
+ moveChargeAtImmigrate int64
}
var _ controller = (*memoryController)(nil)
func newMemoryController(fs *filesystem, defaults map[string]int64) *memoryController {
c := &memoryController{
- // Linux sets this to (PAGE_COUNTER_MAX * PAGE_SIZE) by default, which
- // is ~ 2**63 on a 64-bit system. So essentially, inifinity. The exact
- // value isn't very important.
- limitBytes: math.MaxInt64,
+ // Linux sets these limits to (PAGE_COUNTER_MAX * PAGE_SIZE) by default,
+ // which is ~ 2**63 on a 64-bit system. So essentially, infinity. The
+ // exact value isn't very important.
+
+ limitBytes: math.MaxInt64,
+ softLimitBytes: math.MaxInt64,
}
- if val, ok := defaults["memory.limit_in_bytes"]; ok {
- c.limitBytes = val
- delete(defaults, "memory.limit_in_bytes")
+
+ consumeDefault := func(name string, valPtr *int64) {
+ if val, ok := defaults[name]; ok {
+ *valPtr = val
+ delete(defaults, name)
+ }
}
+
+ consumeDefault("memory.limit_in_bytes", &c.limitBytes)
+ consumeDefault("memory.soft_limit_in_bytes", &c.softLimitBytes)
+ consumeDefault("memory.move_charge_at_immigrate", &c.moveChargeAtImmigrate)
+
c.controllerCommon.init(controllerMemory, fs)
return c
}
@@ -55,6 +67,8 @@ func newMemoryController(fs *filesystem, defaults map[string]int64) *memoryContr
func (c *memoryController) AddControlFiles(ctx context.Context, creds *auth.Credentials, _ *cgroupInode, contents map[string]kernfs.Inode) {
contents["memory.usage_in_bytes"] = c.fs.newControllerFile(ctx, creds, &memoryUsageInBytesData{})
contents["memory.limit_in_bytes"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf("%d\n", c.limitBytes))
+ contents["memory.soft_limit_in_bytes"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf("%d\n", c.softLimitBytes))
+ contents["memory.move_charge_at_immigrate"] = c.fs.newStaticControllerFile(ctx, creds, linux.FileMode(0644), fmt.Sprintf("%d\n", c.moveChargeAtImmigrate))
}
// +stateify savable
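The consumeDefault closure above generalizes to any controller that accepts per-file default values: known keys are consumed from the map, and whatever remains can later be rejected as unrecognized. A small self-contained illustration (not gVisor code):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Defaults as they might arrive via InternalData.DefaultControlValues.
	defaults := map[string]int64{"memory.limit_in_bytes": 1 << 30, "bogus.key": 1}

	// Same shape as the consumeDefault closure above: take known keys and
	// leave unknown ones behind for the caller to reject.
	limit := int64(math.MaxInt64)
	consume := func(name string, valPtr *int64) {
		if val, ok := defaults[name]; ok {
			*valPtr = val
			delete(defaults, name)
		}
	}
	consume("memory.limit_in_bytes", &limit)

	fmt.Println(limit, defaults) // 1073741824 map[bogus.key:1]
}
```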
diff --git a/pkg/sentry/fsimpl/devpts/BUILD b/pkg/sentry/fsimpl/devpts/BUILD
index 6af3c3781..e0b879339 100644
--- a/pkg/sentry/fsimpl/devpts/BUILD
+++ b/pkg/sentry/fsimpl/devpts/BUILD
@@ -29,6 +29,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/log",
"//pkg/marshal",
"//pkg/marshal/primitive",
@@ -44,7 +45,6 @@ go_library(
"//pkg/sentry/unimpl",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
@@ -59,5 +59,6 @@ go_test(
"//pkg/abi/linux",
"//pkg/sentry/contexttest",
"//pkg/usermem",
+ "//pkg/waiter",
],
)
diff --git a/pkg/sentry/fsimpl/devpts/devpts.go b/pkg/sentry/fsimpl/devpts/devpts.go
index e75954105..e711debcb 100644
--- a/pkg/sentry/fsimpl/devpts/devpts.go
+++ b/pkg/sentry/fsimpl/devpts/devpts.go
@@ -25,10 +25,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Name is the filesystem name.
@@ -56,7 +56,7 @@ func (*FilesystemType) Name() string {
func (fstype *FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
// No data allowed.
if opts.Data != "" {
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fstype.initOnce.Do(func() {
@@ -179,7 +179,7 @@ func (i *rootInode) allocateTerminal(ctx context.Context, creds *auth.Credential
i.mu.Lock()
defer i.mu.Unlock()
if i.nextIdx == math.MaxUint32 {
- return nil, syserror.ENOMEM
+ return nil, linuxerr.ENOMEM
}
idx := i.nextIdx
i.nextIdx++
@@ -240,7 +240,7 @@ func (i *rootInode) Lookup(ctx context.Context, name string) (kernfs.Inode, erro
// Not a static entry.
idx, err := strconv.ParseUint(name, 10, 32)
if err != nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
i.mu.Lock()
defer i.mu.Unlock()
@@ -249,7 +249,7 @@ func (i *rootInode) Lookup(ctx context.Context, name string) (kernfs.Inode, erro
return ri, nil
}
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
// IterDirents implements kernfs.Inode.IterDirents.
diff --git a/pkg/sentry/fsimpl/devpts/devpts_test.go b/pkg/sentry/fsimpl/devpts/devpts_test.go
index 448390cfe..1ef07d702 100644
--- a/pkg/sentry/fsimpl/devpts/devpts_test.go
+++ b/pkg/sentry/fsimpl/devpts/devpts_test.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/usermem"
+ "gvisor.dev/gvisor/pkg/waiter"
)
func TestSimpleMasterToReplica(t *testing.T) {
@@ -54,3 +55,36 @@ func TestSimpleMasterToReplica(t *testing.T) {
t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
}
}
+
+type callback func(*waiter.Entry, waiter.EventMask)
+
+func (cb callback) Callback(entry *waiter.Entry, mask waiter.EventMask) {
+ cb(entry, mask)
+}
+
+func TestEchoDeadlock(t *testing.T) {
+ ctx := contexttest.Context(t)
+ termios := linux.DefaultReplicaTermios
+ termios.LocalFlags |= linux.ECHO
+ ld := newLineDiscipline(termios)
+ outBytes := make([]byte, 32)
+ dst := usermem.BytesIOSequence(outBytes)
+ entry := &waiter.Entry{Callback: callback(func(*waiter.Entry, waiter.EventMask) {
+ ld.inputQueueRead(ctx, dst)
+ })}
+ ld.masterWaiter.EventRegister(entry, waiter.ReadableEvents)
+ defer ld.masterWaiter.EventUnregister(entry)
+ inBytes := []byte("hello, tty\n")
+ n, err := ld.inputQueueWrite(ctx, usermem.BytesIOSequence(inBytes))
+ if err != nil {
+ t.Fatalf("inputQueueWrite: %v", err)
+ }
+ if int(n) != len(inBytes) {
+ t.Fatalf("read wrong length: got %d, want %d", n, len(inBytes))
+ }
+ outStr := string(outBytes[:n])
+ inStr := string(inBytes)
+ if outStr != inStr {
+ t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr)
+ }
+}
diff --git a/pkg/sentry/fsimpl/devpts/line_discipline.go b/pkg/sentry/fsimpl/devpts/line_discipline.go
index e94a5bac3..609623f9f 100644
--- a/pkg/sentry/fsimpl/devpts/line_discipline.go
+++ b/pkg/sentry/fsimpl/devpts/line_discipline.go
@@ -20,10 +20,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -70,6 +70,10 @@ const (
// +------------------------| output queue |<--------------------------+
// (outputQueueRead) +--------------+ (outputQueueWrite)
//
+// There is special handling for the ECHO option, where bytes written to the
+// input queue are also echoed back to the terminal: the input queue
+// transformer writes them to l.outQueue.
+//
// Lock order:
// termiosMu
// inQueue.mu
@@ -126,7 +130,6 @@ func (l *lineDiscipline) getTermios(task *kernel.Task, args arch.SyscallArgument
// setTermios sets a linux.Termios for the tty.
func (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArguments) (uintptr, error) {
l.termiosMu.Lock()
- defer l.termiosMu.Unlock()
oldCanonEnabled := l.termios.LEnabled(linux.ICANON)
// We must copy a Termios struct, not KernelTermios.
var t linux.Termios
@@ -141,7 +144,10 @@ func (l *lineDiscipline) setTermios(task *kernel.Task, args arch.SyscallArgument
l.inQueue.pushWaitBufLocked(l)
l.inQueue.readable = true
l.inQueue.mu.Unlock()
+ l.termiosMu.Unlock()
l.replicaWaiter.Notify(waiter.ReadableEvents)
+ } else {
+ l.termiosMu.Unlock()
}
return 0, err
@@ -179,33 +185,42 @@ func (l *lineDiscipline) inputQueueReadSize(t *kernel.Task, io usermem.IO, args
func (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {
l.termiosMu.RLock()
- defer l.termiosMu.RUnlock()
- n, pushed, err := l.inQueue.read(ctx, dst, l)
+ n, pushed, notifyEcho, err := l.inQueue.read(ctx, dst, l)
+ l.termiosMu.RUnlock()
if err != nil {
return 0, err
}
if n > 0 {
- l.masterWaiter.Notify(waiter.WritableEvents)
+ if notifyEcho {
+ l.masterWaiter.Notify(waiter.ReadableEvents | waiter.WritableEvents)
+ } else {
+ l.masterWaiter.Notify(waiter.WritableEvents)
+ }
if pushed {
l.replicaWaiter.Notify(waiter.ReadableEvents)
}
return n, nil
+ } else if notifyEcho {
+ l.masterWaiter.Notify(waiter.ReadableEvents)
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {
l.termiosMu.RLock()
- defer l.termiosMu.RUnlock()
- n, err := l.inQueue.write(ctx, src, l)
+ n, notifyEcho, err := l.inQueue.write(ctx, src, l)
+ l.termiosMu.RUnlock()
if err != nil {
return 0, err
}
+ if notifyEcho {
+ l.masterWaiter.Notify(waiter.ReadableEvents)
+ }
if n > 0 {
l.replicaWaiter.Notify(waiter.ReadableEvents)
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, io usermem.IO, args arch.SyscallArguments) error {
@@ -214,8 +229,9 @@ func (l *lineDiscipline) outputQueueReadSize(t *kernel.Task, io usermem.IO, args
func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) {
l.termiosMu.RLock()
- defer l.termiosMu.RUnlock()
- n, pushed, err := l.outQueue.read(ctx, dst, l)
+ // Ignore notifyEcho, as echo cannot happen when reading from the output queue.
+ n, pushed, _, err := l.outQueue.read(ctx, dst, l)
+ l.termiosMu.RUnlock()
if err != nil {
return 0, err
}
@@ -226,13 +242,14 @@ func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequ
}
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) {
l.termiosMu.RLock()
- defer l.termiosMu.RUnlock()
- n, err := l.outQueue.write(ctx, src, l)
+ // Ignore notifyEcho, as echo cannot happen when writing to the output queue.
+ n, _, err := l.outQueue.write(ctx, src, l)
+ l.termiosMu.RUnlock()
if err != nil {
return 0, err
}
@@ -240,13 +257,14 @@ func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSeq
l.masterWaiter.Notify(waiter.ReadableEvents)
return n, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// transformer is a helper interface to make it easier to stateify queue.
type transformer interface {
// transform functions require queue's mutex to be held.
- transform(*lineDiscipline, *queue, []byte) int
+ // The boolean indicates whether any bytes were echoed.
+ transform(*lineDiscipline, *queue, []byte) (int, bool)
}
// outputQueueTransformer implements transformer. It performs line discipline
@@ -261,7 +279,7 @@ type outputQueueTransformer struct{}
// Preconditions:
// * l.termiosMu must be held for reading.
// * q.mu must be held.
-func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {
+func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) (int, bool) {
// transformOutput is effectively always in noncanonical mode, as the
// master termios never has ICANON set.
@@ -270,7 +288,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
if len(q.readBuf) > 0 {
q.readable = true
}
- return len(buf)
+ return len(buf), false
}
var ret int
@@ -321,7 +339,7 @@ func (*outputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte
if len(q.readBuf) > 0 {
q.readable = true
}
- return ret
+ return ret, false
}
// inputQueueTransformer implements transformer. It performs line discipline
@@ -334,15 +352,17 @@ type inputQueueTransformer struct{}
// transformed according to flags set in the termios struct. See
// drivers/tty/n_tty.c:n_tty_receive_char_special for an analogous kernel
// function.
+// It returns an extra boolean indicating whether any characters need to be
+// echoed, in which case we need to notify readers.
//
// Preconditions:
// * l.termiosMu must be held for reading.
// * q.mu must be held.
-func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) int {
+func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte) (int, bool) {
// If there's a line waiting to be read in canonical mode, don't write
// anything else to the read buffer.
if l.termios.LEnabled(linux.ICANON) && q.readable {
- return 0
+ return 0, false
}
maxBytes := nonCanonMaxBytes
@@ -351,6 +371,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
}
var ret int
+ var notifyEcho bool
for len(buf) > 0 && len(q.readBuf) < canonMaxBytes {
size := l.peek(buf)
cBytes := append([]byte{}, buf[:size]...)
@@ -397,7 +418,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
// Anything written to the readBuf will have to be echoed.
if l.termios.LEnabled(linux.ECHO) {
l.outQueue.writeBytes(cBytes, l)
- l.masterWaiter.Notify(waiter.ReadableEvents)
+ notifyEcho = true
}
// If we finish a line, make it available for reading.
@@ -412,7 +433,7 @@ func (*inputQueueTransformer) transform(l *lineDiscipline, q *queue, buf []byte)
q.readable = true
}
- return ret
+ return ret, notifyEcho
}
// shouldDiscard returns whether c should be discarded. In canonical mode, if
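The restructuring above fixes a potential self-deadlock with ECHO enabled: previously the input queue transformer called masterWaiter.Notify while the queue and termios locks were still held, so a waiter callback that re-enters the line discipline (as TestEchoDeadlock earlier in this change does) would block on the same non-reentrant locks. The methods now return a notifyEcho flag and call Notify only after unlocking. A self-contained sketch of the general pattern, using toy types rather than the gVisor ones:

```go
package main

import "sync"

// queue is a toy stand-in for the devpts queue. Waiter callbacks may
// re-enter Read, which takes mu, so Write must not notify while holding mu.
type queue struct {
	mu      sync.Mutex
	pending []byte
	waiters []func()
}

func (q *queue) Read(buf []byte) int {
	q.mu.Lock()
	defer q.mu.Unlock()
	n := copy(buf, q.pending)
	q.pending = q.pending[n:]
	return n
}

func (q *queue) Write(b []byte) {
	q.mu.Lock()
	q.pending = append(q.pending, b...)
	notify := len(b) > 0
	q.mu.Unlock() // Drop mu before notifying: a callback may call Read.
	if notify {
		for _, w := range q.waiters {
			w()
		}
	}
}

func main() {
	q := &queue{}
	buf := make([]byte, 4)
	// This callback re-enters the queue; it would deadlock if Write still
	// held q.mu when invoking it.
	q.waiters = append(q.waiters, func() { q.Read(buf) })
	q.Write([]byte("echo"))
}
```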
diff --git a/pkg/sentry/fsimpl/devpts/master.go b/pkg/sentry/fsimpl/devpts/master.go
index 93c031c89..9a1a245dc 100644
--- a/pkg/sentry/fsimpl/devpts/master.go
+++ b/pkg/sentry/fsimpl/devpts/master.go
@@ -17,6 +17,7 @@ package devpts
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
@@ -24,7 +25,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -80,7 +80,7 @@ func (mi *masterInode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs
// SetStat implements kernfs.Inode.SetStat
func (mi *masterInode) SetStat(ctx context.Context, vfsfs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {
if opts.Stat.Mask&linux.STATX_SIZE != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
return mi.InodeAttrs.SetStat(ctx, vfsfs, creds, opts)
}
@@ -132,7 +132,7 @@ func (mfd *masterFileDescription) Ioctl(ctx context.Context, io usermem.IO, args
t := kernel.TaskFromContext(ctx)
if t == nil {
// ioctl(2) may only be called from a task goroutine.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
switch cmd := args[1].Uint(); cmd {
@@ -177,7 +177,7 @@ func (mfd *masterFileDescription) Ioctl(ctx context.Context, io usermem.IO, args
return mfd.t.setForegroundProcessGroup(ctx, args, true /* isMaster */)
default:
maybeEmitUnimplementedEvent(ctx, cmd)
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fsimpl/devpts/queue.go b/pkg/sentry/fsimpl/devpts/queue.go
index 47b0f1599..85aeefa43 100644
--- a/pkg/sentry/fsimpl/devpts/queue.go
+++ b/pkg/sentry/fsimpl/devpts/queue.go
@@ -17,12 +17,12 @@ package devpts
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -98,17 +98,19 @@ func (q *queue) readableSize(t *kernel.Task, io usermem.IO, args arch.SyscallArg
}
-// read reads from q to userspace. It returns the number of bytes read as well
-// as whether the read caused more readable data to become available (whether
+// read reads from q to userspace. It returns:
+// - The number of bytes read
+// - Whether the read caused more readable data to become available (whether
// data was pushed from the wait buffer to the read buffer).
+// - Whether any data was echoed back (need to notify readers).
//
// Preconditions: l.termiosMu must be held for reading.
-func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
+func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
if !q.readable {
- return 0, false, syserror.ErrWouldBlock
+ return 0, false, false, linuxerr.ErrWouldBlock
}
if dst.NumBytes() > canonMaxBytes {
@@ -131,19 +133,20 @@ func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipl
return n, nil
}))
if err != nil {
- return 0, false, err
+ return 0, false, false, err
}
// Move data from the queue's wait buffer to its read buffer.
- nPushed := q.pushWaitBufLocked(l)
+ nPushed, notifyEcho := q.pushWaitBufLocked(l)
- return int64(n), nPushed > 0, nil
+ return int64(n), nPushed > 0, notifyEcho, nil
}
// write writes to q from userspace.
+// The returned boolean indicates whether any data was echoed back.
//
// Preconditions: l.termiosMu must be held for reading.
-func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, error) {
+func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {
q.mu.Lock()
defer q.mu.Unlock()
@@ -153,7 +156,7 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip
room := waitBufMaxBytes - q.waitBufLen
// If out of room, return EAGAIN.
if room == 0 && copyLen > 0 {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Cap the size of the wait buffer.
if copyLen > room {
@@ -173,44 +176,49 @@ func (q *queue) write(ctx context.Context, src usermem.IOSequence, l *lineDiscip
return n, nil
}))
if err != nil {
- return 0, err
+ return 0, false, err
}
// Push data from the wait to the read buffer.
- q.pushWaitBufLocked(l)
+ _, notifyEcho := q.pushWaitBufLocked(l)
- return n, nil
+ return n, notifyEcho, nil
}
// writeBytes writes to q from b.
+// The returned boolean indicates whether any data was echoed back.
//
// Preconditions: l.termiosMu must be held for reading.
-func (q *queue) writeBytes(b []byte, l *lineDiscipline) {
+func (q *queue) writeBytes(b []byte, l *lineDiscipline) bool {
q.mu.Lock()
defer q.mu.Unlock()
// Write to the wait buffer.
q.waitBufAppend(b)
- q.pushWaitBufLocked(l)
+ _, notifyEcho := q.pushWaitBufLocked(l)
+ return notifyEcho
}
// pushWaitBufLocked fills the queue's read buffer with data from the wait
// buffer.
+// The returned boolean indicates whether any data was echoed back.
//
// Preconditions:
// * l.termiosMu must be held for reading.
// * q.mu must be locked.
-func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {
+func (q *queue) pushWaitBufLocked(l *lineDiscipline) (int, bool) {
if q.waitBufLen == 0 {
- return 0
+ return 0, false
}
// Move data from the wait to the read buffer.
var total int
var i int
+ var notifyEcho bool
for i = 0; i < len(q.waitBuf); i++ {
- n := q.transform(l, q, q.waitBuf[i])
+ n, echo := q.transform(l, q, q.waitBuf[i])
total += n
+ notifyEcho = notifyEcho || echo
if n != len(q.waitBuf[i]) {
// The read buffer filled up without consuming the
// entire buffer.
@@ -223,7 +231,7 @@ func (q *queue) pushWaitBufLocked(l *lineDiscipline) int {
q.waitBuf = q.waitBuf[i:]
q.waitBufLen -= uint64(total)
- return total
+ return total, notifyEcho
}
// Precondition: q.mu must be locked.
diff --git a/pkg/sentry/fsimpl/devpts/replica.go b/pkg/sentry/fsimpl/devpts/replica.go
index 96d2054cb..e251897b4 100644
--- a/pkg/sentry/fsimpl/devpts/replica.go
+++ b/pkg/sentry/fsimpl/devpts/replica.go
@@ -17,13 +17,13 @@ package devpts
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -92,7 +92,7 @@ func (ri *replicaInode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vf
// SetStat implements kernfs.Inode.SetStat
func (ri *replicaInode) SetStat(ctx context.Context, vfsfs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {
if opts.Stat.Mask&linux.STATX_SIZE != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
return ri.InodeAttrs.SetStat(ctx, vfsfs, creds, opts)
}
@@ -141,7 +141,7 @@ func (rfd *replicaFileDescription) Ioctl(ctx context.Context, io usermem.IO, arg
t := kernel.TaskFromContext(ctx)
if t == nil {
// ioctl(2) may only be called from a task goroutine.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
switch cmd := args[1].Uint(); cmd {
@@ -179,7 +179,7 @@ func (rfd *replicaFileDescription) Ioctl(ctx context.Context, io usermem.IO, arg
return rfd.inode.t.setForegroundProcessGroup(ctx, args, false /* isMaster */)
default:
maybeEmitUnimplementedEvent(ctx, cmd)
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fsimpl/eventfd/BUILD b/pkg/sentry/fsimpl/eventfd/BUILD
index c09fdc7f9..1cb049a29 100644
--- a/pkg/sentry/fsimpl/eventfd/BUILD
+++ b/pkg/sentry/fsimpl/eventfd/BUILD
@@ -9,11 +9,11 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fdnotifier",
"//pkg/hostarch",
"//pkg/log",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
diff --git a/pkg/sentry/fsimpl/eventfd/eventfd.go b/pkg/sentry/fsimpl/eventfd/eventfd.go
index 4f79cfcb7..af5ba5131 100644
--- a/pkg/sentry/fsimpl/eventfd/eventfd.go
+++ b/pkg/sentry/fsimpl/eventfd/eventfd.go
@@ -22,11 +22,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -149,7 +149,7 @@ func (efd *EventFileDescription) hostReadLocked(ctx context.Context, dst usermem
var buf [8]byte
if _, err := unix.Read(efd.hostfd, buf[:]); err != nil {
if err == unix.EWOULDBLOCK {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
return err
}
@@ -167,7 +167,7 @@ func (efd *EventFileDescription) read(ctx context.Context, dst usermem.IOSequenc
// We can't complete the read if the value is currently zero.
if efd.val == 0 {
efd.mu.Unlock()
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
// Update the value based on the mode the event is operating in.
@@ -200,7 +200,7 @@ func (efd *EventFileDescription) hostWriteLocked(val uint64) error {
hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := unix.Write(efd.hostfd, buf[:])
if err == unix.EWOULDBLOCK {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
return err
}
@@ -232,7 +232,7 @@ func (efd *EventFileDescription) Signal(val uint64) error {
// uint64 minus 1.
if val > math.MaxUint64-1-efd.val {
efd.mu.Unlock()
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
efd.val += val
diff --git a/pkg/sentry/fsimpl/ext/BUILD b/pkg/sentry/fsimpl/ext/BUILD
index 2dbc6bfd5..e69de29bb 100644
--- a/pkg/sentry/fsimpl/ext/BUILD
+++ b/pkg/sentry/fsimpl/ext/BUILD
@@ -1,103 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "dirent_list",
- out = "dirent_list.go",
- package = "ext",
- prefix = "dirent",
- template = "//pkg/ilist:generic_list",
- types = {
- "Element": "*dirent",
- "Linker": "*dirent",
- },
-)
-
-go_template_instance(
- name = "fstree",
- out = "fstree.go",
- package = "ext",
- prefix = "generic",
- template = "//pkg/sentry/vfs/genericfstree:generic_fstree",
- types = {
- "Dentry": "dentry",
- },
-)
-
-go_library(
- name = "ext",
- srcs = [
- "block_map_file.go",
- "dentry.go",
- "directory.go",
- "dirent_list.go",
- "ext.go",
- "extent_file.go",
- "file_description.go",
- "filesystem.go",
- "fstree.go",
- "inode.go",
- "regular_file.go",
- "symlink.go",
- "utils.go",
- ],
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fd",
- "//pkg/fspath",
- "//pkg/log",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/safemem",
- "//pkg/sentry/arch",
- "//pkg/sentry/fs",
- "//pkg/sentry/fs/lock",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/memmap",
- "//pkg/sentry/socket/unix/transport",
- "//pkg/sentry/syscalls/linux",
- "//pkg/sentry/vfs",
- "//pkg/sync",
- "//pkg/syserror",
- "//pkg/usermem",
- "//pkg/waiter",
- ],
-)
-
-go_test(
- name = "ext_test",
- size = "small",
- srcs = [
- "block_map_test.go",
- "ext_test.go",
- "extent_test.go",
- ],
- data = [
- "//pkg/sentry/fsimpl/ext:assets/bigfile.txt",
- "//pkg/sentry/fsimpl/ext:assets/file.txt",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext2",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext3",
- "//pkg/sentry/fsimpl/ext:assets/tiny.ext4",
- ],
- library = ":ext",
- deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/marshal/primitive",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext/disklayout",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- "//pkg/syserror",
- "//pkg/test/testutil",
- "//pkg/usermem",
- "@com_github_google_go_cmp//cmp:go_default_library",
- "@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/README.md b/pkg/sentry/fsimpl/ext/README.md
deleted file mode 100644
index af00cfda8..000000000
--- a/pkg/sentry/fsimpl/ext/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-## EXT(2/3/4) File System
-
-This is a filesystem driver which supports ext2, ext3 and ext4 filesystems.
-Linux has specialized drivers for each variant but none which supports all. This
-library takes advantage of ext's backward compatibility and understands the
-internal organization of on-disk structures to support all variants.
-
-This driver implementation diverges from the Linux implementations in being more
-forgiving about versioning. For instance, if a filesystem contains both extent
-based inodes and classical block map based inodes, this driver will not complain
-and interpret them both correctly. While in Linux this would be an issue. This
-blurs the line between the three ext fs variants.
-
-Ext2 is considered deprecated as of Red Hat Enterprise Linux 7, and ext3 has
-been superseded by ext4 by large performance gains. Thus it is recommended to
-upgrade older filesystem images to ext4 using e2fsprogs for better performance.
-
-### Read Only
-
-This driver currently only allows read only operations. A lot of the design
-decisions are based on this feature. There are plans to implement write (the
-process for which is documented in the future work section).
-
-### Performance
-
-One of the biggest wins about this driver is that it directly talks to the
-underlying block device (or whatever persistent storage is being used), instead
-of making expensive RPCs to a gofer.
-
-Another advantage is that ext fs supports fast concurrent reads. Currently the
-device is represented using a `io.ReaderAt` which allows for concurrent reads.
-All reads are directly passed to the device driver which intelligently serves
-the read requests in the optimal order. There is no congestion due to locking
-while reading in the filesystem level.
-
-Reads are optimized further in the way file data is transferred over to user
-memory. Ext fs directly copies over file data from disk into user memory with no
-additional allocations on the way. We can only get faster by preloading file
-data into memory (see future work section).
-
-The internal structures used to represent files, inodes and file descriptors use
-a lot of inheritance. With the level of indirection that an interface adds with
-an internal pointer, it can quickly fragment a structure across memory. As this
-runs along side a full blown kernel (which is memory intensive), having a
-fragmented struct might hurt performance. Hence these internal structures,
-though interfaced, are tightly packed in memory using the same inheritance
-pattern that pkg/sentry/vfs uses. The pkg/sentry/fsimpl/ext/disklayout package
-makes an execption to this pattern for reasons documented in the package.
-
-### Security
-
-This driver also intends to help sandbox the container better by reducing the
-surface of the host kernel that the application touches. It prevents the
-application from exploiting vulnerabilities in the host filesystem driver. All
-`io.ReaderAt.ReadAt()` calls are translated to `pread(2)` which are directly
-passed to the device driver in the kernel. Hence this reduces the surface for
-attack.
-
-The application can not affect any host filesystems other than the one passed
-via block device by the user.
-
-### Future Work
-
-#### Write
-
-To support write operations we would need to modify the block device underneath.
-Currently, the driver does not modify the device at all, not even for updating
-the access times for reads. Modifying the filesystem incorrectly can corrupt it
-and render it unreadable for other correct ext(x) drivers. Hence caution must be
-maintained while modifying metadata structures.
-
-Ext4 specifically is built for performance and has added a lot of complexity as
-to how metadata structures are modified. For instance, files that are organized
-via an extent tree which must be balanced and file data blocks must be placed in
-the same extent as much as possible to increase locality. Such properties must
-be maintained while modifying the tree.
-
-Ext filesystems boast a lot about locality, which plays a big role in them being
-performant. The block allocation algorithm in Linux does a good job in keeping
-related data together. This behavior must be maintained as much as possible,
-else we might end up degrading the filesystem performance over time.
-
-Ext4 also supports a wide variety of features which are specialized for varying
-use cases. Implementing all of them can get difficult very quickly.
-
-Ext(x) checksums all its metadata structures to check for corruption, so
-modification of any metadata struct must correspond with re-checksumming the
-struct. Linux filesystem drivers also order on-disk updates intelligently to not
-corrupt the filesystem and also remain performant. The in-memory metadata
-structures must be kept in sync with what is on disk.
-
-There is also replication of some important structures across the filesystem.
-All replicas must be updated when their original copy is updated. There is also
-provisioning for snapshotting which must be kept in mind, although it should not
-affect this implementation unless we allow users to create filesystem snapshots.
-
-Ext4 also introduced journaling (jbd2). The journal must be updated
-appropriately.
-
-#### Performance
-
-To improve performance we should implement a buffer cache, and optionally, read
-ahead for small files. While doing so we must also keep in mind the memory usage
-and have a reasonable cap on how much file data we want to hold in memory.
-
-#### Features
-
-Our current implementation will work with most ext4 filesystems for readonly
-purposed. However, the following features are not supported yet:
-
-- Journal
-- Snapshotting
-- Extended Attributes
-- Hash Tree Directories
-- Meta Block Groups
-- Multiple Mount Protection
-- Bigalloc
diff --git a/pkg/sentry/fsimpl/ext/assets/README.md b/pkg/sentry/fsimpl/ext/assets/README.md
deleted file mode 100644
index 6f1e81b3a..000000000
--- a/pkg/sentry/fsimpl/ext/assets/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-### Tiny Ext(2/3/4) Images
-
-The images are of size 64Kb which supports 64 1k blocks and 16 inodes. This is
-the smallest size mkfs.ext(2/3/4) works with.
-
-These images were generated using the following commands.
-
-```bash
-fallocate -l 64K tiny.ext$VERSION
-mkfs.ext$VERSION -j tiny.ext$VERSION
-```
-
-where `VERSION` is `2`, `3` or `4`.
-
-You can mount it using:
-
-```bash
-sudo mount -o loop tiny.ext$VERSION $MOUNTPOINT
-```
-
-`file.txt`, `bigfile.txt` and `symlink.txt` were added to this image by just
-mounting it and copying (while preserving links) those files to the mountpoint
-directory using:
-
-```bash
-sudo cp -P {file.txt,symlink.txt,bigfile.txt} $MOUNTPOINT
-```
-
-The files in this directory mirror the contents and organisation of the files
-stored in the image.
-
-You can umount the filesystem using:
-
-```bash
-sudo umount $MOUNTPOINT
-```
diff --git a/pkg/sentry/fsimpl/ext/assets/bigfile.txt b/pkg/sentry/fsimpl/ext/assets/bigfile.txt
deleted file mode 100644
index 3857cf516..000000000
--- a/pkg/sentry/fsimpl/ext/assets/bigfile.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus faucibus eleifend orci, ut ornare nibh faucibus eu. Cras at condimentum massa. Nullam luctus, elit non porttitor congue, sapien diam feugiat sapien, sed eleifend nulla mauris non arcu. Sed lacinia mauris magna, eu mollis libero varius sit amet. Donec mollis, quam convallis commodo posuere, dolor nisi placerat nisi, in faucibus augue mi eu lorem. In pharetra consectetur faucibus. Ut euismod ex efficitur egestas tincidunt. Maecenas condimentum ut ante in rutrum. Vivamus sed arcu tempor, faucibus turpis et, lacinia diam.
-
-Sed in lacus vel nisl interdum bibendum in sed justo. Nunc tellus risus, molestie vitae arcu sed, molestie tempus ligula. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc risus neque, volutpat et ante non, ullamcorper condimentum ante. Aliquam sed metus in urna condimentum convallis. Vivamus ut libero mauris. Proin mollis posuere consequat. Vestibulum placerat mollis est et pulvinar.
-
-Donec rutrum odio ac diam pharetra, id fermentum magna cursus. Pellentesque in dapibus elit, et condimentum orci. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Suspendisse euismod dapibus est, id vestibulum mauris. Nulla facilisi. Nulla cursus gravida nisi. Phasellus vestibulum rutrum lectus, a dignissim mauris hendrerit vitae. In at elementum mauris. Integer vel efficitur velit. Nullam fringilla sapien mi, quis luctus neque efficitur ac. Aenean nec quam dapibus nunc commodo pharetra. Proin sapien mi, fermentum aliquet vulputate non, aliquet porttitor diam. Quisque lacinia, urna et finibus fermentum, nunc lacus vehicula ex, sed congue metus lectus ac quam. Aliquam erat volutpat. Suspendisse sodales, dolor ut tincidunt finibus, augue erat varius tellus, a interdum erat sem at nunc. Vestibulum cursus iaculis sapien, vitae feugiat dui auctor quis.
-
-Pellentesque nec maximus nulla, eu blandit diam. Maecenas quis arcu ornare, congue ante at, vehicula ipsum. Praesent feugiat mauris rutrum sem fermentum, nec luctus ipsum placerat. Pellentesque placerat ipsum at dignissim fringilla. Vivamus et posuere sem, eget hendrerit felis. Aenean vulputate, augue vel mollis feugiat, justo ipsum mollis dolor, eu mollis elit neque ut ipsum. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Fusce bibendum sem quam, vulputate laoreet mi dapibus imperdiet. Sed a purus non nibh pretium aliquet. Integer eget luctus augue, vitae tincidunt magna. Ut eros enim, egestas eu nulla et, lobortis egestas arcu. Cras id ipsum ac justo lacinia rutrum. Vivamus lectus leo, ultricies sed justo at, pellentesque feugiat magna. Ut sollicitudin neque elit, vel ornare mauris commodo id.
-
-Duis dapibus orci et sapien finibus finibus. Mauris eleifend, lacus at vestibulum maximus, quam ligula pharetra erat, sit amet dapibus neque elit vitae neque. In bibendum sollicitudin erat, eget ultricies tortor malesuada at. Sed sit amet orci turpis. Donec feugiat ligula nibh, molestie tincidunt lectus elementum id. Donec volutpat maximus nibh, in vulputate felis posuere eu. Cras tincidunt ullamcorper lacus. Phasellus porta lorem auctor, congue magna a, commodo elit.
-
-Etiam auctor mi quis elit sodales, eu pulvinar arcu condimentum. Aenean imperdiet risus et dapibus tincidunt. Nullam tincidunt dictum dui, sed commodo urna rutrum id. Ut mollis libero vel elit laoreet bibendum. Quisque arcu arcu, tincidunt at ultricies id, vulputate nec metus. In tristique posuere quam sit amet volutpat. Vivamus scelerisque et nunc at dapibus. Fusce finibus libero ut ligula pretium rhoncus. Mauris non elit in arcu finibus imperdiet. Pellentesque nec massa odio. Proin rutrum mauris non sagittis efficitur. Aliquam auctor quam at dignissim faucibus. Ut eget ligula in magna posuere ultricies vitae sit amet turpis. Duis maximus odio nulla. Donec gravida sem tristique tempus scelerisque.
-
-Interdum et malesuada fames ac ante ipsum primis in faucibus. Fusce pharetra magna vulputate aliquet tempus. Duis id hendrerit arcu. Quisque ut ex elit. Integer velit orci, venenatis ut sapien ac, placerat porttitor dui. Interdum et malesuada fames ac ante ipsum primis in faucibus. Nunc hendrerit cursus diam, hendrerit finibus ipsum scelerisque ut. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.
-
-Nulla non euismod neque. Phasellus vel sapien eu metus pulvinar rhoncus. Suspendisse eu mollis tellus, quis vestibulum tortor. Maecenas interdum dolor sed nulla fermentum maximus. Donec imperdiet ullamcorper condimentum. Nam quis nibh ante. Praesent quis tellus ut tortor pulvinar blandit sit amet ut sapien. Vestibulum est orci, pellentesque vitae tristique sit amet, tristique non felis.
-
-Vivamus sodales pellentesque varius. Sed vel tempus ligula. Nulla tristique nisl vel dui facilisis, ac sodales augue hendrerit. Proin augue nisi, vestibulum quis augue nec, sagittis tincidunt velit. Vestibulum euismod, nulla nec sodales faucibus, urna sapien vulputate magna, id varius metus sapien ut neque. Duis in mollis urna, in scelerisque enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nunc condimentum dictum turpis, et egestas neque dapibus eget. Quisque fringilla, dui eu venenatis eleifend, erat nibh lacinia urna, at lacinia lacus sapien eu dui. Duis eu erat ut mi lacinia convallis a sed ex.
-
-Fusce elit metus, tincidunt nec eleifend a, hendrerit nec ligula. Duis placerat finibus sollicitudin. In euismod porta tellus, in luctus justo bibendum bibendum. Maecenas at magna eleifend lectus tincidunt suscipit ut a ligula. Nulla tempor accumsan felis, fermentum dapibus est eleifend vitae. Mauris urna sem, fringilla at ultricies non, ultrices in arcu. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam vehicula nunc at laoreet imperdiet. Nunc tristique ut risus id aliquet. Integer eleifend massa orci.
-
-Vestibulum sed ante sollicitudin nisi fringilla bibendum nec vel quam. Sed pretium augue eu ligula congue pulvinar. Donec vitae magna tincidunt, pharetra lacus id, convallis nulla. Cras viverra nisl nisl, varius convallis leo vulputate nec. Morbi at consequat dui, sed aliquet metus. Sed suscipit fermentum mollis. Maecenas nec mi sodales, tincidunt purus in, tristique mauris. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec interdum mi in velit efficitur, quis ultrices ex imperdiet. Sed vestibulum, magna ut tristique pretium, mi ipsum placerat tellus, non tempor enim augue et ex. Pellentesque eget felis quis ante sodales viverra ac sed lacus. Donec suscipit tempus massa, eget laoreet massa molestie at.
-
-Aenean fringilla dui non aliquet consectetur. Fusce cursus quam nec orci hendrerit faucibus. Donec consequat suscipit enim, non volutpat lectus auctor interdum. Proin lorem purus, maximus vel orci vitae, suscipit egestas turpis. Donec risus urna, congue a sem eu, aliquet placerat odio. Morbi gravida tristique turpis, quis efficitur enim. Nunc interdum gravida ipsum vel facilisis. Nunc congue finibus sollicitudin. Quisque euismod aliquet lectus et tincidunt. Curabitur ultrices sem ut mi fringilla fermentum. Morbi pretium, nisi sit amet dapibus congue, dolor enim consectetur risus, a interdum ligula odio sed odio. Quisque facilisis, mi at suscipit gravida, nunc sapien cursus justo, ut luctus odio nulla quis leo. Integer condimentum lobortis mauris, non egestas tellus lobortis sit amet.
-
-In sollicitudin velit ac ante vehicula, vitae varius tortor mollis. In hac habitasse platea dictumst. Quisque et orci lorem. Integer malesuada fringilla luctus. Pellentesque malesuada, mi non lobortis porttitor, ante ligula vulputate ante, nec dictum risus eros sit amet sapien. Nulla aliquam lorem libero, ac varius nulla tristique eget. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Ut pellentesque mauris orci, vel consequat mi varius a. Ut sit amet elit vulputate, lacinia metus non, fermentum nisl. Pellentesque eu nisi sed quam egestas blandit. Duis sit amet lobortis dolor. Donec consectetur sem interdum, tristique elit sit amet, sodales lacus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Fusce id aliquam augue. Sed pretium congue risus vitae lacinia. Vestibulum non vulputate risus, ut malesuada justo.
-
-Sed odio elit, consectetur ac mauris quis, consequat commodo libero. Fusce sodales velit vulputate pulvinar fermentum. Donec iaculis nec nisl eget faucibus. Mauris at dictum velit. Donec fermentum lectus eu viverra volutpat. Aliquam consequat facilisis lorem, cursus consequat dui bibendum ullamcorper. Pellentesque nulla magna, imperdiet at magna et, cursus egestas enim. Nullam semper molestie lectus sit amet semper. Duis eget tincidunt est. Integer id neque risus. Integer ultricies hendrerit vestibulum. Donec blandit blandit sagittis. Nunc consectetur vitae nisi consectetur volutpat.
-
-Nulla id lorem fermentum, efficitur magna a, hendrerit dui. Vivamus sagittis orci gravida, bibendum quam eget, molestie est. Phasellus nec enim tincidunt, volutpat sapien non, laoreet diam. Nulla posuere enim nec porttitor lobortis. Donec auctor odio ut orci eleifend, ut eleifend purus convallis. Interdum et malesuada fames ac ante ipsum primis in faucibus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut hendrerit, purus eget viverra tincidunt, sem magna imperdiet libero, et aliquam turpis neque vitae elit. Maecenas semper varius iaculis. Cras non lorem quis quam bibendum eleifend in et libero. Curabitur at purus mauris. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus porta diam sed elit eleifend gravida.
-
-Nulla facilisi. Ut ultricies diam vel diam consectetur, vel porta augue molestie. Fusce interdum sapien et metus facilisis pellentesque. Nulla convallis sem at nunc vehicula facilisis. Nam ac rutrum purus. Nunc bibendum, dolor sit amet tempus ullamcorper, lorem leo tempor sem, id fringilla nunc augue scelerisque augue. Nullam sit amet rutrum nisl. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec sed mauris gravida eros vehicula sagittis at eget orci. Cras elementum, eros at accumsan bibendum, libero neque blandit purus, vitae vestibulum libero massa ac nibh. Integer at placerat nulla. Mauris eu eleifend orci. Aliquam consequat ligula vitae erat porta lobortis. Duis fermentum elit ac aliquet ornare.
-
-Mauris eget cursus tellus, eget sodales purus. Aliquam malesuada, augue id vulputate finibus, nisi ex bibendum nisl, sit amet laoreet quam urna a dolor. Nullam ultricies, sapien eu laoreet consequat, erat eros dignissim diam, ultrices sodales lectus mauris et leo. Morbi lacinia eu ante at tempus. Sed iaculis finibus magna malesuada efficitur. Donec faucibus erat sit amet elementum feugiat. Praesent a placerat nisi. Etiam lacinia gravida diam, et sollicitudin sapien tincidunt ut.
-
-Maecenas felis quam, tincidunt vitae venenatis scelerisque, viverra vitae odio. Phasellus enim neque, ultricies suscipit malesuada sit amet, vehicula sit amet purus. Nulla placerat sit amet dui vel tincidunt. Nam quis neque vel magna commodo egestas. Vestibulum sagittis rutrum lorem ut congue. Maecenas vel ultrices tellus. Donec efficitur, urna ac consequat iaculis, lorem felis pharetra eros, eget faucibus orci lectus sit amet arcu.
-
-Ut a tempus nisi. Nulla facilisi. Praesent vulputate maximus mi et dapibus. Sed sit amet libero ac augue hendrerit efficitur in a sapien. Mauris placerat velit sit amet tellus sollicitudin faucibus. Donec egestas a magna ac suscipit. Duis enim sapien, mollis sed egestas et, vestibulum vel leo.
-
-Proin quis dapibus dui. Donec eu tincidunt nunc. Vivamus eget purus consectetur, maximus ante vitae, tincidunt elit. Aenean mattis dolor a gravida aliquam. Praesent quis tellus id sem maximus vulputate nec sed nulla. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur metus nulla, volutpat volutpat est eu, hendrerit congue erat. Aliquam sollicitudin augue ante. Sed sollicitudin, magna eu consequat elementum, mi augue ullamcorper felis, molestie imperdiet erat metus iaculis est. Proin ac tortor nisi. Pellentesque quis nisi risus. Integer enim sapien, tincidunt quis tortor id, accumsan venenatis mi. Nulla facilisi.
-
-Cras pretium sit amet quam congue maximus. Morbi lacus libero, imperdiet commodo massa sed, scelerisque placerat libero. Cras nisl nisi, consectetur sed bibendum eu, venenatis at enim. Proin sodales justo at quam aliquam, a consectetur mi ornare. Donec porta ac est sit amet efficitur. Suspendisse vestibulum tortor id neque imperdiet, id lacinia risus vehicula. Phasellus ac eleifend purus. Mauris vel gravida ante. Aliquam vitae lobortis risus. Sed vehicula consectetur tincidunt. Nam et justo vitae purus molestie consequat. Pellentesque ipsum ex, convallis quis blandit non, gravida et urna. Donec diam ligula amet.
diff --git a/pkg/sentry/fsimpl/ext/assets/file.txt b/pkg/sentry/fsimpl/ext/assets/file.txt
deleted file mode 100644
index 980a0d5f1..000000000
--- a/pkg/sentry/fsimpl/ext/assets/file.txt
+++ /dev/null
@@ -1 +0,0 @@
-Hello World!
diff --git a/pkg/sentry/fsimpl/ext/assets/symlink.txt b/pkg/sentry/fsimpl/ext/assets/symlink.txt
deleted file mode 120000
index 4c330738c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/symlink.txt
+++ /dev/null
@@ -1 +0,0 @@
-file.txt \ No newline at end of file
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext2 b/pkg/sentry/fsimpl/ext/assets/tiny.ext2
deleted file mode 100644
index 381ade9bf..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext2
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext3 b/pkg/sentry/fsimpl/ext/assets/tiny.ext3
deleted file mode 100644
index 0e97a324c..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext3
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/assets/tiny.ext4 b/pkg/sentry/fsimpl/ext/assets/tiny.ext4
deleted file mode 100644
index a6859736d..000000000
--- a/pkg/sentry/fsimpl/ext/assets/tiny.ext4
+++ /dev/null
Binary files differ
diff --git a/pkg/sentry/fsimpl/ext/benchmark/BUILD b/pkg/sentry/fsimpl/ext/benchmark/BUILD
deleted file mode 100644
index 6c5a559fd..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "benchmark_test",
- size = "small",
- srcs = ["benchmark_test.go"],
- deps = [
- "//pkg/context",
- "//pkg/fspath",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/fsimpl/ext",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/vfs",
- ],
-)
diff --git a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go b/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
deleted file mode 100644
index 2ee7cc7ac..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/benchmark_test.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// These benchmarks emulate memfs benchmarks. Before they are run, ext4 images
-// must be created at /tmp/image-{depth}.ext4, for all the depths tested below,
-// using the `make_deep_ext4.sh` script.
-//
-// The benchmark itself cannot run the script because the script requires
-// sudo privileges to create the file system images.
-package benchmark_test
-
-import (
- "fmt"
- "os"
- "runtime"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-var depths = []int{1, 2, 3, 8, 64, 100}
-
-const filename = "file.txt"
-
-// setUp opens imagePath as an ext Filesystem and returns all necessary
-// elements required to run tests. If error is nil, it also returns a tear
-// down function which must be called after the test is run for clean up.
-func setUp(b *testing.B, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- f, err := os.Open(imagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- return nil, nil, nil, nil, err
- }
- vfsObj.MustRegisterFilesystemType("extfs", ext.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, imagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// mount mounts extfs at the path operation passed. Returns a tear down
-// function which must be called after the test is run for clean up.
-func mount(b *testing.B, imagePath string, vfsfs *vfs.VirtualFilesystem, pop *vfs.PathOperation) func() {
- b.Helper()
-
- f, err := os.Open(imagePath)
- if err != nil {
- b.Fatalf("could not open image at %s: %v", imagePath, err)
- }
-
- ctx := contexttest.Context(b)
- creds := auth.CredentialsFromContext(ctx)
-
- if _, err := vfsfs.MountAt(ctx, creds, imagePath, pop, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- }); err != nil {
- b.Fatalf("failed to mount extfs submount: %v", err)
- }
- return func() {
- if err := f.Close(); err != nil {
- b.Fatalf("tearDown failed: %v", err)
- }
- }
-}
-
-// BenchmarkVFS2Ext4fsStat emulates BenchmarkVFS2MemfsStat.
-func BenchmarkVFS2Ext4fsStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", depth))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- var filePathBuilder strings.Builder
- filePathBuilder.WriteByte('/')
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check.
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
-
-// BenchmarkVFS2ExtfsMountStat emulates BenchmarkVFS2MemfsMountStat.
-func BenchmarkVFS2ExtfsMountStat(b *testing.B) {
- for _, depth := range depths {
- b.Run(fmt.Sprintf("%d", depth), func(b *testing.B) {
- // Create root extfs with depth 1 so we can mount extfs again at /1/.
- ctx, vfsfs, root, tearDown, err := setUp(b, fmt.Sprintf("/tmp/image-%d.ext4", 1))
- if err != nil {
- b.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- creds := auth.CredentialsFromContext(ctx)
- mountPointName := "/1/"
- pop := vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(mountPointName),
- }
-
- // Save the mount point for later use.
- mountPoint, err := vfsfs.GetDentryAt(ctx, creds, &pop, &vfs.GetDentryOptions{})
- if err != nil {
- b.Fatalf("failed to walk to mount point: %v", err)
- }
- defer mountPoint.DecRef(ctx)
-
- // Create extfs submount.
- mountTearDown := mount(b, fmt.Sprintf("/tmp/image-%d.ext4", depth), vfsfs, &pop)
- defer mountTearDown()
-
- var filePathBuilder strings.Builder
- filePathBuilder.WriteString(mountPointName)
- for i := 1; i <= depth; i++ {
- filePathBuilder.WriteString(fmt.Sprintf("%d", i))
- filePathBuilder.WriteByte('/')
- }
- filePathBuilder.WriteString(filename)
- filePath := filePathBuilder.String()
-
- runtime.GC()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- stat, err := vfsfs.StatAt(ctx, creds, &vfs.PathOperation{
- Root: *root,
- Start: *root,
- Path: fspath.Parse(filePath),
- FollowFinalSymlink: true,
- }, &vfs.StatOptions{})
- if err != nil {
- b.Fatalf("stat(%q) failed: %v", filePath, err)
- }
- // Sanity check. touch(1) always creates files of size 0 (empty).
- if stat.Size > 0 {
- b.Fatalf("got wrong file size (%d)", stat.Size)
- }
- }
- })
- }
-}
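The header comment of the deleted benchmark notes that the ext4 images must already exist at `/tmp/image-{depth}.ext4`. As a minimal sketch (not part of the deleted code), a hypothetical pre-flight helper could skip a benchmark when an image is missing instead of failing inside `setUp`:

```go
package benchmark_test

import (
	"fmt"
	"os"
	"testing"
)

// requireImage skips the benchmark if the ext4 image for the given depth has
// not been generated with make_deep_ext4.sh. Hypothetical helper; the deleted
// benchmarks instead fail inside setUp when the image is absent.
func requireImage(b *testing.B, depth int) {
	b.Helper()
	path := fmt.Sprintf("/tmp/image-%d.ext4", depth)
	if _, err := os.Stat(path); err != nil {
		b.Skipf("missing %s (generate it with make_deep_ext4.sh): %v", path, err)
	}
}
```

Each `b.Run` closure above could call `requireImage(b, depth)` before `setUp`.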
diff --git a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh b/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
deleted file mode 100755
index d0910da1f..000000000
--- a/pkg/sentry/fsimpl/ext/benchmark/make_deep_ext4.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script creates an ext4 image containing directories nested $1 levels
-# deep and a file in the innermost directory, at path /1/2/.../depth/file.txt.
-# The ext4 image is written to $2. The image is temporarily mounted at
-# /tmp/mountpoint. This script must be run with sudo privileges.
-
-# Usage:
-# sudo bash make_deep_ext4.sh {depth} {output path}
-
-# Check positional arguments.
-if [ "$#" -ne 2 ]; then
- echo "Usage: sudo bash make_deep_ext4.sh {depth} {output path}"
- exit 1
-fi
-
-# Make sure depth is a non-negative number.
-if ! [[ "$1" =~ ^[0-9]+$ ]]; then
- echo "Depth must be a non-negative number."
- exit 1
-fi
-
-# Create a 1 MB filesystem image at the requested output path.
-rm -f $2
-fallocate -l 1M $2
-if [ $? -ne 0 ]; then
- echo "fallocate failed"
- exit $?
-fi
-
-# Convert that blank into an ext4 image.
-mkfs.ext4 -j $2
-if [ $? -ne 0 ]; then
- echo "mkfs.ext4 failed"
- exit $?
-fi
-
-# Mount the image.
-MOUNTPOINT=/tmp/mountpoint
-mkdir -p $MOUNTPOINT
-mount -o loop $2 $MOUNTPOINT
-if [ $? -ne 0 ]; then
- echo "mount failed"
- exit $?
-fi
-
-# Create nested directories and the file.
-if [ "$1" -eq 0 ]; then
- FILEPATH=$MOUNTPOINT/file.txt
-else
- FILEPATH=$MOUNTPOINT/$(seq -s '/' 1 $1)/file.txt
-fi
-mkdir -p $(dirname $FILEPATH) || exit
-touch $FILEPATH
-
-# Clean up.
-umount $MOUNTPOINT
-rm -rf $MOUNTPOINT
diff --git a/pkg/sentry/fsimpl/ext/block_map_file.go b/pkg/sentry/fsimpl/ext/block_map_file.go
deleted file mode 100644
index 1165234f9..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_file.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "math"
-
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-const (
- // numDirectBlks is the number of direct blocks in ext block map inodes.
- numDirectBlks = 12
-)
-
-// blockMapFile is a type of regular file which uses direct/indirect block
-// addressing to store file data. This was deprecated in ext4.
-type blockMapFile struct {
- regFile regularFile
-
- // directBlks are the direct block numbers. The physical blocks pointed to
- // by these hold file data. Contains file blocks 0 to 11.
- directBlks [numDirectBlks]primitive.Uint32
-
- // indirectBlk is the physical block which contains (blkSize/4) direct block
- // numbers (as uint32 integers).
- indirectBlk primitive.Uint32
-
- // doubleIndirectBlk is the physical block which contains (blkSize/4) indirect
- // block numbers (as uint32 integers).
- doubleIndirectBlk primitive.Uint32
-
- // tripleIndirectBlk is the physical block which contains (blkSize/4) doubly
- // indirect block numbers (as uint32 integers).
- tripleIndirectBlk primitive.Uint32
-
- // coverage at (i)th index indicates the amount of file data a node at
- // height (i) covers. Height 0 is the direct block.
- coverage [4]uint64
-}
-
-// Compiles only if blockMapFile implements io.ReaderAt.
-var _ io.ReaderAt = (*blockMapFile)(nil)
-
-// newBlockMapFile is the blockMapFile constructor. It initializes the file to
-// physical blocks map with (at most) the first 12 (direct) blocks.
-func newBlockMapFile(args inodeArgs) (*blockMapFile, error) {
- file := &blockMapFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
-
- for i := uint(0); i < 4; i++ {
- file.coverage[i] = getCoverage(file.regFile.inode.blkSize, i)
- }
-
- blkMap := file.regFile.inode.diskInode.Data()
- for i := 0; i < numDirectBlks; i++ {
- file.directBlks[i].UnmarshalBytes(blkMap[i*4 : (i+1)*4])
- }
- file.indirectBlk.UnmarshalBytes(blkMap[numDirectBlks*4 : (numDirectBlks+1)*4])
- file.doubleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+1)*4 : (numDirectBlks+2)*4])
- file.tripleIndirectBlk.UnmarshalBytes(blkMap[(numDirectBlks+2)*4 : (numDirectBlks+3)*4])
- return file, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *blockMapFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, syserror.EINVAL
- }
-
- offset := uint64(off)
- size := f.regFile.inode.diskInode.Size()
- if offset >= size {
- return 0, io.EOF
- }
-
- // dirBlksEnd is the file offset until which direct blocks cover file data.
- // Direct blocks cover 0 <= file offset < dirBlksEnd.
- dirBlksEnd := numDirectBlks * f.coverage[0]
-
- // indirBlkEnd is the file offset until which the indirect block covers file
- // data. The indirect block covers dirBlksEnd <= file offset < indirBlkEnd.
- indirBlkEnd := dirBlksEnd + f.coverage[1]
-
- // doubIndirBlkEnd is the file offset until which the double indirect block
- // covers file data. The double indirect block covers the range
- // indirBlkEnd <= file offset < doubIndirBlkEnd.
- doubIndirBlkEnd := indirBlkEnd + f.coverage[2]
-
- read := 0
- toRead := len(dst)
- if uint64(toRead)+offset > size {
- toRead = int(size - offset)
- }
- for read < toRead {
- var err error
- var curR int
-
- // Figure out which block to delegate the read to.
- switch {
- case offset < dirBlksEnd:
- // Direct block.
- curR, err = f.read(uint32(f.directBlks[offset/f.regFile.inode.blkSize]), offset%f.regFile.inode.blkSize, 0, dst[read:])
- case offset < indirBlkEnd:
- // Indirect block.
- curR, err = f.read(uint32(f.indirectBlk), offset-dirBlksEnd, 1, dst[read:])
- case offset < doubIndirBlkEnd:
- // Doubly indirect block.
- curR, err = f.read(uint32(f.doubleIndirectBlk), offset-indirBlkEnd, 2, dst[read:])
- default:
- // Triply indirect block.
- curR, err = f.read(uint32(f.tripleIndirectBlk), offset-doubIndirBlkEnd, 3, dst[read:])
- }
-
- read += curR
- offset += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- if read < len(dst) {
- return read, io.EOF
- }
- return read, nil
-}
-
-// read is the recursive step of the ReadAt function. It relies on knowing the
-// current node's location on disk (curPhyBlk) and its height in the block map
-// tree. A height of 0 indicates that the current node actually holds file
-// data. relFileOff is the offset from which reading starts under the current
-// node; it is entirely relative to the current node.
-func (f *blockMapFile) read(curPhyBlk uint32, relFileOff uint64, height uint, dst []byte) (int, error) {
- curPhyBlkOff := int64(curPhyBlk) * int64(f.regFile.inode.blkSize)
- if height == 0 {
- toRead := int(f.regFile.inode.blkSize - relFileOff)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], curPhyBlkOff+int64(relFileOff))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
- }
-
- childCov := f.coverage[height-1]
- startIdx := relFileOff / childCov
- endIdx := f.regFile.inode.blkSize / 4 // This is exclusive.
- wantEndIdx := (relFileOff + uint64(len(dst))) / childCov
- wantEndIdx++ // Make this exclusive.
- if wantEndIdx < endIdx {
- endIdx = wantEndIdx
- }
-
- read := 0
- curChildOff := relFileOff % childCov
- for i := startIdx; i < endIdx; i++ {
- var childPhyBlk primitive.Uint32
- err := readFromDisk(f.regFile.inode.fs.dev, curPhyBlkOff+int64(i*4), &childPhyBlk)
- if err != nil {
- return read, err
- }
-
- n, err := f.read(uint32(childPhyBlk), curChildOff, height-1, dst[read:])
- read += n
- if err != nil {
- return read, err
- }
-
- curChildOff = 0
- }
-
- return read, nil
-}
-
-// getCoverage returns the number of bytes a node at the given height covers.
-// Height 0 is the file data block itself. Height 1 is the indirect block.
-//
-// Formula: blkSize * ((blkSize / 4)^height)
-func getCoverage(blkSize uint64, height uint) uint64 {
- return blkSize * uint64(math.Pow(float64(blkSize/4), float64(height)))
-}
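To make the coverage formula above concrete, here is a standalone sketch (assuming an illustrative 4 KiB block size, which is not taken from this package) that computes the bytes addressed at each height using integer arithmetic equivalent to `getCoverage`:

```go
package main

import "fmt"

// coverage mirrors getCoverage above: blkSize * (blkSize/4)^height, where
// blkSize/4 is the number of 4-byte block pointers one block can hold.
func coverage(blkSize uint64, height uint) uint64 {
	c := blkSize
	for i := uint(0); i < height; i++ {
		c *= blkSize / 4
	}
	return c
}

func main() {
	const blkSize = 4096 // illustrative 4 KiB block size
	for h := uint(0); h <= 3; h++ {
		fmt.Printf("height %d covers %d bytes\n", h, coverage(blkSize, h))
	}
	// Output: 4096 (4 KiB), 4194304 (4 MiB), 4294967296 (4 GiB), 4398046511104 (4 TiB).
}
```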
diff --git a/pkg/sentry/fsimpl/ext/block_map_test.go b/pkg/sentry/fsimpl/ext/block_map_test.go
deleted file mode 100644
index ed98b482e..000000000
--- a/pkg/sentry/fsimpl/ext/block_map_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-// These consts are for mocking the block map tree.
-const (
- mockBMBlkSize = uint32(16)
- mockBMDiskSize = 2500
-)
-
-// TestBlockMapReader stress tests block map reader functionality. It reads
-// from every possible offset in the block map structure to the end of the file.
-func TestBlockMapReader(t *testing.T) {
- mockBMFile, want := blockMapSetUp(t)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockBMFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// blkNumGen is a number generator which gives block numbers for building the
-// block map file on disk. It gives unique numbers in a random order, which
-// helps create an extremely fragmented filesystem.
-type blkNumGen struct {
- nums []uint32
-}
-
-// newBlkNumGen is the blkNumGen constructor.
-func newBlkNumGen() *blkNumGen {
- blkNums := &blkNumGen{}
- lim := mockBMDiskSize / mockBMBlkSize
- blkNums.nums = make([]uint32, lim)
- for i := uint32(0); i < lim; i++ {
- blkNums.nums[i] = i
- }
-
- rand.Shuffle(int(lim), func(i, j int) {
- blkNums.nums[i], blkNums.nums[j] = blkNums.nums[j], blkNums.nums[i]
- })
- return blkNums
-}
-
-// next returns the next random block number.
-func (n *blkNumGen) next() uint32 {
- ret := n.nums[0]
- n.nums = n.nums[1:]
- return ret
-}
-
-// blockMapSetUp creates a mock disk and a block map file. It initializes the
-// block map file with 12 direct blocks, 1 indirect block, 1 doubly indirect
-// block and 1 triply indirect block (i.e. filled to capacity). It initializes
-// the disk to reflect the inode. It also returns the file data that the inode
-// covers, which is what is written to disk.
-func blockMapSetUp(t *testing.T) (*blockMapFile, []byte) {
- mockDisk := make([]byte, mockBMDiskSize)
- var fileData []byte
- blkNums := newBlkNumGen()
- off := 0
- data := make([]byte, (numDirectBlks+3)*(*primitive.Uint32)(nil).SizeBytes())
-
- // Write the direct blocks.
- for i := 0; i < numDirectBlks; i++ {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(data[off:])
- off += curBlkNum.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(curBlkNum), 0, blkNums)...)
- }
-
- // Write to indirect block.
- indirectBlk := primitive.Uint32(blkNums.next())
- indirectBlk.MarshalBytes(data[off:])
- off += indirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(indirectBlk), 1, blkNums)...)
-
- // Write to double indirect block.
- doublyIndirectBlk := primitive.Uint32(blkNums.next())
- doublyIndirectBlk.MarshalBytes(data[off:])
- off += doublyIndirectBlk.SizeBytes()
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(doublyIndirectBlk), 2, blkNums)...)
-
- // Write to triple indirect block.
- triplyIndirectBlk := primitive.Uint32(blkNums.next())
- triplyIndirectBlk.MarshalBytes(data[off:])
- fileData = append(fileData, writeFileDataToBlock(mockDisk, uint32(triplyIndirectBlk), 3, blkNums)...)
-
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
- SizeLo: getMockBMFileFize(),
- },
- },
- blkSize: uint64(mockBMBlkSize),
- }
- copy(args.diskInode.Data(), data)
-
- mockFile, err := newBlockMapFile(args)
- if err != nil {
- t.Fatalf("newBlockMapFile failed: %v", err)
- }
- return mockFile, fileData
-}
-
-// writeFileDataToBlock writes random bytes to the block on disk.
-func writeFileDataToBlock(disk []byte, blkNum uint32, height uint, blkNums *blkNumGen) []byte {
- if height == 0 {
- start := blkNum * mockBMBlkSize
- end := start + mockBMBlkSize
- rand.Read(disk[start:end])
- return disk[start:end]
- }
-
- var fileData []byte
- for off := blkNum * mockBMBlkSize; off < (blkNum+1)*mockBMBlkSize; off += 4 {
- curBlkNum := primitive.Uint32(blkNums.next())
- curBlkNum.MarshalBytes(disk[off : off+4])
- fileData = append(fileData, writeFileDataToBlock(disk, uint32(curBlkNum), height-1, blkNums)...)
- }
- return fileData
-}
-
-// getMockBMFileFize gets the size of the mock block map file which is used for
-// testing.
-func getMockBMFileFize() uint32 {
- return uint32(numDirectBlks*getCoverage(uint64(mockBMBlkSize), 0) + getCoverage(uint64(mockBMBlkSize), 1) + getCoverage(uint64(mockBMBlkSize), 2) + getCoverage(uint64(mockBMBlkSize), 3))
-}
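For the mock geometry used above (16-byte blocks, hence 4 block pointers per block), the size reported by the helper at the end works out to 1536 bytes. A small sketch of that arithmetic, for illustration only:

```go
package main

import "fmt"

func main() {
	const blkSize = 16             // mockBMBlkSize
	const ptrsPerBlk = blkSize / 4 // 4-byte pointers per block

	direct := 12 * blkSize                              // 192 bytes via the 12 direct blocks
	indirect := blkSize * ptrsPerBlk                    // 64 bytes via the indirect block
	doubleIndirect := blkSize * ptrsPerBlk * ptrsPerBlk // 256 bytes
	tripleIndirect := doubleIndirect * ptrsPerBlk       // 1024 bytes

	fmt.Println(direct + indirect + doubleIndirect + tripleIndirect) // 1536
}
```

This matches the sum of the coverage terms returned by the size helper above.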
diff --git a/pkg/sentry/fsimpl/ext/dentry.go b/pkg/sentry/fsimpl/ext/dentry.go
deleted file mode 100644
index 9bfed883a..000000000
--- a/pkg/sentry/fsimpl/ext/dentry.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
-)
-
-// dentry implements vfs.DentryImpl.
-//
-// +stateify savable
-type dentry struct {
- vfsd vfs.Dentry
-
- // Protected by filesystem.mu.
- parent *dentry
- name string
-
- // inode is the inode represented by this dentry. Multiple Dentries may
- // share a single non-directory Inode (with hard links). inode is
- // immutable.
- inode *inode
-}
-
-// Compiles only if dentry implements vfs.DentryImpl.
-var _ vfs.DentryImpl = (*dentry)(nil)
-
-// newDentry is the dentry constructor.
-func newDentry(in *inode) *dentry {
- d := &dentry{
- inode: in,
- }
- d.vfsd.Init(d)
- return d
-}
-
-// IncRef implements vfs.DentryImpl.IncRef.
-func (d *dentry) IncRef() {
- d.inode.incRef()
-}
-
-// TryIncRef implements vfs.DentryImpl.TryIncRef.
-func (d *dentry) TryIncRef() bool {
- return d.inode.tryIncRef()
-}
-
-// DecRef implements vfs.DentryImpl.DecRef.
-func (d *dentry) DecRef(ctx context.Context) {
- // FIXME(b/134676337): filesystem.mu may not be locked as required by
- // inode.decRef().
- d.inode.decRef()
-}
-
-// InotifyWithParent implements vfs.DentryImpl.InotifyWithParent.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) InotifyWithParent(ctx context.Context, events, cookie uint32, et vfs.EventType) {}
-
-// Watches implements vfs.DentryImpl.Watches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) Watches() *vfs.Watches {
- return nil
-}
-
-// OnZeroWatches implements vfs.Dentry.OnZeroWatches.
-//
-// TODO(b/134676337): Implement inotify.
-func (d *dentry) OnZeroWatches(context.Context) {}
diff --git a/pkg/sentry/fsimpl/ext/directory.go b/pkg/sentry/fsimpl/ext/directory.go
deleted file mode 100644
index 512b70ede..000000000
--- a/pkg/sentry/fsimpl/ext/directory.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// directory represents a directory inode. It holds the childList in memory.
-//
-// +stateify savable
-type directory struct {
- inode inode
-
- // childCache maps filenames to dentries for those children whose dentries
- // have been instantiated. childCache is protected by filesystem.mu.
- childCache map[string]*dentry
-
- // mu serializes the changes to childList.
- // Lock Order (outermost locks must be taken first):
- // directory.mu
- // filesystem.mu
- mu sync.Mutex `state:"nosave"`
-
- // childList is a list containing (1) child dirents and (2) fake dirents
- // (with diskDirent == nil) that represent the iteration position of
- // directoryFDs. childList is used to support directoryFD.IterDirents()
- // efficiently. childList is protected by mu.
- childList direntList
-
- // childMap maps the child's filename to the dirent structure stored in
- // childList. This adds some data replication but speeds up path traversal.
- // For consistency, key == childMap[key].diskDirent.FileName().
- // Immutable.
- childMap map[string]*dirent
-}
-
-// newDirectory is the directory constructor.
-func newDirectory(args inodeArgs, newDirent bool) (*directory, error) {
- file := &directory{
- childCache: make(map[string]*dentry),
- childMap: make(map[string]*dirent),
- }
- file.inode.init(args, file)
-
- // Initialize childList by reading dirents from the underlying file.
- if args.diskInode.Flags().Index {
- // TODO(b/134676337): Support hash tree directories. Currently only the '.'
- // and '..' entries are read in.
-
- // Users cannot navigate this hash tree directory yet.
- log.Warningf("hash tree directory being used which is unsupported")
- return file, nil
- }
-
- // The dirents are organized in a linear array in the file data.
- // Extract the file data and decode the dirents.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- // buf is used as scratch space for reading in dirents from disk and
- // unmarshalling them into dirent structs.
- buf := make([]byte, disklayout.DirentSize)
- size := args.diskInode.Size()
- for off, inc := uint64(0), uint64(0); off < size; off += inc {
- toRead := size - off
- if toRead > disklayout.DirentSize {
- toRead = disklayout.DirentSize
- }
- if n, err := regFile.impl.ReadAt(buf[:toRead], int64(off)); uint64(n) < toRead {
- return nil, err
- }
-
- var curDirent dirent
- if newDirent {
- curDirent.diskDirent = &disklayout.DirentNew{}
- } else {
- curDirent.diskDirent = &disklayout.DirentOld{}
- }
- curDirent.diskDirent.UnmarshalBytes(buf)
-
- if curDirent.diskDirent.Inode() != 0 && len(curDirent.diskDirent.FileName()) != 0 {
- // An inode number or name length of 0 indicates an unused dirent.
- file.childList.PushBack(&curDirent)
- file.childMap[curDirent.diskDirent.FileName()] = &curDirent
- }
-
- // The next dirent is placed exactly after this dirent record on disk.
- inc = uint64(curDirent.diskDirent.RecordSize())
- }
-
- return file, nil
-}
-
-func (i *inode) isDir() bool {
- _, ok := i.impl.(*directory)
- return ok
-}
-
-// dirent is the directory.childList node.
-//
-// +stateify savable
-type dirent struct {
- diskDirent disklayout.Dirent
-
- // direntEntry links dirents into their parent directory.childList.
- direntEntry
-}
-
-// directoryFD represents a directory file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type directoryFD struct {
- fileDescription
- vfs.DirectoryFileDescriptionDefaultImpl
-
- // Protected by directory.mu.
- iter *dirent
- off int64
-}
-
-// Compiles only if directoryFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*directoryFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *directoryFD) Release(ctx context.Context) {
- if fd.iter == nil {
- return
- }
-
- dir := fd.inode().impl.(*directory)
- dir.mu.Lock()
- dir.childList.Remove(fd.iter)
- dir.mu.Unlock()
- fd.iter = nil
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *directoryFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- extfs := fd.filesystem()
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Ensure that fd.iter exists and is not linked into dir.childList.
- var child *dirent
- if fd.iter == nil {
- // Start iteration at the beginning of dir.
- child = dir.childList.Front()
- fd.iter = &dirent{}
- } else {
- // Continue iteration from where we left off.
- child = fd.iter.Next()
- dir.childList.Remove(fd.iter)
- }
- for ; child != nil; child = child.Next() {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- childType, ok := child.diskDirent.FileType()
- if !ok {
- // We will need to read the inode off disk. Do not increment
- // ref count here because this inode is not being added to the
- // dentry tree.
- extfs.mu.Lock()
- childInode, err := extfs.getOrCreateInodeLocked(child.diskDirent.Inode())
- extfs.mu.Unlock()
- if err != nil {
- // Usage of the file description after the error is
- // undefined. This implementation would continue reading
- // from the next dirent.
- fd.off++
- dir.childList.InsertAfter(child, fd.iter)
- return err
- }
- childType = fs.ToInodeType(childInode.diskInode.Mode().FileType())
- }
-
- if err := cb.Handle(vfs.Dirent{
- Name: child.diskDirent.FileName(),
- Type: fs.ToDirentType(childType),
- Ino: uint64(child.diskDirent.Inode()),
- NextOff: fd.off + 1,
- }); err != nil {
- dir.childList.InsertBefore(child, fd.iter)
- return err
- }
- fd.off++
- }
- }
- dir.childList.PushBack(fd.iter)
- return nil
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- if whence != linux.SEEK_SET && whence != linux.SEEK_CUR {
- return 0, syserror.EINVAL
- }
-
- dir := fd.inode().impl.(*directory)
-
- dir.mu.Lock()
- defer dir.mu.Unlock()
-
- // Find resulting offset.
- if whence == linux.SEEK_CUR {
- offset += fd.off
- }
-
- if offset < 0 {
- // lseek(2) specifies that EINVAL should be returned if the resulting offset
- // is negative.
- return 0, syserror.EINVAL
- }
-
- n := int64(len(dir.childMap))
- realWantOff := offset
- if realWantOff > n {
- realWantOff = n
- }
- realCurOff := fd.off
- if realCurOff > n {
- realCurOff = n
- }
-
- // Ensure that fd.iter exists and is linked into dir.childList so we can
- // intelligently seek from the optimal position.
- if fd.iter == nil {
- fd.iter = &dirent{}
- dir.childList.PushFront(fd.iter)
- }
-
- // Guess that iterating from the current position is optimal.
- child := fd.iter
- diff := realWantOff - realCurOff // Shows direction and magnitude of travel.
-
- // See if starting from the beginning or end is better.
- abDiff := diff
- if diff < 0 {
- abDiff = -diff
- }
- if abDiff > realWantOff {
- // Starting from the beginning is best.
- child = dir.childList.Front()
- diff = realWantOff
- } else if abDiff > (n - realWantOff) {
- // Starting from the end is best.
- child = dir.childList.Back()
- // (n - 1) because the last non-nil dirent represents the (n-1)th offset.
- diff = realWantOff - (n - 1)
- }
-
- for child != nil {
- // Skip other directoryFD iterators.
- if child.diskDirent != nil {
- if diff == 0 {
- if child != fd.iter {
- dir.childList.Remove(fd.iter)
- dir.childList.InsertBefore(child, fd.iter)
- }
-
- fd.off = offset
- return offset, nil
- }
-
- if diff < 0 {
- diff++
- child = child.Prev()
- } else {
- diff--
- child = child.Next()
- }
- continue
- }
-
- if diff < 0 {
- child = child.Prev()
- } else {
- child = child.Next()
- }
- }
-
- // Reaching here indicates that the offset is beyond the end of the childList.
- dir.childList.Remove(fd.iter)
- dir.childList.PushBack(fd.iter)
- fd.off = offset
- return offset, nil
-}
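The Seek implementation above chooses between continuing from the current iterator position, restarting from the front of the child list, or walking backwards from the end, based on the direction and magnitude of travel. A hedged, standalone sketch of just that decision; the function and the concrete values are illustrative and not part of the deleted code:

```go
package main

import "fmt"

// seekStart mirrors the decision in directoryFD.Seek: travel from the current
// position unless starting from the front or the back of the list is shorter.
func seekStart(n, curOff, wantOff int64) string {
	diff := wantOff - curOff
	abDiff := diff
	if diff < 0 {
		abDiff = -diff
	}
	switch {
	case abDiff > wantOff:
		return "front"
	case abDiff > n-wantOff:
		return "back"
	default:
		return "current"
	}
}

func main() {
	// With 100 children: seeking 90 -> 85 stays near the current position,
	// 90 -> 2 restarts from the front, and 10 -> 95 jumps to the back.
	fmt.Println(seekStart(100, 90, 85)) // current
	fmt.Println(seekStart(100, 90, 2))  // front
	fmt.Println(seekStart(100, 10, 95)) // back
}
```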
diff --git a/pkg/sentry/fsimpl/ext/disklayout/BUILD b/pkg/sentry/fsimpl/ext/disklayout/BUILD
deleted file mode 100644
index d98a05dd8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/BUILD
+++ /dev/null
@@ -1,48 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "disklayout",
- srcs = [
- "block_group.go",
- "block_group_32.go",
- "block_group_64.go",
- "dirent.go",
- "dirent_new.go",
- "dirent_old.go",
- "disklayout.go",
- "extent.go",
- "inode.go",
- "inode_new.go",
- "inode_old.go",
- "superblock.go",
- "superblock_32.go",
- "superblock_64.go",
- "superblock_old.go",
- "test_utils.go",
- ],
- marshal = True,
- visibility = ["//pkg/sentry:internal"],
- deps = [
- "//pkg/abi/linux",
- "//pkg/marshal",
- "//pkg/sentry/fs",
- "//pkg/sentry/kernel/auth",
- "//pkg/sentry/kernel/time",
- ],
-)
-
-go_test(
- name = "disklayout_test",
- size = "small",
- srcs = [
- "block_group_test.go",
- "dirent_test.go",
- "extent_test.go",
- "inode_test.go",
- "superblock_test.go",
- ],
- library = ":disklayout",
- deps = ["//pkg/sentry/kernel/time"],
-)
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group.go b/pkg/sentry/fsimpl/ext/disklayout/block_group.go
deleted file mode 100644
index 0d56ae9da..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// BlockGroup represents a Linux ext block group descriptor. An ext file system
-// is split into a series of block groups. This interface provides an access
-// layer to the information needed to access and use a block group.
-//
-// Location:
-// - The block group descriptor table is always placed in the blocks
-// immediately after the block containing the superblock.
-// - The 1st block group descriptor in the original table is in the
-// (sb.FirstDataBlock() + 1)th block.
-// - See SuperBlock docs to see where the block group descriptor table is
-// replicated.
-// - sb.BgDescSize() must be used as the block group descriptor entry size
-// while reading the table from disk.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#block-group-descriptors.
-type BlockGroup interface {
- marshal.Marshallable
-
- // InodeTable returns the absolute block number of the block containing the
- // inode table. This points to an array of Inode structs. Inode tables are
- // statically allocated at mkfs time. The superblock records the number of
- // inodes per group (length of this table) and the size of each inode struct.
- InodeTable() uint64
-
- // BlockBitmap returns the absolute block number of the block containing the
- // block bitmap. This bitmap tracks the usage of data blocks within this block
- // group and has its own checksum.
- BlockBitmap() uint64
-
- // InodeBitmap returns the absolute block number of the block containing the
- // inode bitmap. This bitmap tracks the usage of this group's inode table
- // entries and has its own checksum.
- InodeBitmap() uint64
-
- // ExclusionBitmap returns the absolute block number of the snapshot exclusion
- // bitmap.
- ExclusionBitmap() uint64
-
- // FreeBlocksCount returns the number of free blocks in the group.
- FreeBlocksCount() uint32
-
- // FreeInodesCount returns the number of free inodes in the group.
- FreeInodesCount() uint32
-
- // DirectoryCount returns the number of inodes that represent directories
- // under this block group.
- DirectoryCount() uint32
-
- // UnusedInodeCount returns the number of unused inodes beyond the last used
- // inode in this group's inode table. As a result, we needn’t scan past the
- // (InodesPerGroup - UnusedInodeCount())th entry in the inode table.
- UnusedInodeCount() uint32
-
- // BlockBitmapChecksum returns the block bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- BlockBitmapChecksum() uint32
-
- // InodeBitmapChecksum returns the inode bitmap checksum. This is calculated
- // using crc32c(FS UUID + group number + entire bitmap).
- InodeBitmapChecksum() uint32
-
- // Checksum returns this block group's checksum.
- //
- // If SbMetadataCsum feature is set:
- // - checksum is crc32c(FS UUID + group number + group descriptor
- // structure) & 0xFFFF.
- //
- // If SbGdtCsum feature is set:
- // - checksum is crc16(FS UUID + group number + group descriptor
- // structure).
- //
- // SbMetadataCsum and SbGdtCsum should not both be set.
- // If they are, Linux warns and asks to run fsck.
- Checksum() uint16
-
- // Flags returns BGFlags which represents the block group flags.
- Flags() BGFlags
-}
-
-// These are the different block group flags.
-const (
- // BgInodeUninit indicates that inode table and bitmap are not initialized.
- BgInodeUninit uint16 = 0x1
-
- // BgBlockUninit indicates that block bitmap is not initialized.
- BgBlockUninit uint16 = 0x2
-
- // BgInodeZeroed indicates that inode table is zeroed.
- BgInodeZeroed uint16 = 0x4
-)
-
-// BGFlags represents all the different combinations of block group flags.
-type BGFlags struct {
- InodeUninit bool
- BlockUninit bool
- InodeZeroed bool
-}
-
-// ToInt converts a BGFlags struct back to its 16-bit representation.
-func (f BGFlags) ToInt() uint16 {
- var res uint16
-
- if f.InodeUninit {
- res |= BgInodeUninit
- }
- if f.BlockUninit {
- res |= BgBlockUninit
- }
- if f.InodeZeroed {
- res |= BgInodeZeroed
- }
-
- return res
-}
-
-// BGFlagsFromInt converts the 16-bit flag representation to a BGFlags struct.
-func BGFlagsFromInt(flags uint16) BGFlags {
- return BGFlags{
- InodeUninit: flags&BgInodeUninit > 0,
- BlockUninit: flags&BgBlockUninit > 0,
- InodeZeroed: flags&BgInodeZeroed > 0,
- }
-}
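A short sketch of how the flag helpers above round-trip between the raw 16-bit on-disk field and `BGFlags`; it assumes the `disklayout` package as it existed before this removal:

```go
package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
)

func main() {
	// Raw block group flags with BgInodeUninit (0x1) and BgInodeZeroed (0x4) set.
	raw := disklayout.BgInodeUninit | disklayout.BgInodeZeroed

	flags := disklayout.BGFlagsFromInt(raw)
	fmt.Println(flags.InodeUninit, flags.BlockUninit, flags.InodeZeroed) // true false true

	// ToInt reconstructs the same 16-bit value.
	fmt.Println(flags.ToInt() == raw) // true
}
```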
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
deleted file mode 100644
index a35fa22a0..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup32Bit emulates the first half of struct ext4_group_desc in
-// fs/ext4/ext4.h. It is the block group descriptor struct for ext2, ext3 and
-// 32-bit ext4 filesystems. It implements BlockGroup interface.
-//
-// +marshal
-type BlockGroup32Bit struct {
- BlockBitmapLo uint32
- InodeBitmapLo uint32
- InodeTableLo uint32
- FreeBlocksCountLo uint16
- FreeInodesCountLo uint16
- UsedDirsCountLo uint16
- FlagsRaw uint16
- ExcludeBitmapLo uint32
- BlockBitmapChecksumLo uint16
- InodeBitmapChecksumLo uint16
- ItableUnusedLo uint16
- ChecksumRaw uint16
-}
-
-// Compiles only if BlockGroup32Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup32Bit)(nil)
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup32Bit) InodeTable() uint64 { return uint64(bg.InodeTableLo) }
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup32Bit) BlockBitmap() uint64 { return uint64(bg.BlockBitmapLo) }
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup32Bit) InodeBitmap() uint64 { return uint64(bg.InodeBitmapLo) }
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup32Bit) ExclusionBitmap() uint64 { return uint64(bg.ExcludeBitmapLo) }
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup32Bit) FreeBlocksCount() uint32 { return uint32(bg.FreeBlocksCountLo) }
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup32Bit) FreeInodesCount() uint32 { return uint32(bg.FreeInodesCountLo) }
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup32Bit) DirectoryCount() uint32 { return uint32(bg.UsedDirsCountLo) }
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup32Bit) UnusedInodeCount() uint32 { return uint32(bg.ItableUnusedLo) }
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup32Bit) BlockBitmapChecksum() uint32 { return uint32(bg.BlockBitmapChecksumLo) }
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup32Bit) InodeBitmapChecksum() uint32 { return uint32(bg.InodeBitmapChecksumLo) }
-
-// Checksum implements BlockGroup.Checksum.
-func (bg *BlockGroup32Bit) Checksum() uint16 { return bg.ChecksumRaw }
-
-// Flags implements BlockGroup.Flags.
-func (bg *BlockGroup32Bit) Flags() BGFlags { return BGFlagsFromInt(bg.FlagsRaw) }
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
deleted file mode 100644
index d54d1d345..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_64.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// BlockGroup64Bit emulates struct ext4_group_desc in fs/ext4/ext4.h.
-// It is the block group descriptor struct for 64-bit ext4 filesystems.
-// It implements BlockGroup interface. It is an extension of the 32-bit
-// version of BlockGroup.
-//
-// +marshal
-type BlockGroup64Bit struct {
- // We embed the 32-bit struct here because the 64-bit version is just an
- // extension of the 32-bit version.
- BlockGroup32Bit
-
- // 64-bit specific fields.
- BlockBitmapHi uint32
- InodeBitmapHi uint32
- InodeTableHi uint32
- FreeBlocksCountHi uint16
- FreeInodesCountHi uint16
- UsedDirsCountHi uint16
- ItableUnusedHi uint16
- ExcludeBitmapHi uint32
- BlockBitmapChecksumHi uint16
- InodeBitmapChecksumHi uint16
- _ uint32 // Padding to 64 bytes.
-}
-
-// Compiles only if BlockGroup64Bit implements BlockGroup.
-var _ BlockGroup = (*BlockGroup64Bit)(nil)
-
-// Methods to override. Checksum() and Flags() are not overridden.
-
-// InodeTable implements BlockGroup.InodeTable.
-func (bg *BlockGroup64Bit) InodeTable() uint64 {
- return (uint64(bg.InodeTableHi) << 32) | uint64(bg.InodeTableLo)
-}
-
-// BlockBitmap implements BlockGroup.BlockBitmap.
-func (bg *BlockGroup64Bit) BlockBitmap() uint64 {
- return (uint64(bg.BlockBitmapHi) << 32) | uint64(bg.BlockBitmapLo)
-}
-
-// InodeBitmap implements BlockGroup.InodeBitmap.
-func (bg *BlockGroup64Bit) InodeBitmap() uint64 {
- return (uint64(bg.InodeBitmapHi) << 32) | uint64(bg.InodeBitmapLo)
-}
-
-// ExclusionBitmap implements BlockGroup.ExclusionBitmap.
-func (bg *BlockGroup64Bit) ExclusionBitmap() uint64 {
- return (uint64(bg.ExcludeBitmapHi) << 32) | uint64(bg.ExcludeBitmapLo)
-}
-
-// FreeBlocksCount implements BlockGroup.FreeBlocksCount.
-func (bg *BlockGroup64Bit) FreeBlocksCount() uint32 {
- return (uint32(bg.FreeBlocksCountHi) << 16) | uint32(bg.FreeBlocksCountLo)
-}
-
-// FreeInodesCount implements BlockGroup.FreeInodesCount.
-func (bg *BlockGroup64Bit) FreeInodesCount() uint32 {
- return (uint32(bg.FreeInodesCountHi) << 16) | uint32(bg.FreeInodesCountLo)
-}
-
-// DirectoryCount implements BlockGroup.DirectoryCount.
-func (bg *BlockGroup64Bit) DirectoryCount() uint32 {
- return (uint32(bg.UsedDirsCountHi) << 16) | uint32(bg.UsedDirsCountLo)
-}
-
-// UnusedInodeCount implements BlockGroup.UnusedInodeCount.
-func (bg *BlockGroup64Bit) UnusedInodeCount() uint32 {
- return (uint32(bg.ItableUnusedHi) << 16) | uint32(bg.ItableUnusedLo)
-}
-
-// BlockBitmapChecksum implements BlockGroup.BlockBitmapChecksum.
-func (bg *BlockGroup64Bit) BlockBitmapChecksum() uint32 {
- return (uint32(bg.BlockBitmapChecksumHi) << 16) | uint32(bg.BlockBitmapChecksumLo)
-}
-
-// InodeBitmapChecksum implements BlockGroup.InodeBitmapChecksum.
-func (bg *BlockGroup64Bit) InodeBitmapChecksum() uint32 {
- return (uint32(bg.InodeBitmapChecksumHi) << 16) | uint32(bg.InodeBitmapChecksumLo)
-	return (uint32(bg.InodeBitmapChecksumHi) << 16) | uint32(bg.InodeBitmapChecksumLo)
-}
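All of the 64-bit accessors above follow the same pattern: put the Lo half in the low bits and the Hi half in the high bits. A minimal standalone sketch of that stitching (independent of the deleted package) for both the 64-bit block addresses and the 32-bit counters:

package main

import "fmt"

// join64 rebuilds a block address from its 32-bit halves, as
// BlockGroup64Bit.InodeTable does with InodeTableHi/InodeTableLo.
func join64(hi, lo uint32) uint64 {
	return (uint64(hi) << 32) | uint64(lo)
}

// join32 rebuilds a counter from its 16-bit halves, as
// BlockGroup64Bit.FreeBlocksCount does with FreeBlocksCountHi/Lo.
func join32(hi, lo uint16) uint32 {
	return (uint32(hi) << 16) | uint32(lo)
}

func main() {
	fmt.Printf("%#x\n", join64(0x1, 0x10)) // 0x100000010
	fmt.Printf("%#x\n", join32(0x2, 0x5))  // 0x20005
}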
diff --git a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go b/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
deleted file mode 100644
index e4ce484e4..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/block_group_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestBlockGroupSize tests that the block group descriptor structs are of the
-// correct size.
-func TestBlockGroupSize(t *testing.T) {
- var bgSmall BlockGroup32Bit
- assertSize(t, &bgSmall, 32)
- var bgBig BlockGroup64Bit
- assertSize(t, &bgBig, 64)
-}
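assertSize itself comes from a test helper that is not part of this diff. A rough standalone equivalent of what the test checks, assuming encoding/binary's packed size is a fair stand-in for the marshalled size and using a local mirror of the standard 32-byte descriptor layout (the field set below is an illustration, not the deleted struct itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// bgDesc32 mirrors the standard 32-byte ext4 group descriptor layout that
// BlockGroup32Bit models; it is a local stand-in for this sketch only.
type bgDesc32 struct {
	BlockBitmapLo         uint32
	InodeBitmapLo         uint32
	InodeTableLo          uint32
	FreeBlocksCountLo     uint16
	FreeInodesCountLo     uint16
	UsedDirsCountLo       uint16
	FlagsRaw              uint16
	ExcludeBitmapLo       uint32
	BlockBitmapChecksumLo uint16
	InodeBitmapChecksumLo uint16
	ItableUnusedLo        uint16
	ChecksumRaw           uint16
}

func main() {
	// binary.Size reports the packed size of the fixed-width fields: 32 bytes.
	fmt.Println(binary.Size(bgDesc32{}))
}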
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent.go b/pkg/sentry/fsimpl/ext/disklayout/dirent.go
deleted file mode 100644
index 568c8cb4c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-const (
- // MaxFileName is the maximum length of an ext fs file's name.
- MaxFileName = 255
-
- // DirentSize is the size of ext dirent structures.
- DirentSize = 263
-)
-
-var (
- // inodeTypeByFileType maps ext4 file types to vfs inode types.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#ftype.
- inodeTypeByFileType = map[uint8]fs.InodeType{
- 0: fs.Anonymous,
- 1: fs.RegularFile,
- 2: fs.Directory,
- 3: fs.CharacterDevice,
- 4: fs.BlockDevice,
- 5: fs.Pipe,
- 6: fs.Socket,
- 7: fs.Symlink,
- }
-)
-
-// The Dirent interface should be implemented by structs representing ext
-// directory entries. These are for the linear classical directories which
-// just store a list of dirent structs. A directory is a series of data blocks
-// where each data block contains a linear array of dirents. The last entry
-// of the block has a record size that takes it to the end of the block. The
-// end of the directory is when you read dirInode.Size() bytes from the blocks.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#linear-classic-directories.
-type Dirent interface {
- marshal.Marshallable
-
- // Inode returns the absolute inode number of the underlying inode.
- // Inode number 0 signifies an unused dirent.
- Inode() uint32
-
- // RecordSize returns the record length of this dirent on disk. The next
-	// dirent in the dirent list should be read after this many bytes from
- // the current dirent. Must be a multiple of 4.
- RecordSize() uint16
-
-	// FileName returns the name of the file. Can be at most 255 bytes in length.
- FileName() string
-
- // FileType returns the inode type of the underlying inode. This is a
- // performance hack so that we do not have to read the underlying inode struct
- // to know the type of inode. This will only work when the SbDirentFileType
- // feature is set. If not, the second returned value will be false indicating
- // that user code has to use the inode mode to extract the file type.
- FileType() (fs.InodeType, bool)
-}
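The record-length chaining described above is easy to see in isolation. The following standalone sketch (plain encoding/binary, not the deleted package's parser) walks one directory data block laid out as ext4_dir_entry_2-style records: inode number, record length, name length, file type, then the name; inode 0 marks an unused slot and the last record's length runs to the end of the block.

package main

import (
	"encoding/binary"
	"fmt"
)

// walkDirentBlock scans one directory data block containing a linear array
// of ext4_dir_entry_2-style records and prints the used entries.
func walkDirentBlock(block []byte) {
	for off := 0; off+8 <= len(block); {
		inode := binary.LittleEndian.Uint32(block[off:])
		recLen := int(binary.LittleEndian.Uint16(block[off+4:]))
		nameLen := int(block[off+6])
		if recLen < 8 || off+recLen > len(block) || 8+nameLen > recLen {
			return // Corrupt record; stop walking.
		}
		if inode != 0 {
			fmt.Printf("inode %d: %q\n", inode, string(block[off+8:off+8+nameLen]))
		}
		off += recLen
	}
}

func main() {
	// A 32-byte toy "block" holding "." and "..".
	block := make([]byte, 32)
	binary.LittleEndian.PutUint32(block[0:], 2)   // inode
	binary.LittleEndian.PutUint16(block[4:], 12)  // rec_len
	block[6], block[7] = 1, 2                     // name_len, file type (dir)
	copy(block[8:], ".")
	binary.LittleEndian.PutUint32(block[12:], 2)  // inode
	binary.LittleEndian.PutUint16(block[16:], 20) // rec_len runs to block end
	block[18], block[19] = 2, 2                   // name_len, file type (dir)
	copy(block[20:], "..")
	walkDirentBlock(block)
}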
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
deleted file mode 100644
index 51f9c2946..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_new.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
-
- "gvisor.dev/gvisor/pkg/sentry/fs"
-)
-
-// DirentNew represents the ext4 directory entry struct. This emulates Linux's
-// ext4_dir_entry_2 struct. The FileName cannot be more than 255 bytes, so we
-// only need 8 bits to store the NameLength. As a result, NameLength has been
-// shortened and the other 8 bits are used to encode the file type. Use the
-// FileTypeRaw field only if the SbDirentFileType feature is set.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentNew struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint8
- FileTypeRaw uint8
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentNew implements Dirent.
-var _ Dirent = (*DirentNew)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentNew) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentNew) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentNew) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentNew) FileType() (fs.InodeType, bool) {
- if inodeType, ok := inodeTypeByFileType[d.FileTypeRaw]; ok {
- return inodeType, true
- }
-
- panic(fmt.Sprintf("unknown file type %v", d.FileTypeRaw))
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
deleted file mode 100644
index d4b19e086..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_old.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/fs"
-
-// DirentOld represents the old directory entry struct which does not contain
-// the file type. This emulates Linux's ext4_dir_entry struct.
-//
-// Note: This struct can be of variable size on disk. The one described below
-// is of maximum size and the FileName beyond NameLength bytes might contain
-// garbage.
-//
-// +marshal
-type DirentOld struct {
- InodeNumber uint32
- RecordLength uint16
- NameLength uint16
- FileNameRaw [MaxFileName]byte `marshal:"unaligned"`
-}
-
-// Compiles only if DirentOld implements Dirent.
-var _ Dirent = (*DirentOld)(nil)
-
-// Inode implements Dirent.Inode.
-func (d *DirentOld) Inode() uint32 { return d.InodeNumber }
-
-// RecordSize implements Dirent.RecordSize.
-func (d *DirentOld) RecordSize() uint16 { return d.RecordLength }
-
-// FileName implements Dirent.FileName.
-func (d *DirentOld) FileName() string {
- return string(d.FileNameRaw[:d.NameLength])
-}
-
-// FileType implements Dirent.FileType.
-func (d *DirentOld) FileType() (fs.InodeType, bool) {
- return fs.Anonymous, false
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go b/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
deleted file mode 100644
index 3486864dc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/dirent_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "testing"
-)
-
-// TestDirentSize tests that the dirent structs are of the correct
-// size.
-func TestDirentSize(t *testing.T) {
- var dOld DirentOld
- assertSize(t, &dOld, DirentSize)
- var dNew DirentNew
- assertSize(t, &dNew, DirentSize)
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go b/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
deleted file mode 100644
index 0834e9ba8..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/disklayout.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package disklayout provides Linux ext file system's disk level structures
-// which can be directly read in from the underlying device. Structs aim to
-// emulate `exactly` how the structures are laid out on disk.
-//
-// This library aims to be compatible with all ext(2/3/4) systems so it
-// provides a generic interface for all major structures and various
-// implementations (for different versions). The user code is responsible for
-// using appropriate implementations based on the underlying device.
-//
-// Interfacing all major structures here serves a few purposes:
-// - Abstracts away the complexity of the underlying structure from client
-// code. The client only has to figure out versioning on set up and then
-// can use these as black boxes and pass it higher up the stack.
-// - Having pointer receivers forces the user to use pointers to these
-// heavy structs. Hence, prevents the client code from unintentionally
-// copying these by value while passing the interface around.
-// - Version-based implementation selection is resolved on set up hence
-// avoiding per call overhead of choosing implementation.
-//   - All interface methods are lightweight (do not take in any
-// parameters by design). Passing pointer arguments to interface methods
-// can lead to heap allocation as the compiler won't be able to perform
-// escape analysis on an unknown implementation at compile time.
-//
-// Notes:
-// - All structures on disk are in little-endian order. Only jbd2 (journal)
-// structures are in big-endian order.
-//   - All OS dependent fields in these structures will be interpreted using
-// the Linux version of that field.
-// - The suffix `Lo` in field names stands for lower bits of that field.
-// - The suffix `Hi` in field names stands for upper bits of that field.
-// - The suffix `Raw` has been added to indicate that the field is not split
-// into Lo and Hi fields and also to resolve name collision with the
-// respective interface.
-package disklayout
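Because everything here is little-endian and at fixed offsets, a reader can sanity-check an image with nothing but the standard library. A standalone sketch (not the deleted package's code) that verifies the ext magic: the superblock sits at byte offset 1024 (SbOffset) and, per the ext4 layout documentation, its 16-bit s_magic field is at offset 0x38 and must read 0xef53. The image path in main is only a placeholder.

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// checkExtMagic reads the superblock magic straight off an image file. The
// superblock lives at byte offset 1024 and the 16-bit s_magic field sits at
// offset 0x38 within it (per the ext4 layout documentation).
func checkExtMagic(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	var buf [2]byte
	if _, err := f.ReadAt(buf[:], 1024+0x38); err != nil {
		return false, err
	}
	return binary.LittleEndian.Uint16(buf[:]) == 0xef53, nil
}

func main() {
	ok, err := checkExtMagic("/tmp/ext4.img") // hypothetical image path
	fmt.Println(ok, err)
}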
diff --git a/pkg/sentry/fsimpl/ext/disklayout/extent.go b/pkg/sentry/fsimpl/ext/disklayout/extent.go
deleted file mode 100644
index b13999bfc..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/extent.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-// Extents were introduced in ext4 and provide huge performance gains in terms
-// data locality and reduced metadata block usage. Extents are organized in
-// extent trees. The root node is contained in inode.BlocksRaw.
-//
-// Terminology:
-// - Physical Block:
-// Filesystem data block which is addressed normally wrt the entire
-// filesystem (addressed with 48 bits).
-//
-// - File Block:
-//      Data block containing *only* file data and addressed wrt the file
-// with only 32 bits. The (i)th file block contains file data from
-// byte (i * sb.BlockSize()) to ((i+1) * sb.BlockSize()).
-
-const (
- // ExtentHeaderSize is the size of the header of an extent tree node.
- ExtentHeaderSize = 12
-
- // ExtentEntrySize is the size of an entry in an extent tree node.
- // This size is the same for both leaf and internal nodes.
- ExtentEntrySize = 12
-
- // ExtentMagic is the magic number which must be present in the header.
- ExtentMagic = 0xf30a
-)
-
-// ExtentEntryPair couples an in-memory ExtentNode with the ExtentEntry that
-// points to it. We want to cache these structs in memory to avoid repeated
-// disk reads.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentEntryPair struct {
- // Entry points to the child node on disk.
- Entry ExtentEntry
- // Node points to child node in memory. Is nil if the current node is a leaf.
- Node *ExtentNode
-}
-
-// ExtentNode represents an extent tree node. For internal nodes, all Entries
-// will be ExtentIdxs. For leaf nodes, they will all be Extents.
-//
-// Note: This struct itself does not represent an on-disk struct.
-type ExtentNode struct {
- Header ExtentHeader
- Entries []ExtentEntryPair
-}
-
-// ExtentEntry represents an extent tree node entry. The entry can either be
-// an ExtentIdx or Extent itself. This exists to simplify navigation logic.
-type ExtentEntry interface {
- marshal.Marshallable
-
- // FileBlock returns the first file block number covered by this entry.
- FileBlock() uint32
-
- // PhysicalBlock returns the child physical block that this entry points to.
- PhysicalBlock() uint64
-}
-
-// ExtentHeader emulates the ext4_extent_header struct in ext4. Each extent
-// tree node begins with this and is followed by `NumEntries` number of:
-// - Extent if `Depth` == 0
-// - ExtentIdx otherwise
-//
-// +marshal
-type ExtentHeader struct {
-	// Magic is the extent magic number; it must be 0xf30a.
- Magic uint16
-
- // NumEntries indicates the number of valid entries following the header.
- NumEntries uint16
-
- // MaxEntries that could follow the header. Used while adding entries.
- MaxEntries uint16
-
- // Height represents the distance of this node from the farthest leaf. Please
- // note that Linux incorrectly calls this `Depth` (which means the distance
- // of the node from the root).
- Height uint16
- _ uint32
-}
-
-// ExtentIdx emulates the ext4_extent_idx struct in ext4. Only present in
-// internal nodes. Sorted in ascending order based on FirstFileBlock since
-// Linux does a binary search on this. This points to a block containing the
-// child node.
-//
-// +marshal
-type ExtentIdx struct {
- FirstFileBlock uint32
- ChildBlockLo uint32
- ChildBlockHi uint16
- _ uint16
-}
-
-// Compiles only if ExtentIdx implements ExtentEntry.
-var _ ExtentEntry = (*ExtentIdx)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (ei *ExtentIdx) FileBlock() uint32 {
- return ei.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the child block.
-func (ei *ExtentIdx) PhysicalBlock() uint64 {
- return (uint64(ei.ChildBlockHi) << 32) | uint64(ei.ChildBlockLo)
-}
-
-// Extent represents the ext4_extent struct in ext4. Only present in leaf
-// nodes. Sorted in ascending order based on FirstFileBlock since Linux does a
-// binary search on this. This points to an array of data blocks containing the
-// file data. It covers `Length` data blocks starting from `StartBlock`.
-//
-// +marshal
-type Extent struct {
- FirstFileBlock uint32
- Length uint16
- StartBlockHi uint16
- StartBlockLo uint32
-}
-
-// Compiles only if Extent implements ExtentEntry.
-var _ ExtentEntry = (*Extent)(nil)
-
-// FileBlock implements ExtentEntry.FileBlock.
-func (e *Extent) FileBlock() uint32 {
- return e.FirstFileBlock
-}
-
-// PhysicalBlock implements ExtentEntry.PhysicalBlock. It returns the
-// physical block number of the first data block this extent covers.
-func (e *Extent) PhysicalBlock() uint64 {
- return (uint64(e.StartBlockHi) << 32) | uint64(e.StartBlockLo)
-}
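To make the tree shape concrete, here is a standalone sketch of how a file block number is resolved against such a tree. The types are local stand-ins for ExtentNode/Extent/ExtentIdx, and the descent scans linearly where Linux binary-searches on FirstFileBlock; it is an illustration, not the deleted package's lookup.

package main

import "fmt"

// extent maps a run of file blocks starting at firstFileBlock to physical
// blocks starting at startBlock (a stand-in for Extent above).
type extent struct {
	firstFileBlock uint32
	length         uint16
	startBlock     uint64
}

// extentIdx routes a range of file blocks to a child node (a stand-in for
// ExtentIdx plus the in-memory child pointer of ExtentEntryPair).
type extentIdx struct {
	firstFileBlock uint32
	child          *node
}

// node is either a leaf (leaves set) or an internal node (idxs set).
type node struct {
	leaves []extent
	idxs   []extentIdx
}

// lookup resolves a file block to a physical block: pick the last entry whose
// firstFileBlock is <= the target, then recurse (internal) or offset into the
// run (leaf). Returns false for holes or out-of-range blocks.
func lookup(n *node, fileBlock uint32) (uint64, bool) {
	if n.leaves != nil {
		for i := len(n.leaves) - 1; i >= 0; i-- {
			e := n.leaves[i]
			if e.firstFileBlock <= fileBlock {
				if off := fileBlock - e.firstFileBlock; off < uint32(e.length) {
					return e.startBlock + uint64(off), true
				}
				return 0, false // Hole.
			}
		}
		return 0, false
	}
	for i := len(n.idxs) - 1; i >= 0; i-- {
		if n.idxs[i].firstFileBlock <= fileBlock {
			return lookup(n.idxs[i].child, fileBlock)
		}
	}
	return 0, false
}

func main() {
	leaf := &node{leaves: []extent{{firstFileBlock: 0, length: 4, startBlock: 1000}}}
	root := &node{idxs: []extentIdx{{firstFileBlock: 0, child: leaf}}}
	fmt.Println(lookup(root, 2)) // 1002 true
}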
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode.go b/pkg/sentry/fsimpl/ext/disklayout/inode.go
deleted file mode 100644
index ef25040a9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// Special inodes. See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#special-inodes.
-const (
- // RootDirInode is the inode number of the root directory inode.
- RootDirInode = 2
-)
-
-// The Inode interface must be implemented by structs representing ext inodes.
-// The inode stores all the metadata pertaining to the file (except for the
-// file name which is held by the directory entry). It does NOT expose all
-// fields and should be extended if need be.
-//
-// Some file systems (e.g. FAT) use the directory entry to store all this
-// information. Ext file systems do not, so that they can support hard links.
-// However, ext4 cheats a little bit and duplicates the file type in the
-// directory entry for performance gains.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#index-nodes.
-type Inode interface {
- marshal.Marshallable
-
- // Mode returns the linux file mode which is majorly used to extract
- // information like:
- // - File permissions (read/write/execute by user/group/others).
- // - Sticky, set UID and GID bits.
- // - File type.
- //
- // Masks to extract this information are provided in pkg/abi/linux/file.go.
- Mode() linux.FileMode
-
- // UID returns the owner UID.
- UID() auth.KUID
-
- // GID returns the owner GID.
- GID() auth.KGID
-
- // Size returns the size of the file in bytes.
- Size() uint64
-
- // InodeSize returns the size of this inode struct in bytes.
-	// In ext2 and ext3, the inode struct and inode disk record size were fixed at
- // 128 bytes. Ext4 makes it possible for the inode struct to be bigger.
- // However, accessing any field beyond the 128 bytes marker must be verified
- // using this method.
- InodeSize() uint16
-
- // AccessTime returns the last access time. Shows when the file was last read.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the extended attribute value checksum.
- AccessTime() time.Time
-
- // ChangeTime returns the last change time. Shows when the file meta data
- // (like permissions) was last changed.
- //
- // If InExtendedAttr is set, then this should NOT be used because the
- // underlying field is used to store the lower 32 bits of the attribute
- // value’s reference count.
- ChangeTime() time.Time
-
- // ModificationTime returns the last modification time. Shows when the file
- // content was last modified.
- //
- // If InExtendedAttr is set, then this should NOT be used because
- // the underlying field contains the number of the inode that owns the
- // extended attribute.
- ModificationTime() time.Time
-
- // DeletionTime returns the deletion time. Inodes are marked as deleted by
- // writing to the underlying field. FS tools can restore files until they are
- // actually overwritten.
- DeletionTime() time.Time
-
- // LinksCount returns the number of hard links to this inode.
- //
- // Normally there is an upper limit on the number of hard links:
- // - ext2/ext3 = 32,000
- // - ext4 = 65,000
- //
- // This implies that an ext4 directory cannot have more than 64,998
- // subdirectories because each subdirectory will have a hard link to the
- // directory via the `..` entry. The directory has hard link via the `.` entry
-	// directory via the `..` entry. The directory also has a hard link to itself
-	// via its own `.` entry. Finally, the inode is initialized with 1 hard link (itself).
- // The underlying value is reset to 1 if all the following hold:
- // - Inode is a directory.
- // - SbDirNlink is enabled.
- // - Number of hard links is incremented past 64,999.
-	// A hard link value of 1 for a directory indicates that the number of hard
-	// links is unknown, because a directory has a minimum of 2 hard links (itself
-	// and its `.` entry).
- LinksCount() uint16
-
- // Flags returns InodeFlags which represents the inode flags.
- Flags() InodeFlags
-
- // Data returns the underlying inode.i_block array as a slice so it's
- // modifiable. This field is special and is used to store various kinds of
- // things depending on the filesystem version and inode type. The underlying
- // field name in Linux is a little misleading.
- // - In ext2/ext3, it contains the block map.
- // - In ext4, it contains the extent tree root node.
- // - For inline files, it contains the file contents.
- // - For symlinks, it contains the link path (if it fits here).
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#the-contents-of-inode-i-block.
- Data() []byte
-}
-
-// Inode flags. This is not comprehensive and flags which were not used in
-// the Linux kernel have been excluded.
-const (
- // InSync indicates that all writes to the file must be synchronous.
- InSync = 0x8
-
- // InImmutable indicates that this file is immutable.
- InImmutable = 0x10
-
- // InAppend indicates that this file can only be appended to.
- InAppend = 0x20
-
-	// InNoDump indicates that the dump(1) utility should not dump this file.
- InNoDump = 0x40
-
- // InNoAccessTime indicates that the access time of this inode must not be
- // updated.
- InNoAccessTime = 0x80
-
- // InIndex indicates that this directory has hashed indexes.
- InIndex = 0x1000
-
- // InJournalData indicates that file data must always be written through a
- // journal device.
- InJournalData = 0x4000
-
-	// InDirSync indicates that all the directory entry data must be written
- // synchronously.
- InDirSync = 0x10000
-
- // InTopDir indicates that this inode is at the top of the directory hierarchy.
- InTopDir = 0x20000
-
- // InHugeFile indicates that this is a huge file.
- InHugeFile = 0x40000
-
- // InExtents indicates that this inode uses extents.
- InExtents = 0x80000
-
- // InExtendedAttr indicates that this inode stores a large extended attribute
- // value in its data blocks.
- InExtendedAttr = 0x200000
-
- // InInline indicates that this inode has inline data.
- InInline = 0x10000000
-
- // InReserved indicates that this inode is reserved for the ext4 library.
- InReserved = 0x80000000
-)
-
-// InodeFlags represents all possible combinations of inode flags. It aims to
-// cover the bit masks and provide a more user-friendly interface.
-type InodeFlags struct {
- Sync bool
- Immutable bool
- Append bool
- NoDump bool
- NoAccessTime bool
- Index bool
- JournalData bool
- DirSync bool
- TopDir bool
- HugeFile bool
- Extents bool
- ExtendedAttr bool
- Inline bool
- Reserved bool
-}
-
-// ToInt converts inode flags back to its 32-bit rep.
-func (f InodeFlags) ToInt() uint32 {
- var res uint32
-
- if f.Sync {
- res |= InSync
- }
- if f.Immutable {
- res |= InImmutable
- }
- if f.Append {
- res |= InAppend
- }
- if f.NoDump {
- res |= InNoDump
- }
- if f.NoAccessTime {
- res |= InNoAccessTime
- }
- if f.Index {
- res |= InIndex
- }
- if f.JournalData {
- res |= InJournalData
- }
- if f.DirSync {
- res |= InDirSync
- }
- if f.TopDir {
- res |= InTopDir
- }
- if f.HugeFile {
- res |= InHugeFile
- }
- if f.Extents {
- res |= InExtents
- }
- if f.ExtendedAttr {
- res |= InExtendedAttr
- }
- if f.Inline {
- res |= InInline
- }
- if f.Reserved {
- res |= InReserved
- }
-
- return res
-}
-
-// InodeFlagsFromInt converts the integer representation of inode flags to
-// an InodeFlags struct.
-func InodeFlagsFromInt(f uint32) InodeFlags {
- return InodeFlags{
- Sync: f&InSync > 0,
- Immutable: f&InImmutable > 0,
- Append: f&InAppend > 0,
- NoDump: f&InNoDump > 0,
- NoAccessTime: f&InNoAccessTime > 0,
- Index: f&InIndex > 0,
- JournalData: f&InJournalData > 0,
- DirSync: f&InDirSync > 0,
- TopDir: f&InTopDir > 0,
- HugeFile: f&InHugeFile > 0,
- Extents: f&InExtents > 0,
- ExtendedAttr: f&InExtendedAttr > 0,
- Inline: f&InInline > 0,
- Reserved: f&InReserved > 0,
- }
-}
-
-// These masks define how users can view/modify inode flags. The rest of the
-// flags are for internal kernel usage only.
-const (
- InUserReadFlagMask = 0x4BDFFF
- InUserWriteFlagMask = 0x4B80FF
-)
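A small standalone illustration of how these bits are typically consumed: given a raw i_flags word, decide how the inode's i_block area should be interpreted. The constants mirror InInline and InExtents above; the function is a local example, not part of the deleted package.

package main

import "fmt"

// Local mirrors of the InInline and InExtents flag values above.
const (
	inExtents = 0x80000
	inInline  = 0x10000000
)

// describe reports how the i_block area of an inode with the given raw flags
// word should be read.
func describe(raw uint32) string {
	switch {
	case raw&inInline != 0:
		return "file data is stored inline in the inode"
	case raw&inExtents != 0:
		return "i_block holds an extent tree root node"
	default:
		return "i_block holds a classic block map"
	}
}

func main() {
	fmt.Println(describe(0x80000)) // extent-mapped file
}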
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go b/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
deleted file mode 100644
index a4503f5cf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_new.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-
-// InodeNew represents the ext4 inode structure, which can be bigger than
-// OldInodeSize. The actual size of this struct should be determined using
-// inode.ExtraInodeSize. Accessing any field here should be verified with the
-// actual size. The extra space between the end of the inode struct and end of
-// the inode record can be used to store extended attr.
-//
-// If the TimeExtra fields are in scope, the lower 2 bits of those are used
-// to extend their counterparts to be 34 bits wide; the remaining (upper) 30 bits
-// are used to provide nanosecond precision. Hence, these timestamps will now
-// overflow in May 2446.
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-//
-// +marshal
-type InodeNew struct {
- InodeOld
-
- ExtraInodeSize uint16
- ChecksumHi uint16
- ChangeTimeExtra uint32
- ModificationTimeExtra uint32
- AccessTimeExtra uint32
- CreationTime uint32
- CreationTimeExtra uint32
- VersionHi uint32
- ProjectID uint32
-}
-
-// Compiles only if InodeNew implements Inode.
-var _ Inode = (*InodeNew)(nil)
-
-// fromExtraTime decodes the extra time and constructs the kernel time struct
-// with nanosecond precision.
-func fromExtraTime(lo int32, extra uint32) time.Time {
- // See description above InodeNew for format.
- seconds := (int64(extra&0x3) << 32) + int64(lo)
- nanoseconds := int64(extra >> 2)
- return time.FromUnix(seconds, nanoseconds)
-}
-
-// Only override methods which change due to ext4 specific fields.
-
-// Size implements Inode.Size.
-func (in *InodeNew) Size() uint64 {
- return (uint64(in.SizeHi) << 32) | uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeNew) InodeSize() uint16 {
- return OldInodeSize + in.ExtraInodeSize
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeNew) ChangeTime() time.Time {
- // Apply new timestamp logic if inode.ChangeTimeExtra is in scope.
- if in.ExtraInodeSize >= 8 {
- return fromExtraTime(in.ChangeTimeRaw, in.ChangeTimeExtra)
- }
-
- return in.InodeOld.ChangeTime()
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeNew) ModificationTime() time.Time {
- // Apply new timestamp logic if inode.ModificationTimeExtra is in scope.
- if in.ExtraInodeSize >= 12 {
- return fromExtraTime(in.ModificationTimeRaw, in.ModificationTimeExtra)
- }
-
- return in.InodeOld.ModificationTime()
-}
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeNew) AccessTime() time.Time {
- // Apply new timestamp logic if inode.AccessTimeExtra is in scope.
- if in.ExtraInodeSize >= 16 {
- return fromExtraTime(in.AccessTimeRaw, in.AccessTimeExtra)
- }
-
- return in.InodeOld.AccessTime()
-}
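The extra-bits encoding is easiest to see with a standalone re-implementation of fromExtraTime that uses the standard library's time package in place of the sentry one; everything else matches the decoding above.

package main

import (
	"fmt"
	"time"
)

// fromExtraTime mirrors the decoding above: the low 2 bits of the *_extra
// field widen the signed 32-bit seconds counter to 34 bits, and the upper 30
// bits carry nanoseconds.
func fromExtraTime(lo int32, extra uint32) time.Time {
	seconds := (int64(extra&0x3) << 32) + int64(lo)
	nanoseconds := int64(extra >> 2)
	return time.Unix(seconds, nanoseconds).UTC()
}

func main() {
	// Epoch bits 0b11 with the maximum positive 32-bit value: the last
	// representable second, in May 2446.
	fmt.Println(fromExtraTime(0x7fffffff, 3))
}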
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go b/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
deleted file mode 100644
index e6b28babf..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_old.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-const (
- // OldInodeSize is the inode size in ext2/ext3.
- OldInodeSize = 128
-)
-
-// InodeOld implements Inode interface. It emulates ext2/ext3 inode struct.
-// Inode struct size and record size are both 128 bytes for this.
-//
-// All fields representing time are in seconds since the epoch, which means
-// that they will overflow in January 2038.
-//
-// +marshal
-type InodeOld struct {
- ModeRaw uint16
- UIDLo uint16
- SizeLo uint32
-
- // The time fields are signed integers because they could be negative to
- // represent time before the epoch.
- AccessTimeRaw int32
- ChangeTimeRaw int32
- ModificationTimeRaw int32
- DeletionTimeRaw int32
-
- GIDLo uint16
- LinksCountRaw uint16
- BlocksCountLo uint32
- FlagsRaw uint32
- VersionLo uint32 // This is OS dependent.
- DataRaw [60]byte
- Generation uint32
- FileACLLo uint32
- SizeHi uint32
- ObsoFaddr uint32
-
- // OS dependent fields have been inlined here.
- BlocksCountHi uint16
- FileACLHi uint16
- UIDHi uint16
- GIDHi uint16
- ChecksumLo uint16
- _ uint16
-}
-
-// Compiles only if InodeOld implements Inode.
-var _ Inode = (*InodeOld)(nil)
-
-// Mode implements Inode.Mode.
-func (in *InodeOld) Mode() linux.FileMode { return linux.FileMode(in.ModeRaw) }
-
-// UID implements Inode.UID.
-func (in *InodeOld) UID() auth.KUID {
- return auth.KUID((uint32(in.UIDHi) << 16) | uint32(in.UIDLo))
-}
-
-// GID implements Inode.GID.
-func (in *InodeOld) GID() auth.KGID {
- return auth.KGID((uint32(in.GIDHi) << 16) | uint32(in.GIDLo))
-}
-
-// Size implements Inode.Size.
-func (in *InodeOld) Size() uint64 {
-	// In ext2/ext3, in.SizeHi did not exist; it was instead named in.DirACL.
- return uint64(in.SizeLo)
-}
-
-// InodeSize implements Inode.InodeSize.
-func (in *InodeOld) InodeSize() uint16 { return OldInodeSize }
-
-// AccessTime implements Inode.AccessTime.
-func (in *InodeOld) AccessTime() time.Time {
- return time.FromUnix(int64(in.AccessTimeRaw), 0)
-}
-
-// ChangeTime implements Inode.ChangeTime.
-func (in *InodeOld) ChangeTime() time.Time {
- return time.FromUnix(int64(in.ChangeTimeRaw), 0)
-}
-
-// ModificationTime implements Inode.ModificationTime.
-func (in *InodeOld) ModificationTime() time.Time {
- return time.FromUnix(int64(in.ModificationTimeRaw), 0)
-}
-
-// DeletionTime implements Inode.DeletionTime.
-func (in *InodeOld) DeletionTime() time.Time {
- return time.FromUnix(int64(in.DeletionTimeRaw), 0)
-}
-
-// LinksCount implements Inode.LinksCount.
-func (in *InodeOld) LinksCount() uint16 { return in.LinksCountRaw }
-
-// Flags implements Inode.Flags.
-func (in *InodeOld) Flags() InodeFlags { return InodeFlagsFromInt(in.FlagsRaw) }
-
-// Data implements Inode.Data.
-func (in *InodeOld) Data() []byte { return in.DataRaw[:] }
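Mode packs the file type and the permission bits into a single 16-bit word, and the Inode interface above leaves the splitting to callers with masks from pkg/abi/linux. A standalone sketch of that split using the conventional S_IFMT encoding (local constants here, not the abi package):

package main

import "fmt"

// sIFMT is the conventional file-type mask: the top four bits of the mode
// select the type, the low 12 bits hold permission/sticky/setuid/setgid bits.
const sIFMT = 0xf000

// splitMode separates a raw inode mode into its type and permission parts.
func splitMode(mode uint16) (fileType, perms uint16) {
	return mode & sIFMT, mode &^ sIFMT
}

func main() {
	ft, perms := splitMode(0x81a4) // S_IFREG | 0644, an illustrative value
	fmt.Printf("type=%#x perms=%#o\n", ft, perms)
}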
diff --git a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go b/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
deleted file mode 100644
index 90744e956..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/inode_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "fmt"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/sentry/kernel/time"
-)
-
-// TestInodeSize tests that the inode structs are of the correct size.
-func TestInodeSize(t *testing.T) {
- var iOld InodeOld
- assertSize(t, &iOld, OldInodeSize)
-
- // This was updated from 156 bytes to 160 bytes in Oct 2015.
- var iNew InodeNew
- assertSize(t, &iNew, 160)
-}
-
-// TestTimestampSeconds tests that the seconds part of [a/c/m] timestamps in
-// ext4 inode structs are decoded correctly.
-//
-// These tests are derived from the table under https://www.kernel.org/doc/html/latest/filesystems/ext4/dynamic.html#inode-timestamps.
-func TestTimestampSeconds(t *testing.T) {
- type timestampTest struct {
- // msbSet tells if the most significant bit of InodeOld.[X]TimeRaw is set.
- // If this is set then the 32-bit time is negative.
- msbSet bool
-
- // lowerBound tells if we should take the lowest possible value of
- // InodeOld.[X]TimeRaw while satisfying test.msbSet condition. If set to
- // false it tells to take the highest possible value.
- lowerBound bool
-
- // extraBits is InodeNew.[X]TimeExtra.
- extraBits uint32
-
- // want is the kernel time struct that is expected.
- want time.Time
- }
-
- tests := []timestampTest{
- // 1901-12-13
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(-0x80000000), 0),
- },
-
- // 1969-12-31
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(-1), 0),
- },
-
- // 1970-01-01
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 0,
- want: time.FromUnix(int64(0), 0),
- },
-
- // 2038-01-19
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 0,
- want: time.FromUnix(int64(0x7fffffff), 0),
- },
-
- // 2038-01-19
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x80000000), 0),
- },
-
- // 2106-02-07
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0xffffffff), 0),
- },
-
- // 2106-02-07
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 1,
- want: time.FromUnix(int64(0x100000000), 0),
- },
-
- // 2174-02-25
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 1,
- want: time.FromUnix(int64(0x17fffffff), 0),
- },
-
- // 2174-02-25
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x180000000), 0),
- },
-
- // 2242-03-16
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x1ffffffff), 0),
- },
-
- // 2242-03-16
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 2,
- want: time.FromUnix(int64(0x200000000), 0),
- },
-
- // 2310-04-04
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 2,
- want: time.FromUnix(int64(0x27fffffff), 0),
- },
-
- // 2310-04-04
- {
- msbSet: true,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x280000000), 0),
- },
-
- // 2378-04-22
- {
- msbSet: true,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x2ffffffff), 0),
- },
-
- // 2378-04-22
- {
- msbSet: false,
- lowerBound: true,
- extraBits: 3,
- want: time.FromUnix(int64(0x300000000), 0),
- },
-
- // 2446-05-10
- {
- msbSet: false,
- lowerBound: false,
- extraBits: 3,
- want: time.FromUnix(int64(0x37fffffff), 0),
- },
- }
-
- lowerMSB0 := int32(0) // binary: 00000000 00000000 00000000 00000000
- upperMSB0 := int32(0x7fffffff) // binary: 01111111 11111111 11111111 11111111
- lowerMSB1 := int32(-0x80000000) // binary: 10000000 00000000 00000000 00000000
- upperMSB1 := int32(-1) // binary: 11111111 11111111 11111111 11111111
-
- get32BitTime := func(test timestampTest) int32 {
- if test.msbSet {
- if test.lowerBound {
- return lowerMSB1
- }
-
- return upperMSB1
- }
-
- if test.lowerBound {
- return lowerMSB0
- }
-
- return upperMSB0
- }
-
- getTestName := func(test timestampTest) string {
- return fmt.Sprintf(
- "Tests time decoding with epoch bits 0b%s and 32-bit raw time: MSB set=%t, lower bound=%t",
- strconv.FormatInt(int64(test.extraBits), 2),
- test.msbSet,
- test.lowerBound,
- )
- }
-
- for _, test := range tests {
- t.Run(getTestName(test), func(t *testing.T) {
- if got := fromExtraTime(get32BitTime(test), test.extraBits); got != test.want {
- t.Errorf("Expected: %v, Got: %v", test.want, got)
- }
- })
- }
-}
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock.go b/pkg/sentry/fsimpl/ext/disklayout/superblock.go
deleted file mode 100644
index 70948ebe9..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock.go
+++ /dev/null
@@ -1,477 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-import (
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-const (
- // SbOffset is the absolute offset at which the superblock is placed.
- SbOffset = 1024
-)
-
-// SuperBlock should be implemented by structs representing the ext superblock.
-// The superblock holds a lot of information about the enclosing filesystem.
-// This interface aims to provide access methods to important information held
-// by the superblock. It does NOT expose all fields of the superblock, only the
-// ones necessary. This can be expanded if need be.
-//
-// Location and replication:
-// - The superblock is located at offset 1024 in block group 0.
-// - Redundant copies of the superblock and group descriptors are kept in
-// all groups if SbSparse feature flag is NOT set. If it is set, the
-// replicas only exist in groups whose group number is either 0 or a
-// power of 3, 5, or 7.
-// - There is also a sparse superblock feature v2 in which there are just
-// two replicas saved in the block groups pointed by sb.s_backup_bgs.
-//
-// Replicas should eventually be updated if the superblock is updated.
-//
-// See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#super-block.
-type SuperBlock interface {
- marshal.Marshallable
-
- // InodesCount returns the total number of inodes in this filesystem.
- InodesCount() uint32
-
- // BlocksCount returns the total number of data blocks in this filesystem.
- BlocksCount() uint64
-
- // FreeBlocksCount returns the number of free blocks in this filesystem.
- FreeBlocksCount() uint64
-
- // FreeInodesCount returns the number of free inodes in this filesystem.
- FreeInodesCount() uint32
-
- // MountCount returns the number of mounts since the last fsck.
- MountCount() uint16
-
- // MaxMountCount returns the number of mounts allowed beyond which a fsck is
- // needed.
- MaxMountCount() uint16
-
- // FirstDataBlock returns the absolute block number of the first data block,
- // which contains the super block itself.
- //
- // If the filesystem has 1kb data blocks then this should return 1. For all
- // other configurations, this typically returns 0.
- FirstDataBlock() uint32
-
- // BlockSize returns the size of one data block in this filesystem.
- // This can be calculated by 2^(10 + sb.s_log_block_size). This ensures that
- // the smallest block size is 1kb.
- BlockSize() uint64
-
- // BlocksPerGroup returns the number of data blocks in a block group.
- BlocksPerGroup() uint32
-
- // ClusterSize returns block cluster size (set during mkfs time by admin).
- // This can be calculated by 2^(10 + sb.s_log_cluster_size). This ensures that
- // the smallest cluster size is 1kb.
- //
- // sb.s_log_cluster_size must equal sb.s_log_block_size if bigalloc feature
- // is NOT set and consequently BlockSize() = ClusterSize() in that case.
- ClusterSize() uint64
-
- // ClustersPerGroup returns:
- // - number of clusters per group if bigalloc is enabled.
- // - BlocksPerGroup() otherwise.
- ClustersPerGroup() uint32
-
- // InodeSize returns the size of the inode disk record size in bytes. Use this
- // to iterate over inode arrays on disk.
- //
- // In ext2 and ext3:
- // - Each inode had a disk record of 128 bytes.
- // - The inode struct size was fixed at 128 bytes.
- //
-	// In ext4 it's possible to allocate larger on-disk inodes:
- // - Inode disk record size = sb.s_inode_size (function return value).
- // = 256 (default)
- // - Inode struct size = 128 + inode.i_extra_isize.
- // = 128 + 32 = 160 (default)
- InodeSize() uint16
-
- // InodesPerGroup returns the number of inodes in a block group.
- InodesPerGroup() uint32
-
- // BgDescSize returns the size of the block group descriptor struct.
- //
- // In ext2, ext3, ext4 (without 64-bit feature), the block group descriptor
- // is only 32 bytes long.
- // In ext4 with 64-bit feature, the block group descriptor expands to AT LEAST
- // 64 bytes. It might be bigger than that.
- BgDescSize() uint16
-
- // CompatibleFeatures returns the CompatFeatures struct which holds all the
- // compatible features this fs supports.
- CompatibleFeatures() CompatFeatures
-
- // IncompatibleFeatures returns the CompatFeatures struct which holds all the
- // incompatible features this fs supports.
- IncompatibleFeatures() IncompatFeatures
-
- // ReadOnlyCompatibleFeatures returns the CompatFeatures struct which holds all the
- // readonly compatible features this fs supports.
- ReadOnlyCompatibleFeatures() RoCompatFeatures
-
-	// Magic returns the magic signature, which must be 0xef53.
- Magic() uint16
-
- // Revision returns the superblock revision. Superblock struct fields from
- // offset 0x54 till 0x150 should only be used if superblock has DynamicRev.
- Revision() SbRevision
-}
-
-// SbRevision is the type for superblock revisions.
-type SbRevision uint32
-
-// Super block revisions.
-const (
- // OldRev is the good old (original) format.
- OldRev SbRevision = 0
-
- // DynamicRev is v2 format w/ dynamic inode sizes.
- DynamicRev SbRevision = 1
-)
-
-// Superblock compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirPrealloc indicates directory preallocation.
- SbDirPrealloc = 0x1
-
- // SbHasJournal indicates the presence of a journal. jbd2 should only work
- // with this being set.
- SbHasJournal = 0x4
-
- // SbExtAttr indicates extended attributes support.
- SbExtAttr = 0x8
-
- // SbResizeInode indicates that the fs has reserved GDT blocks (right after
- // group descriptors) for fs expansion.
- SbResizeInode = 0x10
-
- // SbDirIndex indicates that the fs has directory indices.
- SbDirIndex = 0x20
-
- // SbSparseV2 stands for Sparse superblock version 2.
- SbSparseV2 = 0x200
-)
-
-// CompatFeatures represents a superblock's compatible feature set. If the
-// kernel does not understand any of these features, it can still read/write
-// to this fs.
-type CompatFeatures struct {
- DirPrealloc bool
- HasJournal bool
- ExtAttr bool
- ResizeInode bool
- DirIndex bool
- SparseV2 bool
-}
-
-// ToInt converts superblock compatible features back to its 32-bit rep.
-func (f CompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirPrealloc {
- res |= SbDirPrealloc
- }
- if f.HasJournal {
- res |= SbHasJournal
- }
- if f.ExtAttr {
- res |= SbExtAttr
- }
- if f.ResizeInode {
- res |= SbResizeInode
- }
- if f.DirIndex {
- res |= SbDirIndex
- }
- if f.SparseV2 {
- res |= SbSparseV2
- }
-
- return res
-}
-
-// CompatFeaturesFromInt converts the integer representation of superblock
-// compatible features to CompatFeatures struct.
-func CompatFeaturesFromInt(f uint32) CompatFeatures {
- return CompatFeatures{
- DirPrealloc: f&SbDirPrealloc > 0,
- HasJournal: f&SbHasJournal > 0,
- ExtAttr: f&SbExtAttr > 0,
- ResizeInode: f&SbResizeInode > 0,
- DirIndex: f&SbDirIndex > 0,
- SparseV2: f&SbSparseV2 > 0,
- }
-}
-
-// Superblock incompatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbDirentFileType indicates that directory entries record the file type.
- // We should use struct DirentNew for dirents then.
- SbDirentFileType = 0x2
-
- // SbRecovery indicates that the filesystem needs recovery.
- SbRecovery = 0x4
-
- // SbJournalDev indicates that the filesystem has a separate journal device.
- SbJournalDev = 0x8
-
- // SbMetaBG indicates that the filesystem is using Meta block groups. Moves
- // the group descriptors from the congested first block group into the first
- // group of each metablock group to increase the maximum block groups limit
- // and hence support much larger filesystems.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#meta-block-groups.
- SbMetaBG = 0x10
-
- // SbExtents indicates that the filesystem uses extents. Must be set in ext4
- // filesystems.
- SbExtents = 0x40
-
- // SbIs64Bit indicates that this filesystem addresses blocks with 64-bits.
- // Hence can support 2^64 data blocks.
- SbIs64Bit = 0x80
-
- // SbMMP indicates that this filesystem has multiple mount protection.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/globals.html#multiple-mount-protection.
- SbMMP = 0x100
-
- // SbFlexBg indicates that this filesystem has flexible block groups. Several
- // block groups are tied into one logical block group so that all the metadata
- // for the block groups (bitmaps and inode tables) are close together for
- // faster loading. Consequently, large files will be continuous on disk.
- // However, this does not affect the placement of redundant superblocks and
- // group descriptors.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#flexible-block-groups.
- SbFlexBg = 0x200
-
-	// SbLargeDir indicates that the large directory feature is enabled. Directory
-	// htrees can then be 3 levels deep; otherwise they are limited to 2 levels.
- SbLargeDir = 0x4000
-
- // SbInlineData allows inline data in inodes for really small files.
- SbInlineData = 0x8000
-
- // SbEncrypted indicates that this fs contains encrypted inodes.
- SbEncrypted = 0x10000
-)
-
-// IncompatFeatures represents a superblock's incompatible feature set. If the
-// kernel does not understand any of these features, it should refuse to mount.
-type IncompatFeatures struct {
- DirentFileType bool
- Recovery bool
- JournalDev bool
- MetaBG bool
- Extents bool
- Is64Bit bool
- MMP bool
- FlexBg bool
- LargeDir bool
- InlineData bool
- Encrypted bool
-}
-
-// ToInt converts superblock incompatible features back to its 32-bit rep.
-func (f IncompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.DirentFileType {
- res |= SbDirentFileType
- }
- if f.Recovery {
- res |= SbRecovery
- }
- if f.JournalDev {
- res |= SbJournalDev
- }
- if f.MetaBG {
- res |= SbMetaBG
- }
- if f.Extents {
- res |= SbExtents
- }
- if f.Is64Bit {
- res |= SbIs64Bit
- }
- if f.MMP {
- res |= SbMMP
- }
- if f.FlexBg {
- res |= SbFlexBg
- }
- if f.LargeDir {
- res |= SbLargeDir
- }
- if f.InlineData {
- res |= SbInlineData
- }
- if f.Encrypted {
- res |= SbEncrypted
- }
-
- return res
-}
-
-// IncompatFeaturesFromInt converts the integer representation of superblock
-// incompatible features to IncompatFeatures struct.
-func IncompatFeaturesFromInt(f uint32) IncompatFeatures {
- return IncompatFeatures{
- DirentFileType: f&SbDirentFileType > 0,
- Recovery: f&SbRecovery > 0,
- JournalDev: f&SbJournalDev > 0,
- MetaBG: f&SbMetaBG > 0,
- Extents: f&SbExtents > 0,
- Is64Bit: f&SbIs64Bit > 0,
- MMP: f&SbMMP > 0,
- FlexBg: f&SbFlexBg > 0,
- LargeDir: f&SbLargeDir > 0,
- InlineData: f&SbInlineData > 0,
- Encrypted: f&SbEncrypted > 0,
- }
-}
-
-// Superblock readonly compatible features.
-// This is not exhaustive, unused features are not listed.
-const (
- // SbSparse indicates sparse superblocks. Only groups with number either 0 or
- // a power of 3, 5, or 7 will have redundant copies of the superblock and
- // block descriptors.
- SbSparse = 0x1
-
- // SbLargeFile indicates that this fs has been used to store a file >= 2GiB.
- SbLargeFile = 0x2
-
- // SbHugeFile indicates that this fs contains files whose sizes are
-	// represented in units of logical blocks, not 512-byte sectors.
- SbHugeFile = 0x8
-
- // SbGdtCsum indicates that group descriptors have checksums.
- SbGdtCsum = 0x10
-
- // SbDirNlink indicates that the new subdirectory limit is 64,999. Ext3 has a
- // 32,000 subdirectory limit.
- SbDirNlink = 0x20
-
- // SbExtraIsize indicates that large inodes exist on this filesystem.
- SbExtraIsize = 0x40
-
- // SbHasSnapshot indicates the existence of a snapshot.
- SbHasSnapshot = 0x80
-
- // SbQuota enables usage tracking for all quota types.
- SbQuota = 0x100
-
- // SbBigalloc maps to the bigalloc feature. When set, the minimum allocation
- // unit becomes a cluster rather than a data block. Then block bitmaps track
- // clusters, not data blocks.
- //
- // See https://www.kernel.org/doc/html/latest/filesystems/ext4/overview.html#bigalloc.
- SbBigalloc = 0x200
-
- // SbMetadataCsum indicates that the fs supports metadata checksumming.
- SbMetadataCsum = 0x400
-
- // SbReadOnly marks this filesystem as readonly. Should refuse to mount in
- // read/write mode.
- SbReadOnly = 0x1000
-)
-
-// RoCompatFeatures represents a superblock's readonly compatible feature set.
-// If the kernel does not understand any of these features, it can still mount
-// readonly. But if the user wants to mount read/write, the kernel should
-// refuse to mount.
-type RoCompatFeatures struct {
- Sparse bool
- LargeFile bool
- HugeFile bool
- GdtCsum bool
- DirNlink bool
- ExtraIsize bool
- HasSnapshot bool
- Quota bool
- Bigalloc bool
- MetadataCsum bool
- ReadOnly bool
-}
-
-// ToInt converts superblock readonly compatible features to its 32-bit rep.
-func (f RoCompatFeatures) ToInt() uint32 {
- var res uint32
-
- if f.Sparse {
- res |= SbSparse
- }
- if f.LargeFile {
- res |= SbLargeFile
- }
- if f.HugeFile {
- res |= SbHugeFile
- }
- if f.GdtCsum {
- res |= SbGdtCsum
- }
- if f.DirNlink {
- res |= SbDirNlink
- }
- if f.ExtraIsize {
- res |= SbExtraIsize
- }
- if f.HasSnapshot {
- res |= SbHasSnapshot
- }
- if f.Quota {
- res |= SbQuota
- }
- if f.Bigalloc {
- res |= SbBigalloc
- }
- if f.MetadataCsum {
- res |= SbMetadataCsum
- }
- if f.ReadOnly {
- res |= SbReadOnly
- }
-
- return res
-}
-
-// RoCompatFeaturesFromInt converts the integer representation of superblock
-// readonly compatible features to RoCompatFeatures struct.
-func RoCompatFeaturesFromInt(f uint32) RoCompatFeatures {
- return RoCompatFeatures{
- Sparse: f&SbSparse > 0,
- LargeFile: f&SbLargeFile > 0,
- HugeFile: f&SbHugeFile > 0,
- GdtCsum: f&SbGdtCsum > 0,
- DirNlink: f&SbDirNlink > 0,
- ExtraIsize: f&SbExtraIsize > 0,
- HasSnapshot: f&SbHasSnapshot > 0,
- Quota: f&SbQuota > 0,
- Bigalloc: f&SbBigalloc > 0,
- MetadataCsum: f&SbMetadataCsum > 0,
- ReadOnly: f&SbReadOnly > 0,
- }
-}
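As the package documentation above notes, the caller picks the 32-bit or 64-bit implementations at setup time, and the incompatible-feature word is what drives that choice. A minimal standalone sketch of the decision; the constant mirrors SbIs64Bit, and the sample value in main is only an illustration of a commonly seen ext4 feature set.

package main

import "fmt"

// sbIs64Bit mirrors the SbIs64Bit constant above.
const sbIs64Bit = 0x80

// bgDescLayout reports which descriptor layout applies: with the 64-bit
// feature set, group descriptors are at least 64 bytes and block numbers are
// 64 bits wide; otherwise the 32-byte, 32-bit layouts are used.
func bgDescLayout(featureIncompat uint32) string {
	if featureIncompat&sbIs64Bit != 0 {
		return "use BlockGroup64Bit / SuperBlock64Bit (descriptors >= 64 bytes)"
	}
	return "use BlockGroup32Bit (32-byte descriptors)"
}

func main() {
	fmt.Println(bgDescLayout(0x2c2)) // illustrative incompat word with 64bit set
}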
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
deleted file mode 100644
index 4dc6080fb..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_32.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock32Bit implements SuperBlock and represents the 32-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. Should be used only if
-// RevLevel = DynamicRev and 64-bit feature is disabled.
-//
-// +marshal
-type SuperBlock32Bit struct {
- // We embed the old superblock struct here because the 32-bit version is just
- // an extension of the old version.
- SuperBlockOld
-
- FirstInode uint32
- InodeSizeRaw uint16
- BlockGroupNumber uint16
- FeatureCompat uint32
- FeatureIncompat uint32
- FeatureRoCompat uint32
- UUID [16]byte
- VolumeName [16]byte
- LastMounted [64]byte
- AlgoUsageBitmap uint32
- PreallocBlocks uint8
- PreallocDirBlocks uint8
- ReservedGdtBlocks uint16
- JournalUUID [16]byte
- JournalInum uint32
- JournalDev uint32
- LastOrphan uint32
- HashSeed [4]uint32
- DefaultHashVersion uint8
- JnlBackupType uint8
- BgDescSizeRaw uint16
- DefaultMountOpts uint32
- FirstMetaBg uint32
- MkfsTime uint32
- JnlBlocks [17]uint32
-}
-
-// Compiles only if SuperBlock32Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock32Bit)(nil)
-
-// Only override methods which change based on the additional fields above.
-// Not overriding SuperBlock.BgDescSize because it would still return 32 here.
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlock32Bit) InodeSize() uint16 {
- return sb.InodeSizeRaw
-}
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlock32Bit) CompatibleFeatures() CompatFeatures {
- return CompatFeaturesFromInt(sb.FeatureCompat)
-}
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlock32Bit) IncompatibleFeatures() IncompatFeatures {
- return IncompatFeaturesFromInt(sb.FeatureIncompat)
-}
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlock32Bit) ReadOnlyCompatibleFeatures() RoCompatFeatures {
- return RoCompatFeaturesFromInt(sb.FeatureRoCompat)
-}
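SuperBlock32Bit gets most of its SuperBlock implementation for free by embedding SuperBlockOld and overriding only the accessors whose backing fields were added in the newer revision. A minimal standalone sketch of that embed-and-override pattern; the interface and types here are illustrative stand-ins, not the real disklayout API:

package main

import "fmt"

// superBlock is a stand-in for the SuperBlock interface; only InodeSize
// matters for this sketch.
type superBlock interface {
	InodeSize() uint16
}

// oldSB mirrors SuperBlockOld: the old revision has a fixed 128-byte inode.
type oldSB struct{}

func (oldSB) InodeSize() uint16 { return 128 }

// sb32 mirrors SuperBlock32Bit: it embeds the old struct and overrides only
// the methods whose values come from the additional on-disk fields.
type sb32 struct {
	oldSB
	inodeSizeRaw uint16
}

func (s sb32) InodeSize() uint16 { return s.inodeSizeRaw }

func main() {
	var sb superBlock = sb32{inodeSizeRaw: 256}
	fmt.Println(sb.InodeSize()) // 256: the override wins over the embedded method
}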
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
deleted file mode 100644
index 2c9039327..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_64.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlock64Bit implements SuperBlock and represents the 64-bit version of
-// the ext4_super_block struct in fs/ext4/ext4.h. This adds up to exactly
-// 1024 bytes (smallest possible block size) and hence the superblock always
-// fits in no more than one data block. Should only be used when the 64-bit
-// feature is set.
-//
-// +marshal
-type SuperBlock64Bit struct {
- // We embed the 32-bit struct here because the 64-bit version is just an
- // extension of the 32-bit version.
- SuperBlock32Bit
-
- BlocksCountHi uint32
- ReservedBlocksCountHi uint32
- FreeBlocksCountHi uint32
- MinInodeSize uint16
- WantInodeSize uint16
- Flags uint32
- RaidStride uint16
- MmpInterval uint16
- MmpBlock uint64
- RaidStripeWidth uint32
- LogGroupsPerFlex uint8
- ChecksumType uint8
- _ uint16
- KbytesWritten uint64
- SnapshotInum uint32
- SnapshotID uint32
- SnapshotRsrvBlocksCount uint64
- SnapshotList uint32
- ErrorCount uint32
- FirstErrorTime uint32
- FirstErrorInode uint32
- FirstErrorBlock uint64
- FirstErrorFunction [32]byte
- FirstErrorLine uint32
- LastErrorTime uint32
- LastErrorInode uint32
- LastErrorLine uint32
- LastErrorBlock uint64
- LastErrorFunction [32]byte
- MountOpts [64]byte
- UserQuotaInum uint32
- GroupQuotaInum uint32
- OverheadBlocks uint32
- BackupBgs [2]uint32
- EncryptAlgos [4]uint8
- EncryptPwSalt [16]uint8
- LostFoundInode uint32
- ProjectQuotaInode uint32
- ChecksumSeed uint32
- WtimeHi uint8
- MtimeHi uint8
- MkfsTimeHi uint8
- LastCheckHi uint8
- FirstErrorTimeHi uint8
- LastErrorTimeHi uint8
- _ [2]uint8
- Encoding uint16
- EncodingFlags uint16
- _ [95]uint32
- Checksum uint32
-}
-
-// Compiles only if SuperBlock64Bit implements SuperBlock.
-var _ SuperBlock = (*SuperBlock64Bit)(nil)
-
-// Only override methods which change based on the 64-bit feature.
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlock64Bit) BlocksCount() uint64 {
- return (uint64(sb.BlocksCountHi) << 32) | uint64(sb.BlocksCountLo)
-}
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlock64Bit) FreeBlocksCount() uint64 {
- return (uint64(sb.FreeBlocksCountHi) << 32) | uint64(sb.FreeBlocksCountLo)
-}
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlock64Bit) BgDescSize() uint16 { return sb.BgDescSizeRaw }
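BlocksCount and FreeBlocksCount above assemble a 64-bit count from the 32-bit high word added by the 64-bit feature and the 32-bit low word inherited from the older layout. A tiny standalone sketch of that shift-and-or:

package main

import "fmt"

// combine64 mirrors the (hi << 32) | lo pattern used by
// SuperBlock64Bit.BlocksCount and FreeBlocksCount above.
func combine64(hi, lo uint32) uint64 {
	return uint64(hi)<<32 | uint64(lo)
}

func main() {
	// With hi = 0x1 and lo = 0x80000000, the full count is 0x180000000.
	fmt.Printf("%#x\n", combine64(0x1, 0x80000000))
}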
diff --git a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go b/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
deleted file mode 100644
index e4709f23c..000000000
--- a/pkg/sentry/fsimpl/ext/disklayout/superblock_old.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package disklayout
-
-// SuperBlockOld implements SuperBlock and represents the old version of the
-// superblock struct. Should be used only if RevLevel = OldRev.
-//
-// +marshal
-type SuperBlockOld struct {
- InodesCountRaw uint32
- BlocksCountLo uint32
- ReservedBlocksCount uint32
- FreeBlocksCountLo uint32
- FreeInodesCountRaw uint32
- FirstDataBlockRaw uint32
- LogBlockSize uint32
- LogClusterSize uint32
- BlocksPerGroupRaw uint32
- ClustersPerGroupRaw uint32
- InodesPerGroupRaw uint32
- Mtime uint32
- Wtime uint32
- MountCountRaw uint16
- MaxMountCountRaw uint16
- MagicRaw uint16
- State uint16
- Errors uint16
- MinorRevLevel uint16
- LastCheck uint32
- CheckInterval uint32
- CreatorOS uint32
- RevLevel uint32
- DefResUID uint16
- DefResGID uint16
-}
-
-// Compiles only if SuperBlockOld implements SuperBlock.
-var _ SuperBlock = (*SuperBlockOld)(nil)
-
-// InodesCount implements SuperBlock.InodesCount.
-func (sb *SuperBlockOld) InodesCount() uint32 { return sb.InodesCountRaw }
-
-// BlocksCount implements SuperBlock.BlocksCount.
-func (sb *SuperBlockOld) BlocksCount() uint64 { return uint64(sb.BlocksCountLo) }
-
-// FreeBlocksCount implements SuperBlock.FreeBlocksCount.
-func (sb *SuperBlockOld) FreeBlocksCount() uint64 { return uint64(sb.FreeBlocksCountLo) }
-
-// FreeInodesCount implements SuperBlock.FreeInodesCount.
-func (sb *SuperBlockOld) FreeInodesCount() uint32 { return sb.FreeInodesCountRaw }
-
-// MountCount implements SuperBlock.MountCount.
-func (sb *SuperBlockOld) MountCount() uint16 { return sb.MountCountRaw }
-
-// MaxMountCount implements SuperBlock.MaxMountCount.
-func (sb *SuperBlockOld) MaxMountCount() uint16 { return sb.MaxMountCountRaw }
-
-// FirstDataBlock implements SuperBlock.FirstDataBlock.
-func (sb *SuperBlockOld) FirstDataBlock() uint32 { return sb.FirstDataBlockRaw }
-
-// BlockSize implements SuperBlock.BlockSize.
-func (sb *SuperBlockOld) BlockSize() uint64 { return 1 << (10 + sb.LogBlockSize) }
-
-// BlocksPerGroup implements SuperBlock.BlocksPerGroup.
-func (sb *SuperBlockOld) BlocksPerGroup() uint32 { return sb.BlocksPerGroupRaw }
-
-// ClusterSize implements SuperBlock.ClusterSize.
-func (sb *SuperBlockOld) ClusterSize() uint64 { return 1 << (10 + sb.LogClusterSize) }
-
-// ClustersPerGroup implements SuperBlock.ClustersPerGroup.
-func (sb *SuperBlockOld) ClustersPerGroup() uint32 { return sb.ClustersPerGroupRaw }
-
-// InodeSize implements SuperBlock.InodeSize.
-func (sb *SuperBlockOld) InodeSize() uint16 { return OldInodeSize }
-
-// InodesPerGroup implements SuperBlock.InodesPerGroup.
-func (sb *SuperBlockOld) InodesPerGroup() uint32 { return sb.InodesPerGroupRaw }
-
-// BgDescSize implements SuperBlock.BgDescSize.
-func (sb *SuperBlockOld) BgDescSize() uint16 { return 32 }
-
-// CompatibleFeatures implements SuperBlock.CompatibleFeatures.
-func (sb *SuperBlockOld) CompatibleFeatures() CompatFeatures { return CompatFeatures{} }
-
-// IncompatibleFeatures implements SuperBlock.IncompatibleFeatures.
-func (sb *SuperBlockOld) IncompatibleFeatures() IncompatFeatures { return IncompatFeatures{} }
-
-// ReadOnlyCompatibleFeatures implements SuperBlock.ReadOnlyCompatibleFeatures.
-func (sb *SuperBlockOld) ReadOnlyCompatibleFeatures() RoCompatFeatures { return RoCompatFeatures{} }
-
-// Magic implements SuperBlock.Magic.
-func (sb *SuperBlockOld) Magic() uint16 { return sb.MagicRaw }
-
-// Revision implements SuperBlock.Revision.
-func (sb *SuperBlockOld) Revision() SbRevision { return SbRevision(sb.RevLevel) }
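BlockSize and ClusterSize above decode the superblock's logarithmic encoding: the on-disk value stores log2(size) minus 10, so 0 means 1 KiB and 2 means 4 KiB. A minimal sketch of that decoding:

package main

import "fmt"

// blockSize mirrors SuperBlockOld.BlockSize: the superblock stores
// log2(blockSize) - 10, so the size is 1 << (10 + logBlockSize).
func blockSize(logBlockSize uint32) uint64 {
	return 1 << (10 + logBlockSize)
}

func main() {
	for _, lg := range []uint32{0, 1, 2} {
		fmt.Printf("log=%d size=%d\n", lg, blockSize(lg)) // 1024, 2048, 4096
	}
}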
diff --git a/pkg/sentry/fsimpl/ext/ext.go b/pkg/sentry/fsimpl/ext/ext.go
deleted file mode 100644
index 38fb7962b..000000000
--- a/pkg/sentry/fsimpl/ext/ext.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ext implements readonly ext(2/3/4) filesystems.
-package ext
-
-import (
- "errors"
- "fmt"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fd"
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// Name is the name of this filesystem.
-const Name = "ext"
-
-// FilesystemType implements vfs.FilesystemType.
-//
-// +stateify savable
-type FilesystemType struct{}
-
-// getDeviceFd returns an io.ReaderAt to the underlying device.
-// Currently there are two ways of mounting an ext(2/3/4) fs:
-// 1. Specify a mount with our internal special MountType in the OCI spec.
-// 2. Expose the device to the container and mount it from the application layer.
-func getDeviceFd(source string, opts vfs.GetFilesystemOptions) (io.ReaderAt, error) {
- if opts.InternalData == nil {
- // User mount call.
- // TODO(b/134676337): Open the device specified by `source` and return that.
- panic("unimplemented")
- }
-
- // GetFilesystem call originated from within the sentry.
- devFd, ok := opts.InternalData.(int)
- if !ok {
- return nil, errors.New("internal data for ext fs must be an int containing the file descriptor to device")
- }
-
- if devFd < 0 {
- return nil, fmt.Errorf("ext device file descriptor is not valid: %d", devFd)
- }
-
- // The fd.ReadWriter returned from fd.NewReadWriter() does not take ownership
- // of the file descriptor and hence will not close it when it is garbage
- // collected.
- return fd.NewReadWriter(devFd), nil
-}
-
-// isCompatible checks whether the superblock's feature sets are compatible
-// with this implementation. We only need to check the superblock incompatible
-// feature set since we are mounting readonly. The readonly compatible feature
-// set will also need to be checked when mounting read/write.
-func isCompatible(sb disklayout.SuperBlock) bool {
- // Please note that what is being checked is limited based on the fact that we
- // are mounting readonly and that we are not journaling. When mounting
- // read/write or with a journal, this must be reevaluated.
- incompatFeatures := sb.IncompatibleFeatures()
- if incompatFeatures.MetaBG {
- log.Warningf("ext fs: meta block groups are not supported")
- return false
- }
- if incompatFeatures.MMP {
- log.Warningf("ext fs: multiple mount protection is not supported")
- return false
- }
- if incompatFeatures.Encrypted {
- log.Warningf("ext fs: encrypted inodes not supported")
- return false
- }
- if incompatFeatures.InlineData {
- log.Warningf("ext fs: inline files not supported")
- return false
- }
- return true
-}
-
-// Name implements vfs.FilesystemType.Name.
-func (FilesystemType) Name() string {
- return Name
-}
-
-// Release implements vfs.FilesystemType.Release.
-func (FilesystemType) Release(ctx context.Context) {}
-
-// GetFilesystem implements vfs.FilesystemType.GetFilesystem.
-func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, creds *auth.Credentials, source string, opts vfs.GetFilesystemOptions) (*vfs.Filesystem, *vfs.Dentry, error) {
- // TODO(b/134676337): Ensure that the user is mounting readonly. If not,
- // EACCES should be returned according to mount(2). Filesystem independent
- // flags (like readonly) are currently not available in pkg/sentry/vfs.
-
- devMinor, err := vfsObj.GetAnonBlockDevMinor()
- if err != nil {
- return nil, nil, err
- }
-
- dev, err := getDeviceFd(source, opts)
- if err != nil {
- return nil, nil, err
- }
-
- fs := filesystem{
- dev: dev,
- inodeCache: make(map[uint32]*inode),
- devMinor: devMinor,
- }
- fs.vfsfs.Init(vfsObj, &fsType, &fs)
- fs.sb, err = readSuperBlock(dev)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- if fs.sb.Magic() != linux.EXT_SUPER_MAGIC {
- // mount(2) specifies that EINVAL should be returned if the superblock is
- // invalid.
- fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EINVAL
- }
-
- // Refuse to mount if the filesystem is incompatible.
- if !isCompatible(fs.sb) {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EINVAL
- }
-
- fs.bgs, err = readBlockGroups(dev, fs.sb)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
-
- rootInode, err := fs.getOrCreateInodeLocked(disklayout.RootDirInode)
- if err != nil {
- fs.vfsfs.DecRef(ctx)
- return nil, nil, err
- }
- rootInode.incRef()
-
- return &fs.vfsfs, &newDentry(rootInode).vfsd, nil
-}
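getDeviceFd above distinguishes a sentry-internal mount (the device file descriptor arrives as an int through InternalData, as the test below does with int(f.Fd())) from a not-yet-supported user mount. A standalone sketch of that type-assertion pattern, with a hypothetical options struct standing in for vfs.GetFilesystemOptions:

package main

import (
	"errors"
	"fmt"
)

// getFilesystemOptions is a stand-in for vfs.GetFilesystemOptions; only the
// InternalData field matters for this sketch.
type getFilesystemOptions struct {
	InternalData interface{}
}

// deviceFd mirrors the getDeviceFd logic above: an internal mount carries the
// device file descriptor as an int, anything else is rejected.
func deviceFd(opts getFilesystemOptions) (int, error) {
	if opts.InternalData == nil {
		return -1, errors.New("user mounts are not supported in this sketch")
	}
	devFd, ok := opts.InternalData.(int)
	if !ok {
		return -1, errors.New("internal data must be an int file descriptor")
	}
	if devFd < 0 {
		return -1, fmt.Errorf("invalid device file descriptor: %d", devFd)
	}
	return devFd, nil
}

func main() {
	fd, err := deviceFd(getFilesystemOptions{InternalData: 3})
	fmt.Println(fd, err) // 3 <nil>
}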
diff --git a/pkg/sentry/fsimpl/ext/ext_test.go b/pkg/sentry/fsimpl/ext/ext_test.go
deleted file mode 100644
index d9fd4590c..000000000
--- a/pkg/sentry/fsimpl/ext/ext_test.go
+++ /dev/null
@@ -1,926 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "io"
- "os"
- "path"
- "sort"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-const (
- assetsDir = "pkg/sentry/fsimpl/ext/assets"
-)
-
-var (
- ext2ImagePath = path.Join(assetsDir, "tiny.ext2")
- ext3ImagePath = path.Join(assetsDir, "tiny.ext3")
- ext4ImagePath = path.Join(assetsDir, "tiny.ext4")
-)
-
-// setUp opens imagePath as an ext Filesystem and returns all necessary
-// elements required to run tests. If the returned error is nil, it also returns
-// a tear down function which must be called after the test is run for clean up.
-func setUp(t *testing.T, imagePath string) (context.Context, *vfs.VirtualFilesystem, *vfs.VirtualDentry, func(), error) {
- localImagePath, err := testutil.FindFile(imagePath)
- if err != nil {
- return nil, nil, nil, nil, fmt.Errorf("failed to open local image at path %s: %v", imagePath, err)
- }
-
- f, err := os.Open(localImagePath)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- ctx := contexttest.Context(t)
- creds := auth.CredentialsFromContext(ctx)
-
- // Create VFS.
- vfsObj := &vfs.VirtualFilesystem{}
- if err := vfsObj.Init(ctx); err != nil {
- t.Fatalf("VFS init: %v", err)
- }
- vfsObj.MustRegisterFilesystemType("extfs", FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
- })
- mntns, err := vfsObj.NewMountNamespace(ctx, creds, localImagePath, "extfs", &vfs.MountOptions{
- GetFilesystemOptions: vfs.GetFilesystemOptions{
- InternalData: int(f.Fd()),
- },
- })
- if err != nil {
- f.Close()
- return nil, nil, nil, nil, err
- }
-
- root := mntns.Root()
- root.IncRef()
-
- tearDown := func() {
- root.DecRef(ctx)
-
- if err := f.Close(); err != nil {
- t.Fatalf("tearDown failed: %v", err)
- }
- }
- return ctx, vfsObj, &root, tearDown, nil
-}
-
-// TODO(b/134676337): Test vfs.FilesystemImpl.ReadlinkAt and
-// vfs.FilesystemImpl.StatFSAt which are not implemented in
-// vfs.VirtualFilesystem yet.
-
-// TestSeek tests vfs.FileDescriptionImpl.Seek functionality.
-func TestSeek(t *testing.T) {
- type seekTest struct {
- name string
- image string
- path string
- }
-
- tests := []seekTest{
- {
- name: "ext4 root dir seek",
- image: ext4ImagePath,
- path: "/",
- },
- {
- name: "ext3 root dir seek",
- image: ext3ImagePath,
- path: "/",
- },
- {
- name: "ext2 root dir seek",
- image: ext2ImagePath,
- path: "/",
- },
- {
- name: "ext4 reg file seek",
- image: ext4ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext3 reg file seek",
- image: ext3ImagePath,
- path: "/file.txt",
- },
- {
- name: "ext2 reg file seek",
- image: ext2ImagePath,
- path: "/file.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- if n, err := fd.Seek(ctx, 0, linux.SEEK_SET); n != 0 || err != nil {
- t.Errorf("expected seek position 0, got %d and error %v", n, err)
- }
-
- stat, err := fd.Stat(ctx, vfs.StatOptions{})
- if err != nil {
- t.Errorf("fd.stat failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- // We should be able to seek beyond the end of file.
- size := int64(stat.Size)
- if n, err := fd.Seek(ctx, size, linux.SEEK_SET); n != size || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -1, linux.SEEK_SET); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- if n, err := fd.Seek(ctx, 3, linux.SEEK_CUR); n != size+3 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+3, n, err)
- }
-
- // Make sure negative offsets work with SEEK_CUR.
- if n, err := fd.Seek(ctx, -2, linux.SEEK_CUR); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 2), linux.SEEK_CUR); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
-
- // Make sure SEEK_END works with regular files.
- if _, ok := fd.Impl().(*regularFileFD); ok {
- // Seek back to 0.
- if n, err := fd.Seek(ctx, -size, linux.SEEK_END); n != 0 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", 0, n, err)
- }
-
- // Seek forward beyond EOF.
- if n, err := fd.Seek(ctx, 1, linux.SEEK_END); n != size+1 || err != nil {
- t.Errorf("expected seek position %d, got %d and error %v", size+1, n, err)
- }
-
- // EINVAL should be returned if the resulting offset is negative.
- if _, err := fd.Seek(ctx, -(size + 1), linux.SEEK_END); err != syserror.EINVAL {
- t.Errorf("expected error EINVAL but got %v", err)
- }
- }
- })
- }
-}
-
-// TestStatAt tests filesystem.StatAt functionality.
-func TestStatAt(t *testing.T) {
- type statAtTest struct {
- name string
- image string
- path string
- want linux.Statx
- }
-
- tests := []statAtTest{
- {
- name: "ext4 statx small file",
- image: ext4ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext3 statx small file",
- image: ext3ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext2 statx small file",
- image: ext2ImagePath,
- path: "/file.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13,
- },
- },
- {
- name: "ext4 statx big file",
- image: ext4ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext3 statx big file",
- image: ext3ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext2 statx big file",
- image: ext2ImagePath,
- path: "/bigfile.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0644 | linux.ModeRegular,
- Size: 13042,
- },
- },
- {
- name: "ext4 statx symlink file",
- image: ext4ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext3 statx symlink file",
- image: ext3ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- {
- name: "ext2 statx symlink file",
- image: ext2ImagePath,
- path: "/symlink.txt",
- want: linux.Statx{
- Blksize: 0x400,
- Nlink: 1,
- UID: 0,
- GID: 0,
- Mode: 0777 | linux.ModeSymlink,
- Size: 8,
- },
- },
- }
-
- // Ignore the fields that are not supported by filesystem.StatAt yet and
- // those which are likely to change as the image does.
- ignoredFields := map[string]bool{
- "Attributes": true,
- "AttributesMask": true,
- "Atime": true,
- "Blocks": true,
- "Btime": true,
- "Ctime": true,
- "DevMajor": true,
- "DevMinor": true,
- "Ino": true,
- "Mask": true,
- "Mtime": true,
- "RdevMajor": true,
- "RdevMinor": true,
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- got, err := vfsfs.StatAt(ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.StatOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.StatAt failed for file %s in image %s: %v", test.path, test.image, err)
- }
-
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- _, ok := ignoredFields[p.String()]
- return ok
- }, cmp.Ignore())
- if diff := cmp.Diff(got, test.want, cmpIgnoreFields, cmpopts.IgnoreUnexported(linux.Statx{})); diff != "" {
- t.Errorf("stat mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRead tests the read functionality for vfs file descriptions.
-func TestRead(t *testing.T) {
- type readTest struct {
- name string
- image string
- absPath string
- }
-
- tests := []readTest{
- {
- name: "ext4 read small file",
- image: ext4ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext3 read small file",
- image: ext3ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext2 read small file",
- image: ext2ImagePath,
- absPath: "/file.txt",
- },
- {
- name: "ext4 read big file",
- image: ext4ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext3 read big file",
- image: ext3ImagePath,
- absPath: "/bigfile.txt",
- },
- {
- name: "ext2 read big file",
- image: ext2ImagePath,
- absPath: "/bigfile.txt",
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.absPath)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- // Get a local file descriptor and compare its functionality with a vfs file
- // description for the same file.
- localFile, err := testutil.FindFile(path.Join(assetsDir, test.absPath))
- if err != nil {
- t.Fatalf("testutil.FindFile failed for %s: %v", test.absPath, err)
- }
-
- f, err := os.Open(localFile)
- if err != nil {
- t.Fatalf("os.Open failed for %s: %v", localFile, err)
- }
- defer f.Close()
-
- // Read the entire file by reading one byte repeatedly. Doing this stress
- // tests the underlying file reader implementation.
- got := make([]byte, 1)
- want := make([]byte, 1)
- for {
- n, err := f.Read(want)
- fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{})
-
- if diff := cmp.Diff(got, want); diff != "" {
- t.Errorf("file data mismatch (-want +got):\n%s", diff)
- }
-
- // Make sure there is no more file data left after getting EOF.
- if n == 0 || err == io.EOF {
- if n, _ := fd.Read(ctx, usermem.BytesIOSequence(got), vfs.ReadOptions{}); n != 0 {
- t.Errorf("extra unexpected file data in file %s in image %s", test.absPath, test.image)
- }
-
- break
- }
-
- if err != nil {
- t.Fatalf("read failed: %v", err)
- }
- }
- })
- }
-}
-
-// iterDirentsCb is a simple callback which just keeps adding the dirents to an
-// internal list. Implements vfs.IterDirentsCallback.
-type iterDirentsCb struct {
- dirents []vfs.Dirent
-}
-
-// Compiles only if iterDirentCb implements vfs.IterDirentsCallback.
-var _ vfs.IterDirentsCallback = (*iterDirentsCb)(nil)
-
-// newIterDirentCb is the iterDirentsCb constructor.
-func newIterDirentCb() *iterDirentsCb {
- return &iterDirentsCb{dirents: make([]vfs.Dirent, 0)}
-}
-
-// Handle implements vfs.IterDirentsCallback.Handle.
-func (cb *iterDirentsCb) Handle(dirent vfs.Dirent) error {
- cb.dirents = append(cb.dirents, dirent)
- return nil
-}
-
-// TestIterDirents tests the FileDescriptionImpl.IterDirents functionality.
-func TestIterDirents(t *testing.T) {
- type iterDirentTest struct {
- name string
- image string
- path string
- want []vfs.Dirent
- }
-
- wantDirents := []vfs.Dirent{
- {
- Name: ".",
- Type: linux.DT_DIR,
- },
- {
- Name: "..",
- Type: linux.DT_DIR,
- },
- {
- Name: "lost+found",
- Type: linux.DT_DIR,
- },
- {
- Name: "file.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "bigfile.txt",
- Type: linux.DT_REG,
- },
- {
- Name: "symlink.txt",
- Type: linux.DT_LNK,
- },
- }
- tests := []iterDirentTest{
- {
- name: "ext4 root dir iteration",
- image: ext4ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext3 root dir iteration",
- image: ext3ImagePath,
- path: "/",
- want: wantDirents,
- },
- {
- name: "ext2 root dir iteration",
- image: ext2ImagePath,
- path: "/",
- want: wantDirents,
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- ctx, vfsfs, root, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fd, err := vfsfs.OpenAt(
- ctx,
- auth.CredentialsFromContext(ctx),
- &vfs.PathOperation{Root: *root, Start: *root, Path: fspath.Parse(test.path)},
- &vfs.OpenOptions{},
- )
- if err != nil {
- t.Fatalf("vfsfs.OpenAt failed: %v", err)
- }
-
- cb := &iterDirentsCb{}
- if err = fd.IterDirents(ctx, cb); err != nil {
- t.Fatalf("dir fd.IterDirents() failed: %v", err)
- }
-
- sort.Slice(cb.dirents, func(i int, j int) bool { return cb.dirents[i].Name < cb.dirents[j].Name })
- sort.Slice(test.want, func(i int, j int) bool { return test.want[i].Name < test.want[j].Name })
-
- // Ignore the inode number and offset of dirents because those are likely to
- // change as the underlying image changes.
- cmpIgnoreFields := cmp.FilterPath(func(p cmp.Path) bool {
- return p.String() == "Ino" || p.String() == "NextOff"
- }, cmp.Ignore())
- if diff := cmp.Diff(cb.dirents, test.want, cmpIgnoreFields); diff != "" {
- t.Errorf("dirents mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestRootDir tests that the root directory inode is correctly initialized and
-// returned from setUp.
-func TestRootDir(t *testing.T) {
- type inodeProps struct {
- Mode linux.FileMode
- UID auth.KUID
- GID auth.KGID
- Size uint64
- InodeSize uint16
- Links uint16
- Flags disklayout.InodeFlags
- }
-
- type rootDirTest struct {
- name string
- image string
- wantInode inodeProps
- }
-
- tests := []rootDirTest{
- {
- name: "ext4 root dir",
- image: ext4ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- Flags: disklayout.InodeFlags{Extents: true},
- },
- },
- {
- name: "ext3 root dir",
- image: ext3ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- {
- name: "ext2 root dir",
- image: ext2ImagePath,
- wantInode: inodeProps{
- Mode: linux.ModeDirectory | 0755,
- Size: 0x400,
- InodeSize: 0x80,
- Links: 3,
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- d, ok := vd.Dentry().Impl().(*dentry)
- if !ok {
- t.Fatalf("ext dentry of incorrect type: %T", vd.Dentry().Impl())
- }
-
- // Offload inode contents into local structs for comparison.
- gotInode := inodeProps{
- Mode: d.inode.diskInode.Mode(),
- UID: d.inode.diskInode.UID(),
- GID: d.inode.diskInode.GID(),
- Size: d.inode.diskInode.Size(),
- InodeSize: d.inode.diskInode.InodeSize(),
- Links: d.inode.diskInode.LinksCount(),
- Flags: d.inode.diskInode.Flags(),
- }
-
- if diff := cmp.Diff(gotInode, test.wantInode); diff != "" {
- t.Errorf("inode mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
-
-// TestFilesystemInit tests that the filesystem superblock and block group
-// descriptors are correctly read in and initialized.
-func TestFilesystemInit(t *testing.T) {
- // sb only contains the immutable properties of the superblock.
- type sb struct {
- InodesCount uint32
- BlocksCount uint64
- MaxMountCount uint16
- FirstDataBlock uint32
- BlockSize uint64
- BlocksPerGroup uint32
- ClusterSize uint64
- ClustersPerGroup uint32
- InodeSize uint16
- InodesPerGroup uint32
- BgDescSize uint16
- Magic uint16
- Revision disklayout.SbRevision
- CompatFeatures disklayout.CompatFeatures
- IncompatFeatures disklayout.IncompatFeatures
- RoCompatFeatures disklayout.RoCompatFeatures
- }
-
- // bg only contains the immutable properties of the block group descriptor.
- type bg struct {
- InodeTable uint64
- BlockBitmap uint64
- InodeBitmap uint64
- ExclusionBitmap uint64
- Flags disklayout.BGFlags
- }
-
- type fsInitTest struct {
- name string
- image string
- wantSb sb
- wantBgs []bg
- }
-
- tests := []fsInitTest{
- {
- name: "ext4 filesystem init",
- image: ext4ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x40,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- Extents: true,
- Is64Bit: true,
- FlexBg: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- HugeFile: true,
- DirNlink: true,
- ExtraIsize: true,
- MetadataCsum: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x23,
- BlockBitmap: 0x3,
- InodeBitmap: 0x13,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext3 filesystem init",
- image: ext3ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- {
- name: "ext2 filesystem init",
- image: ext2ImagePath,
- wantSb: sb{
- InodesCount: 0x10,
- BlocksCount: 0x40,
- MaxMountCount: 0xffff,
- FirstDataBlock: 0x1,
- BlockSize: 0x400,
- BlocksPerGroup: 0x2000,
- ClusterSize: 0x400,
- ClustersPerGroup: 0x2000,
- InodeSize: 0x80,
- InodesPerGroup: 0x10,
- BgDescSize: 0x20,
- Magic: linux.EXT_SUPER_MAGIC,
- Revision: disklayout.DynamicRev,
- CompatFeatures: disklayout.CompatFeatures{
- ExtAttr: true,
- ResizeInode: true,
- DirIndex: true,
- },
- IncompatFeatures: disklayout.IncompatFeatures{
- DirentFileType: true,
- },
- RoCompatFeatures: disklayout.RoCompatFeatures{
- Sparse: true,
- LargeFile: true,
- },
- },
- wantBgs: []bg{
- {
- InodeTable: 0x5,
- BlockBitmap: 0x3,
- InodeBitmap: 0x4,
- Flags: disklayout.BGFlags{
- InodeZeroed: true,
- },
- },
- },
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- _, _, vd, tearDown, err := setUp(t, test.image)
- if err != nil {
- t.Fatalf("setUp failed: %v", err)
- }
- defer tearDown()
-
- fs, ok := vd.Mount().Filesystem().Impl().(*filesystem)
- if !ok {
- t.Fatalf("ext filesystem of incorrect type: %T", vd.Mount().Filesystem().Impl())
- }
-
- // Offload superblock and block group descriptors contents into
- // local structs for comparison.
- totalFreeInodes := uint32(0)
- totalFreeBlocks := uint64(0)
- gotSb := sb{
- InodesCount: fs.sb.InodesCount(),
- BlocksCount: fs.sb.BlocksCount(),
- MaxMountCount: fs.sb.MaxMountCount(),
- FirstDataBlock: fs.sb.FirstDataBlock(),
- BlockSize: fs.sb.BlockSize(),
- BlocksPerGroup: fs.sb.BlocksPerGroup(),
- ClusterSize: fs.sb.ClusterSize(),
- ClustersPerGroup: fs.sb.ClustersPerGroup(),
- InodeSize: fs.sb.InodeSize(),
- InodesPerGroup: fs.sb.InodesPerGroup(),
- BgDescSize: fs.sb.BgDescSize(),
- Magic: fs.sb.Magic(),
- Revision: fs.sb.Revision(),
- CompatFeatures: fs.sb.CompatibleFeatures(),
- IncompatFeatures: fs.sb.IncompatibleFeatures(),
- RoCompatFeatures: fs.sb.ReadOnlyCompatibleFeatures(),
- }
- gotNumBgs := len(fs.bgs)
- gotBgs := make([]bg, gotNumBgs)
- for i := 0; i < gotNumBgs; i++ {
- gotBgs[i].InodeTable = fs.bgs[i].InodeTable()
- gotBgs[i].BlockBitmap = fs.bgs[i].BlockBitmap()
- gotBgs[i].InodeBitmap = fs.bgs[i].InodeBitmap()
- gotBgs[i].ExclusionBitmap = fs.bgs[i].ExclusionBitmap()
- gotBgs[i].Flags = fs.bgs[i].Flags()
-
- totalFreeInodes += fs.bgs[i].FreeInodesCount()
- totalFreeBlocks += uint64(fs.bgs[i].FreeBlocksCount())
- }
-
- if diff := cmp.Diff(gotSb, test.wantSb); diff != "" {
- t.Errorf("superblock mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(gotBgs, test.wantBgs); diff != "" {
- t.Errorf("block group descriptors mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeInodes, fs.sb.FreeInodesCount()); diff != "" {
- t.Errorf("total free inodes mismatch (-want +got):\n%s", diff)
- }
-
- if diff := cmp.Diff(totalFreeBlocks, fs.sb.FreeBlocksCount()); diff != "" {
- t.Errorf("total free blocks mismatch (-want +got):\n%s", diff)
- }
- })
- }
-}
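TestSeek above encodes the expected lseek semantics: an offset may land past EOF, but any whence/offset combination that would produce a negative position fails with EINVAL. A small standalone sketch of that offset arithmetic (the real implementation lived in the removed regular-file code and is not reproduced here):

package main

import (
	"errors"
	"fmt"
)

var errInval = errors.New("EINVAL")

// seek computes the new file offset the way the tests above expect:
// SEEK_SET (0) uses the offset directly, SEEK_CUR (1) is relative to the
// current position, SEEK_END (2) is relative to the file size, and negative
// results are rejected. Seeking past EOF is allowed.
func seek(cur, size, off int64, whence int) (int64, error) {
	var n int64
	switch whence {
	case 0: // SEEK_SET
		n = off
	case 1: // SEEK_CUR
		n = cur + off
	case 2: // SEEK_END
		n = size + off
	default:
		return 0, errInval
	}
	if n < 0 {
		return 0, errInval
	}
	return n, nil
}

func main() {
	const size = 13
	pos, _ := seek(size, size, 3, 1) // SEEK_CUR past EOF is allowed: 16
	fmt.Println(pos)
	_, err := seek(0, size, -1, 0) // negative result: EINVAL
	fmt.Println(err)
}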
diff --git a/pkg/sentry/fsimpl/ext/extent_file.go b/pkg/sentry/fsimpl/ext/extent_file.go
deleted file mode 100644
index 778460107..000000000
--- a/pkg/sentry/fsimpl/ext/extent_file.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
- "sort"
-
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// extentFile is a type of regular file which uses extents to store file data.
-//
-// +stateify savable
-type extentFile struct {
- regFile regularFile
-
- // root is the root extent node. This lives in the 60 byte diskInode.Data().
- // Immutable.
- root disklayout.ExtentNode
-}
-
-// Compiles only if extentFile implements io.ReaderAt.
-var _ io.ReaderAt = (*extentFile)(nil)
-
-// newExtentFile is the extent file constructor. It reads the entire extent
-// tree into memory.
-// TODO(b/134676337): Build extent tree on demand to reduce memory usage.
-func newExtentFile(args inodeArgs) (*extentFile, error) {
- file := &extentFile{}
- file.regFile.impl = file
- file.regFile.inode.init(args, &file.regFile)
- err := file.buildExtTree()
- if err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// buildExtTree builds the extent tree by reading it from disk, performing a
-// simple DFS. It first reads the root node from the inode struct in
-// memory. Then it recursively builds the rest of the tree by reading it off
-// disk.
-//
-// Precondition: inode flag InExtents must be set.
-func (f *extentFile) buildExtTree() error {
- rootNodeData := f.regFile.inode.diskInode.Data()
-
- f.root.Header.UnmarshalBytes(rootNodeData[:disklayout.ExtentHeaderSize])
-
- // Root node can not have more than 4 entries: 60 bytes = 1 header + 4 entries.
- if f.root.Header.NumEntries > 4 {
- // read(2) specifies that EINVAL should be returned if the file is unsuitable
- // for reading.
- return syserror.EINVAL
- }
-
- f.root.Entries = make([]disklayout.ExtentEntryPair, f.root.Header.NumEntries)
- for i, off := uint16(0), disklayout.ExtentEntrySize; i < f.root.Header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if f.root.Header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
- curEntry.UnmarshalBytes(rootNodeData[off : off+disklayout.ExtentEntrySize])
- f.root.Entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if f.root.Header.Height > 0 {
- for i := uint16(0); i < f.root.Header.NumEntries; i++ {
- var err error
- if f.root.Entries[i].Node, err = f.buildExtTreeFromDisk(f.root.Entries[i].Entry); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// buildExtTreeFromDisk reads the extent tree nodes from disk and recursively
-// builds the tree. Performs a simple DFS. It returns the ExtentNode pointed to
-// by the ExtentEntry.
-func (f *extentFile) buildExtTreeFromDisk(entry disklayout.ExtentEntry) (*disklayout.ExtentNode, error) {
- var header disklayout.ExtentHeader
- off := entry.PhysicalBlock() * f.regFile.inode.blkSize
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), &header)
- if err != nil {
- return nil, err
- }
-
- entries := make([]disklayout.ExtentEntryPair, header.NumEntries)
- for i, off := uint16(0), off+disklayout.ExtentEntrySize; i < header.NumEntries; i, off = i+1, off+disklayout.ExtentEntrySize {
- var curEntry disklayout.ExtentEntry
- if header.Height == 0 {
- // Leaf node.
- curEntry = &disklayout.Extent{}
- } else {
- // Internal node.
- curEntry = &disklayout.ExtentIdx{}
- }
-
- err := readFromDisk(f.regFile.inode.fs.dev, int64(off), curEntry)
- if err != nil {
- return nil, err
- }
- entries[i].Entry = curEntry
- }
-
- // If this node is internal, perform DFS.
- if header.Height > 0 {
- for i := uint16(0); i < header.NumEntries; i++ {
- var err error
- entries[i].Node, err = f.buildExtTreeFromDisk(entries[i].Entry)
- if err != nil {
- return nil, err
- }
- }
- }
-
- return &disklayout.ExtentNode{header, entries}, nil
-}
-
-// ReadAt implements io.ReaderAt.ReadAt.
-func (f *extentFile) ReadAt(dst []byte, off int64) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
-
- if off < 0 {
- return 0, syserror.EINVAL
- }
-
- if uint64(off) >= f.regFile.inode.diskInode.Size() {
- return 0, io.EOF
- }
-
- n, err := f.read(&f.root, uint64(off), dst)
- if n < len(dst) && err == nil {
- err = io.EOF
- }
- return n, err
-}
-
-// read is the recursive step of extentFile.ReadAt which traverses the extent
-// tree from the node passed and reads file data.
-func (f *extentFile) read(node *disklayout.ExtentNode, off uint64, dst []byte) (int, error) {
- // Perform a binary search for the node covering bytes starting at off.
- // A highly fragmented filesystem can have up to 340 entries, so linear
- // search should be avoided. Finds the first entry which does not cover the
- // file block we want and subtracts 1 to get the desired index.
- fileBlk := uint32(off / f.regFile.inode.blkSize)
- n := len(node.Entries)
- found := sort.Search(n, func(i int) bool {
- return node.Entries[i].Entry.FileBlock() > fileBlk
- }) - 1
-
- // We should be in this recursive step only if the data we want exists under
- // the current node.
- if found < 0 {
- panic("searching for a file block in an extent entry which does not cover it")
- }
-
- read := 0
- toRead := len(dst)
- var curR int
- var err error
- for i := found; i < n && read < toRead; i++ {
- if node.Header.Height == 0 {
- curR, err = f.readFromExtent(node.Entries[i].Entry.(*disklayout.Extent), off, dst[read:])
- } else {
- curR, err = f.read(node.Entries[i].Node, off, dst[read:])
- }
-
- read += curR
- off += uint64(curR)
- if err != nil {
- return read, err
- }
- }
-
- return read, nil
-}
-
-// readFromExtent reads file data from the extent. It takes advantage of the
-// sequential nature of extents and reads file data from multiple blocks in one
-// call.
-//
-// A non-nil error indicates that this is a partial read and there is probably
-// more to read from this extent. The caller should propagate the error upward
-// and not move to the next extent in the tree.
-//
-// A subsequent call to extentFile.ReadAt should continue reading from where
-// we left off, as expected.
-func (f *extentFile) readFromExtent(ex *disklayout.Extent, off uint64, dst []byte) (int, error) {
- curFileBlk := uint32(off / f.regFile.inode.blkSize)
- exFirstFileBlk := ex.FileBlock()
- exLastFileBlk := exFirstFileBlk + uint32(ex.Length) // This is exclusive.
-
- // We should be in this recursive step only if the data we want exists under
- // the current extent.
- if curFileBlk < exFirstFileBlk || exLastFileBlk <= curFileBlk {
- panic("searching for a file block in an extent which does not cover it")
- }
-
- curPhyBlk := uint64(curFileBlk-exFirstFileBlk) + ex.PhysicalBlock()
- readStart := curPhyBlk*f.regFile.inode.blkSize + (off % f.regFile.inode.blkSize)
-
- endPhyBlk := ex.PhysicalBlock() + uint64(ex.Length)
- extentEnd := endPhyBlk * f.regFile.inode.blkSize // This is exclusive.
-
- toRead := int(extentEnd - readStart)
- if len(dst) < toRead {
- toRead = len(dst)
- }
-
- n, _ := f.regFile.inode.fs.dev.ReadAt(dst[:toRead], int64(readStart))
- if n < toRead {
- return n, syserror.EIO
- }
- return n, nil
-}
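readFromExtent above maps a file offset to a physical disk offset: the block offset within the extent is added to the extent's starting physical block, and the read is capped at the end of the extent. A standalone sketch of just that arithmetic, with the block size and extent fields as plain parameters; the extent shape (file blocks 3-5 stored at physical blocks 6-8) follows node3 in the test file below, but the 1 KiB block size is chosen for illustration (the test uses a 64-byte mock block size):

package main

import "fmt"

// extent is a simplified stand-in for disklayout.Extent: it covers length
// file blocks starting at fileBlock, stored contiguously at physBlock.
type extent struct {
	fileBlock uint32
	length    uint16
	physBlock uint64
}

// readRange mirrors the offset arithmetic in readFromExtent above: it returns
// the physical byte offset to read from and the maximum number of bytes that
// can be read from this extent starting at file offset off.
func readRange(ex extent, off, blkSize uint64, want int) (start uint64, n int) {
	curFileBlk := uint32(off / blkSize)
	curPhyBlk := uint64(curFileBlk-ex.fileBlock) + ex.physBlock
	start = curPhyBlk*blkSize + off%blkSize

	extentEnd := (ex.physBlock + uint64(ex.length)) * blkSize // exclusive
	n = int(extentEnd - start)
	if want < n {
		n = want
	}
	return start, n
}

func main() {
	ex := extent{fileBlock: 3, length: 3, physBlock: 6}
	start, n := readRange(ex, 3*1024+100, 1024, 4096)
	fmt.Println(start, n) // 6244 2972
}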
diff --git a/pkg/sentry/fsimpl/ext/extent_test.go b/pkg/sentry/fsimpl/ext/extent_test.go
deleted file mode 100644
index 985f76ac0..000000000
--- a/pkg/sentry/fsimpl/ext/extent_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "bytes"
- "math/rand"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
-)
-
-const (
- // mockExtentBlkSize is the mock block size used for testing.
- // No block has more than 1 header + 4 entries.
- mockExtentBlkSize = uint64(64)
-)
-
-// The tree described below looks like:
-//
-// 0.{Head}[Idx][Idx]
-// / \
-// / \
-// 1.{Head}[Ext][Ext] 2.{Head}[Idx]
-// / | \
-// [Phy] [Phy, Phy] 3.{Head}[Ext]
-// |
-// [Phy, Phy, Phy]
-//
-// Legend:
-// - Head = ExtentHeader
-// - Idx = ExtentIdx
-// - Ext = Extent
-// - Phy = Physical Block
-//
-// Please note that ext4 might not construct extent trees looking like this.
-// This is purely for testing the tree traversal logic.
-var (
- node3 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 3,
- Length: 3,
- StartBlockLo: 6,
- },
- Node: nil,
- },
- },
- }
-
- node2 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 1,
- MaxEntries: 4,
- Height: 1,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 2,
- },
- Node: node3,
- },
- },
- }
-
- node1 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 0,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 0,
- Length: 1,
- StartBlockLo: 3,
- },
- Node: nil,
- },
- {
- Entry: &disklayout.Extent{
- FirstFileBlock: 1,
- Length: 2,
- StartBlockLo: 4,
- },
- Node: nil,
- },
- },
- }
-
- node0 = &disklayout.ExtentNode{
- Header: disklayout.ExtentHeader{
- Magic: disklayout.ExtentMagic,
- NumEntries: 2,
- MaxEntries: 4,
- Height: 2,
- },
- Entries: []disklayout.ExtentEntryPair{
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 0,
- ChildBlockLo: 0,
- },
- Node: node1,
- },
- {
- Entry: &disklayout.ExtentIdx{
- FirstFileBlock: 3,
- ChildBlockLo: 1,
- },
- Node: node2,
- },
- },
- }
-)
-
-// TestExtentReader stress tests extentReader functionality. It performs random
-// length reads from all possible positions in the extent tree.
-func TestExtentReader(t *testing.T) {
- mockExtentFile, want := extentTreeSetUp(t, node0)
- n := len(want)
-
- for from := 0; from < n; from++ {
- got := make([]byte, n-from)
-
- if read, err := mockExtentFile.ReadAt(got, int64(from)); err != nil {
- t.Fatalf("file read operation from offset %d to %d only read %d bytes: %v", from, n, read, err)
- }
-
- if diff := cmp.Diff(got, want[from:]); diff != "" {
- t.Fatalf("file data from offset %d to %d mismatched (-want +got):\n%s", from, n, diff)
- }
- }
-}
-
-// TestBuildExtentTree tests the extent tree building logic.
-func TestBuildExtentTree(t *testing.T) {
- mockExtentFile, _ := extentTreeSetUp(t, node0)
-
- opt := cmpopts.IgnoreUnexported(disklayout.ExtentIdx{}, disklayout.ExtentHeader{})
- if diff := cmp.Diff(&mockExtentFile.root, node0, opt); diff != "" {
- t.Errorf("extent tree mismatch (-want +got):\n%s", diff)
- }
-}
-
-// extentTreeSetUp writes the passed extent tree to a mock disk and constructs
-// a mock extent file with the same tree built into it. It also writes random
-// file data to disk and returns it.
-func extentTreeSetUp(t *testing.T, root *disklayout.ExtentNode) (*extentFile, []byte) {
- t.Helper()
-
- mockDisk := make([]byte, mockExtentBlkSize*10)
- mockExtentFile := &extentFile{}
- args := inodeArgs{
- fs: &filesystem{
- dev: bytes.NewReader(mockDisk),
- },
- diskInode: &disklayout.InodeNew{
- InodeOld: disklayout.InodeOld{
- SizeLo: uint32(mockExtentBlkSize) * getNumPhyBlks(root),
- },
- },
- blkSize: mockExtentBlkSize,
- }
- mockExtentFile.regFile.inode.init(args, &mockExtentFile.regFile)
-
- fileData := writeTree(&mockExtentFile.regFile.inode, mockDisk, node0, mockExtentBlkSize)
-
- if err := mockExtentFile.buildExtTree(); err != nil {
- t.Fatalf("inode.buildExtTree failed: %v", err)
- }
- return mockExtentFile, fileData
-}
-
-// writeTree writes the tree represented by `root` to the inode and disk. It
-// also writes random file data on disk.
-func writeTree(in *inode, disk []byte, root *disklayout.ExtentNode, mockExtentBlkSize uint64) []byte {
- rootData := in.diskInode.Data()
- root.Header.MarshalBytes(rootData)
- off := root.Header.SizeBytes()
- for _, ep := range root.Entries {
- ep.Entry.MarshalBytes(rootData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range root.Entries {
- if root.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeTreeToDisk is the recursive step for writeTree which writes the tree
-// on the disk only. Also writes random file data on disk.
-func writeTreeToDisk(disk []byte, curNode disklayout.ExtentEntryPair) []byte {
- nodeData := disk[curNode.Entry.PhysicalBlock()*mockExtentBlkSize:]
- curNode.Node.Header.MarshalBytes(nodeData)
- off := curNode.Node.Header.SizeBytes()
- for _, ep := range curNode.Node.Entries {
- ep.Entry.MarshalBytes(nodeData[off:])
- off += ep.Entry.SizeBytes()
- }
-
- var fileData []byte
- for _, ep := range curNode.Node.Entries {
- if curNode.Node.Header.Height == 0 {
- fileData = append(fileData, writeFileDataToExtent(disk, ep.Entry.(*disklayout.Extent))...)
- } else {
- fileData = append(fileData, writeTreeToDisk(disk, ep)...)
- }
- }
- return fileData
-}
-
-// writeFileDataToExtent writes random bytes to the blocks on disk that the
-// passed extent points to.
-func writeFileDataToExtent(disk []byte, ex *disklayout.Extent) []byte {
- phyExStartBlk := ex.PhysicalBlock()
- phyExStartOff := phyExStartBlk * mockExtentBlkSize
- phyExEndOff := phyExStartOff + uint64(ex.Length)*mockExtentBlkSize
- rand.Read(disk[phyExStartOff:phyExEndOff])
- return disk[phyExStartOff:phyExEndOff]
-}
-
-// getNumPhyBlks returns the number of physical blocks covered under the node.
-func getNumPhyBlks(node *disklayout.ExtentNode) uint32 {
- var res uint32
- for _, ep := range node.Entries {
- if node.Header.Height == 0 {
- res += uint32(ep.Entry.(*disklayout.Extent).Length)
- } else {
- res += getNumPhyBlks(ep.Node)
- }
- }
- return res
-}
diff --git a/pkg/sentry/fsimpl/ext/file_description.go b/pkg/sentry/fsimpl/ext/file_description.go
deleted file mode 100644
index 90b086468..000000000
--- a/pkg/sentry/fsimpl/ext/file_description.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// fileDescription is embedded by ext implementations of
-// vfs.FileDescriptionImpl.
-type fileDescription struct {
- vfsfd vfs.FileDescription
- vfs.FileDescriptionDefaultImpl
- vfs.LockFD
-}
-
-func (fd *fileDescription) filesystem() *filesystem {
- return fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)
-}
-
-func (fd *fileDescription) inode() *inode {
- return fd.vfsfd.Dentry().Impl().(*dentry).inode
-}
-
-// Stat implements vfs.FileDescriptionImpl.Stat.
-func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
- var stat linux.Statx
- fd.inode().statTo(&stat)
- return stat, nil
-}
-
-// SetStat implements vfs.FileDescriptionImpl.SetStat.
-func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
- if opts.Stat.Mask == 0 {
- return nil
- }
- return syserror.EPERM
-}
-
-// StatFS implements vfs.FileDescriptionImpl.StatFS.
-func (fd *fileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {
- var stat linux.Statfs
- fd.filesystem().statTo(&stat)
- return stat, nil
-}
-
-// Sync implements vfs.FileDescriptionImpl.Sync.
-func (fd *fileDescription) Sync(ctx context.Context) error {
- return nil
-}
diff --git a/pkg/sentry/fsimpl/ext/filesystem.go b/pkg/sentry/fsimpl/ext/filesystem.go
deleted file mode 100644
index d4fc484a2..000000000
--- a/pkg/sentry/fsimpl/ext/filesystem.go
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "errors"
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/fspath"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-var (
- // errResolveDirent indicates that the vfs.ResolvingPath.Component() does
- // not exist on the dentry tree but does exist on disk. So it has to be read in
- // using the in-memory dirent and added to the dentry tree. Usually indicates
- // the need to lock filesystem.mu for writing.
- errResolveDirent = errors.New("resolve path component using dirent")
-)
-
-// filesystem implements vfs.FilesystemImpl.
-//
-// +stateify savable
-type filesystem struct {
- vfsfs vfs.Filesystem
-
- // mu serializes changes to the Dentry tree.
- mu sync.RWMutex `state:"nosave"`
-
- // dev represents the underlying fs device. It does not require protection
- // because io.ReaderAt permits concurrent read calls to it. It translates to
- // the pread syscall which passes on the read request directly to the device
- // driver. Device drivers are intelligent in serving multiple concurrent read
- // requests in the optimal order (taking locality into consideration).
- dev io.ReaderAt
-
- // inodeCache maps absolute inode numbers to the corresponding Inode struct.
- // Inodes should be removed from this once their reference count hits 0.
- //
- // Protected by mu because most additions (see IterDirents) and all removals
- // from this correspond to a change in the dentry tree.
- inodeCache map[uint32]*inode
-
- // sb represents the filesystem superblock. Immutable after initialization.
- sb disklayout.SuperBlock
-
- // bgs represents all the block group descriptors for the filesystem.
- // Immutable after initialization.
- bgs []disklayout.BlockGroup
-
- // devMinor is this filesystem's device minor number. Immutable after
- // initialization.
- devMinor uint32
-}
-
-// Compiles only if filesystem implements vfs.FilesystemImpl.
-var _ vfs.FilesystemImpl = (*filesystem)(nil)
-
-// stepLocked resolves rp.Component() in parent directory vfsd. The write
-// parameter indicates whether the caller holds filesystem.mu for writing. If
-// true, an inode that exists on disk but is not yet in the dentry tree may be
-// added to the tree.
-//
-// stepLocked is loosely analogous to fs/namei.c:walk_component().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-// * inode == vfsd.Impl().(*Dentry).inode.
-func stepLocked(ctx context.Context, rp *vfs.ResolvingPath, vfsd *vfs.Dentry, inode *inode, write bool) (*vfs.Dentry, *inode, error) {
- if !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, nil, err
- }
-
- for {
- name := rp.Component()
- if name == "." {
- rp.Advance()
- return vfsd, inode, nil
- }
- d := vfsd.Impl().(*dentry)
- if name == ".." {
- isRoot, err := rp.CheckRoot(ctx, vfsd)
- if err != nil {
- return nil, nil, err
- }
- if isRoot || d.parent == nil {
- rp.Advance()
- return vfsd, inode, nil
- }
- if err := rp.CheckMount(ctx, &d.parent.vfsd); err != nil {
- return nil, nil, err
- }
- rp.Advance()
- return &d.parent.vfsd, d.parent.inode, nil
- }
-
- dir := inode.impl.(*directory)
- child, ok := dir.childCache[name]
- if !ok {
- // We may need to instantiate a new dentry for this child.
- childDirent, ok := dir.childMap[name]
- if !ok {
- // The underlying inode does not exist on disk.
- return nil, nil, syserror.ENOENT
- }
-
- if !write {
- // filesystem.mu must be held for writing to add to the dentry tree.
- return nil, nil, errResolveDirent
- }
-
- // Create and add the component's dirent to the dentry tree.
- fs := rp.Mount().Filesystem().Impl().(*filesystem)
- childInode, err := fs.getOrCreateInodeLocked(childDirent.diskDirent.Inode())
- if err != nil {
- return nil, nil, err
- }
- // incRef because this is being added to the dentry tree.
- childInode.incRef()
- child = newDentry(childInode)
- child.parent = d
- child.name = name
- dir.childCache[name] = child
- }
- if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
- return nil, nil, err
- }
- if child.inode.isSymlink() && rp.ShouldFollowSymlink() {
- if err := rp.HandleSymlink(child.inode.impl.(*symlink).target); err != nil {
- return nil, nil, err
- }
- continue
- }
- rp.Advance()
- return &child.vfsd, child.inode, nil
- }
-}
-
-// walkLocked resolves rp to an existing file. The write parameter indicates
-// whether the caller holds filesystem.mu for writing. If true, additions can
-// be made to the dentry tree while walking.
-// If errResolveDirent is returned, the walk needs to be continued with an
-// upgraded filesystem.mu.
-//
-// walkLocked is loosely analogous to Linux's fs/namei.c:path_lookupat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-func walkLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Done() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if rp.MustBeDir() && !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walkParentLocked resolves all but the last path component of rp to an
-// existing directory. It does not check that the returned directory is
-// searchable by the provider of rp. The write parameter indicates whether the
-// caller holds filesystem.mu for writing. If true, additions can be made to
-// the dentry tree while walking.
-// If errResolveDirent is returned, the walk needs to be continued with an
-// upgraded filesystem.mu.
-//
-// walkParentLocked is loosely analogous to Linux's fs/namei.c:path_parentat().
-//
-// Preconditions:
-// * filesystem.mu must be locked (for writing if write param is true).
-// * !rp.Done().
-func walkParentLocked(ctx context.Context, rp *vfs.ResolvingPath, write bool) (*vfs.Dentry, *inode, error) {
- vfsd := rp.Start()
- inode := vfsd.Impl().(*dentry).inode
- for !rp.Final() {
- var err error
- vfsd, inode, err = stepLocked(ctx, rp, vfsd, inode, write)
- if err != nil {
- return nil, nil, err
- }
- }
- if !inode.isDir() {
- return nil, nil, syserror.ENOTDIR
- }
- return vfsd, inode, nil
-}
-
-// walk resolves rp to an existing file. If parent is true, it resolves rp up
-// to the parent of the last component, which must be an existing directory.
-// If parent is false, it resolves rp entirely. It attempts to resolve the
-// path as far as it can with a read lock and upgrades the lock if needed.
-func (fs *filesystem) walk(ctx context.Context, rp *vfs.ResolvingPath, parent bool) (*vfs.Dentry, *inode, error) {
- var (
- vfsd *vfs.Dentry
- inode *inode
- err error
- )
-
- // Try walking in the hope that all dentries have already been pulled in
- // from disk. This reduces lock contention (allows concurrent walks).
- fs.mu.RLock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, false)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, false)
- }
- fs.mu.RUnlock()
-
- if err == errResolveDirent {
- // Upgrade lock and continue walking. Lock upgrading in the middle of the
- // walk is fine as this is a read only filesystem.
- fs.mu.Lock()
- if parent {
- vfsd, inode, err = walkParentLocked(ctx, rp, true)
- } else {
- vfsd, inode, err = walkLocked(ctx, rp, true)
- }
- fs.mu.Unlock()
- }
-
- return vfsd, inode, err
-}
-
-// getOrCreateInodeLocked gets the inode corresponding to the inode number passed in.
-// It creates a new one with the given inode number if one does not exist.
-// The caller must increment the ref count if adding this to the dentry tree.
-//
-// Precondition: must be holding fs.mu for writing.
-func (fs *filesystem) getOrCreateInodeLocked(inodeNum uint32) (*inode, error) {
- if in, ok := fs.inodeCache[inodeNum]; ok {
- return in, nil
- }
-
- in, err := newInode(fs, inodeNum)
- if err != nil {
- return nil, err
- }
-
- fs.inodeCache[inodeNum] = in
- return in, nil
-}
-
-// statTo writes the statfs fields to the output parameter.
-func (fs *filesystem) statTo(stat *linux.Statfs) {
- stat.Type = uint64(fs.sb.Magic())
- stat.BlockSize = int64(fs.sb.BlockSize())
- stat.Blocks = fs.sb.BlocksCount()
- stat.BlocksFree = fs.sb.FreeBlocksCount()
- stat.BlocksAvailable = fs.sb.FreeBlocksCount()
- stat.Files = uint64(fs.sb.InodesCount())
- stat.FilesFree = uint64(fs.sb.FreeInodesCount())
- stat.NameLength = disklayout.MaxFileName
- stat.FragmentSize = int64(fs.sb.BlockSize())
- // TODO(b/134676337): Set Statfs.Flags and Statfs.FSID.
-}
-
-// AccessAt implements vfs.Filesystem.Impl.AccessAt.
-func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return inode.checkPermissions(rp.Credentials(), ats)
-}
-
-// GetDentryAt implements vfs.FilesystemImpl.GetDentryAt.
-func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- if opts.CheckSearchable {
- if !inode.isDir() {
- return nil, syserror.ENOTDIR
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
- return nil, err
- }
- }
-
- inode.incRef()
- return vfsd, nil
-}
-
-// GetParentDentryAt implements vfs.FilesystemImpl.GetParentDentryAt.
-func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
- vfsd, inode, err := fs.walk(ctx, rp, true)
- if err != nil {
- return nil, err
- }
- inode.incRef()
- return vfsd, nil
-}
-
-// OpenAt implements vfs.FilesystemImpl.OpenAt.
-func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- vfsd, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
-
- // EROFS is returned if write access is needed.
- if vfs.MayWriteFileWithOpenFlags(opts.Flags) || opts.Flags&(linux.O_CREAT|linux.O_EXCL|linux.O_TMPFILE) != 0 {
- return nil, syserror.EROFS
- }
- return inode.open(rp, vfsd, &opts)
-}
-
-// ReadlinkAt implements vfs.FilesystemImpl.ReadlinkAt.
-func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- symlink, ok := inode.impl.(*symlink)
- if !ok {
- return "", syserror.EINVAL
- }
- return symlink.target, nil
-}
-
-// StatAt implements vfs.FilesystemImpl.StatAt.
-func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return linux.Statx{}, err
- }
- var stat linux.Statx
- inode.statTo(&stat)
- return stat, nil
-}
-
-// StatFSAt implements vfs.FilesystemImpl.StatFSAt.
-func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linux.Statfs, error) {
- if _, _, err := fs.walk(ctx, rp, false); err != nil {
- return linux.Statfs{}, err
- }
-
- var stat linux.Statfs
- fs.statTo(&stat)
- return stat, nil
-}
-
-// Release implements vfs.FilesystemImpl.Release.
-func (fs *filesystem) Release(ctx context.Context) {
- fs.vfsfs.VirtualFilesystem().PutAnonBlockDevMinor(fs.devMinor)
-}
-
-// Sync implements vfs.FilesystemImpl.Sync.
-func (fs *filesystem) Sync(ctx context.Context) error {
- // This is a readonly filesystem for now.
- return nil
-}
-
-// The vfs.FilesystemImpl functions below return EROFS because their respective
-// man pages say that EROFS must be returned if the path resolves to a file on
-// this read-only filesystem.
-
-// LinkAt implements vfs.FilesystemImpl.LinkAt.
-func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
-func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- if _, _, err := fs.walk(ctx, rp, true); err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// MknodAt implements vfs.FilesystemImpl.MknodAt.
-func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// RenameAt implements vfs.FilesystemImpl.RenameAt.
-func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error {
- if rp.Done() {
- return syserror.ENOENT
- }
-
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
-func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if !inode.isDir() {
- return syserror.ENOTDIR
- }
-
- return syserror.EROFS
-}
-
-// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
-func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
-func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
- if rp.Done() {
- return syserror.EEXIST
- }
-
- _, _, err := fs.walk(ctx, rp, true)
- if err != nil {
- return err
- }
-
- return syserror.EROFS
-}
-
-// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
-func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
-
- if inode.isDir() {
- return syserror.EISDIR
- }
-
- return syserror.EROFS
-}
-
-// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
-func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
- _, inode, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- if err := inode.checkPermissions(rp.Credentials(), vfs.MayWrite); err != nil {
- return nil, err
- }
-
- // TODO(b/134676337): Support sockets.
- return nil, syserror.ECONNREFUSED
-}
-
-// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
-func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return nil, err
- }
- return nil, syserror.ENOTSUP
-}
-
-// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
-func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return "", err
- }
- return "", syserror.ENOTSUP
-}
-
-// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
-func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return syserror.ENOTSUP
-}
-
-// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
-func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {
- _, _, err := fs.walk(ctx, rp, false)
- if err != nil {
- return err
- }
- return syserror.ENOTSUP
-}
-
-// PrependPath implements vfs.FilesystemImpl.PrependPath.
-func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDentry, b *fspath.Builder) error {
- fs.mu.RLock()
- defer fs.mu.RUnlock()
- return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
-}
-
-// MountOptions implements vfs.FilesystemImpl.MountOptions.
-func (fs *filesystem) MountOptions() string {
- return ""
-}
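The walk() helper in the deleted filesystem.go above first attempts resolution under the read lock and retries under the write lock only when errResolveDirent is reported; as its comment notes, upgrading mid-walk is only safe because the filesystem is read-only. A minimal standalone sketch of that try-read-then-upgrade pattern (hypothetical names, not the gVisor API):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errNeedWrite mirrors errResolveDirent: the read-locked attempt found that
// the cache must be mutated, so the caller should retry with the write lock.
var errNeedWrite = errors.New("retry with write lock")

type cache struct {
	mu      sync.RWMutex
	entries map[string]int
}

// lookupLocked returns the cached value. If write is false it refuses to
// populate the cache and reports errNeedWrite instead.
func (c *cache) lookupLocked(key string, write bool) (int, error) {
	if v, ok := c.entries[key]; ok {
		return v, nil
	}
	if !write {
		return 0, errNeedWrite
	}
	v := len(key) // stand-in for reading the entry from disk
	c.entries[key] = v
	return v, nil
}

// lookup is the read-mostly fast path with a write-locked retry, analogous to
// filesystem.walk above.
func (c *cache) lookup(key string) (int, error) {
	c.mu.RLock()
	v, err := c.lookupLocked(key, false)
	c.mu.RUnlock()
	if err == errNeedWrite {
		c.mu.Lock()
		v, err = c.lookupLocked(key, true)
		c.mu.Unlock()
	}
	return v, err
}

func main() {
	c := &cache{entries: make(map[string]int)}
	v, err := c.lookup("foo/bar")
	fmt.Println(v, err)
}

Dropping the read lock before reacquiring the write lock is only valid when, as here, nothing observed under the read lock can be invalidated in between.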
diff --git a/pkg/sentry/fsimpl/ext/inode.go b/pkg/sentry/fsimpl/ext/inode.go
deleted file mode 100644
index 4a555bf72..000000000
--- a/pkg/sentry/fsimpl/ext/inode.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "fmt"
- "sync/atomic"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// inode represents an ext inode.
-//
-// inode uses the same inheritance pattern that pkg/sentry/vfs structures use.
-// This has been done to increase memory locality.
-//
-// Implementations:
-// inode --
-// |-- dir
-// |-- symlink
-// |-- regular--
-// |-- extent file
-// |-- block map file
-//
-// +stateify savable
-type inode struct {
- // refs is a reference count. refs is accessed using atomic memory operations.
- refs int64
-
- // fs is the containing filesystem.
- fs *filesystem
-
- // inodeNum is the inode number of this inode on disk. This is used to
- // identify inodes within the ext filesystem.
- inodeNum uint32
-
- // blkSize is the fs data block size. Same as filesystem.sb.BlockSize().
- blkSize uint64
-
- // diskInode gives us access to the inode struct on disk. Immutable.
- diskInode disklayout.Inode
-
- locks vfs.FileLocks
-
- // This is immutable. The implementations must embed inode as their first
- // field to preserve memory locality.
- impl interface{}
-}
-
-// incRef increments the inode ref count.
-func (in *inode) incRef() {
- atomic.AddInt64(&in.refs, 1)
-}
-
-// tryIncRef tries to increment the ref count. Returns true if successful.
-func (in *inode) tryIncRef() bool {
- for {
- refs := atomic.LoadInt64(&in.refs)
- if refs == 0 {
- return false
- }
- if atomic.CompareAndSwapInt64(&in.refs, refs, refs+1) {
- return true
- }
- }
-}
-
-// decRef decrements the inode ref count and releases the inode resources if
-// the ref count hits 0.
-//
-// Precondition: Must have locked filesystem.mu.
-func (in *inode) decRef() {
- if refs := atomic.AddInt64(&in.refs, -1); refs == 0 {
- delete(in.fs.inodeCache, in.inodeNum)
- } else if refs < 0 {
- panic("ext.inode.decRef() called without holding a reference")
- }
-}
-
-// newInode is the inode constructor. Reads the inode off disk. Identifies
-// inodes based on the absolute inode number on disk.
-func newInode(fs *filesystem, inodeNum uint32) (*inode, error) {
- if inodeNum == 0 {
- panic("inode number 0 on ext filesystems is not possible")
- }
-
- inodeRecordSize := fs.sb.InodeSize()
- var diskInode disklayout.Inode
- if inodeRecordSize == disklayout.OldInodeSize {
- diskInode = &disklayout.InodeOld{}
- } else {
- diskInode = &disklayout.InodeNew{}
- }
-
- // Calculate where the inode is actually placed.
- inodesPerGrp := fs.sb.InodesPerGroup()
- blkSize := fs.sb.BlockSize()
- inodeTableOff := fs.bgs[getBGNum(inodeNum, inodesPerGrp)].InodeTable() * blkSize
- inodeOff := inodeTableOff + uint64(uint32(inodeRecordSize)*getBGOff(inodeNum, inodesPerGrp))
-
- if err := readFromDisk(fs.dev, int64(inodeOff), diskInode); err != nil {
- return nil, err
- }
-
- // Build the inode based on its type.
- args := inodeArgs{
- fs: fs,
- inodeNum: inodeNum,
- blkSize: blkSize,
- diskInode: diskInode,
- }
-
- switch diskInode.Mode().FileType() {
- case linux.ModeSymlink:
- f, err := newSymlink(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeRegular:
- f, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- case linux.ModeDirectory:
- f, err := newDirectory(args, fs.sb.IncompatibleFeatures().DirentFileType)
- if err != nil {
- return nil, err
- }
- return &f.inode, nil
- default:
- // TODO(b/134676337): Return appropriate errors for sockets, pipes and devices.
- return nil, syserror.EINVAL
- }
-}
-
-type inodeArgs struct {
- fs *filesystem
- inodeNum uint32
- blkSize uint64
- diskInode disklayout.Inode
-}
-
-func (in *inode) init(args inodeArgs, impl interface{}) {
- in.fs = args.fs
- in.inodeNum = args.inodeNum
- in.blkSize = args.blkSize
- in.diskInode = args.diskInode
- in.impl = impl
-}
-
-// open creates and returns a file description for the dentry passed in.
-func (in *inode) open(rp *vfs.ResolvingPath, vfsd *vfs.Dentry, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
- ats := vfs.AccessTypesForOpenFlags(opts)
- if err := in.checkPermissions(rp.Credentials(), ats); err != nil {
- return nil, err
- }
- mnt := rp.Mount()
- switch in.impl.(type) {
- case *regularFile:
- var fd regularFileFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *directory:
- // Can't open directories writably. This check is not necessary for a read
- // only filesystem but will be required when write is implemented.
- if ats&vfs.MayWrite != 0 {
- return nil, syserror.EISDIR
- }
- var fd directoryFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- case *symlink:
- if opts.Flags&linux.O_PATH == 0 {
- // Can't open symlinks without O_PATH.
- return nil, syserror.ELOOP
- }
- var fd symlinkFD
- fd.LockFD.Init(&in.locks)
- if err := fd.vfsfd.Init(&fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{}); err != nil {
- return nil, err
- }
- return &fd.vfsfd, nil
- default:
- panic(fmt.Sprintf("unknown inode type: %T", in.impl))
- }
-}
-
-func (in *inode) checkPermissions(creds *auth.Credentials, ats vfs.AccessTypes) error {
- return vfs.GenericCheckPermissions(creds, ats, in.diskInode.Mode(), in.diskInode.UID(), in.diskInode.GID())
-}
-
-// statTo writes the statx fields to the output parameter.
-func (in *inode) statTo(stat *linux.Statx) {
- stat.Mask = linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_NLINK |
- linux.STATX_UID | linux.STATX_GID | linux.STATX_INO | linux.STATX_SIZE |
- linux.STATX_ATIME | linux.STATX_CTIME | linux.STATX_MTIME
- stat.Blksize = uint32(in.blkSize)
- stat.Mode = uint16(in.diskInode.Mode())
- stat.Nlink = uint32(in.diskInode.LinksCount())
- stat.UID = uint32(in.diskInode.UID())
- stat.GID = uint32(in.diskInode.GID())
- stat.Ino = uint64(in.inodeNum)
- stat.Size = in.diskInode.Size()
- stat.Atime = in.diskInode.AccessTime().StatxTimestamp()
- stat.Ctime = in.diskInode.ChangeTime().StatxTimestamp()
- stat.Mtime = in.diskInode.ModificationTime().StatxTimestamp()
- stat.DevMajor = linux.UNNAMED_MAJOR
- stat.DevMinor = in.fs.devMinor
- // TODO(b/134676337): Set stat.Blocks which is the number of 512 byte blocks
- // (including metadata blocks) required to represent this file.
-}
-
-// getBGNum returns the block group number that a given inode belongs to.
-func getBGNum(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) / inodesPerGrp
-}
-
-// getBGOff returns the offset at which the given inode lives in the block
-// group's inode table, i.e. the index of the inode in the inode table.
-func getBGOff(inodeNum uint32, inodesPerGrp uint32) uint32 {
- return (inodeNum - 1) % inodesPerGrp
-}
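newInode in the deleted inode.go above locates the on-disk inode record purely by arithmetic on superblock parameters (getBGNum/getBGOff plus the group's inode table offset). A standalone sketch of that calculation with made-up geometry, not tied to the disklayout package:

package main

import "fmt"

// locateInode mirrors getBGNum/getBGOff above: ext inode numbers are 1-based,
// so inode n lives in block group (n-1)/inodesPerGroup at table index
// (n-1)%inodesPerGroup.
func locateInode(inodeNum, inodesPerGroup uint32) (group, index uint32) {
	return (inodeNum - 1) / inodesPerGroup, (inodeNum - 1) % inodesPerGroup
}

// inodeOffset computes the absolute byte offset of the inode record, given
// the first block of its group's inode table.
func inodeOffset(index uint32, inodeTableBlock, blockSize uint64, recordSize uint16) uint64 {
	return inodeTableBlock*blockSize + uint64(index)*uint64(recordSize)
}

func main() {
	// Assumed geometry: 8192 inodes per group, 4 KiB blocks, 256-byte inode
	// records, and the group's inode table starting at block 1058.
	group, index := locateInode(12, 8192)
	fmt.Println(group, index) // 0 11
	fmt.Println(inodeOffset(index, 1058, 4096, 256)) // 4336384
}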
diff --git a/pkg/sentry/fsimpl/ext/regular_file.go b/pkg/sentry/fsimpl/ext/regular_file.go
deleted file mode 100644
index 5ad9befcd..000000000
--- a/pkg/sentry/fsimpl/ext/regular_file.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/safemem"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// regularFile represents a regular file's inode. This too follows the
-// inheritance pattern prevalent in the vfs layer described in
-// pkg/sentry/vfs/README.md.
-//
-// +stateify savable
-type regularFile struct {
- inode inode
-
- // This is immutable. The first field of fileReader implementations must be
- // regularFile to preserve memory locality.
- // io.ReaderAt is stricter than io.Reader in the sense that a partial read is
- // always accompanied by an error. If a read spans past the end of the file,
- // a partial read (within the file range) is done and io.EOF is returned.
- impl io.ReaderAt
-}
-
-// newRegularFile is the regularFile constructor. It figures out what kind of
-// file this is and initializes the fileReader.
-func newRegularFile(args inodeArgs) (*regularFile, error) {
- if args.diskInode.Flags().Extents {
- file, err := newExtentFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
- }
-
- file, err := newBlockMapFile(args)
- if err != nil {
- return nil, err
- }
- return &file.regFile, nil
-}
-
-func (in *inode) isRegular() bool {
- _, ok := in.impl.(*regularFile)
- return ok
-}
-
-// regularFileFD represents a regular file's file description. It implements
-// vfs.FileDescriptionImpl.
-//
-// +stateify savable
-type regularFileFD struct {
- fileDescription
- vfs.LockFD
-
- // off is the file offset. off is accessed using atomic memory operations.
- off int64
-
- // offMu serializes operations that may mutate off.
- offMu sync.Mutex `state:"nosave"`
-}
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *regularFileFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- safeReader := safemem.FromIOReaderAt{
- ReaderAt: fd.inode().impl.(*regularFile).impl,
- Offset: offset,
- }
-
- // Copies data from disk directly into usermem without any intermediate
- // allocations (if dst is converted into BlockSeq such that it does not need
- // safe copying).
- return dst.CopyOutFrom(ctx, safeReader)
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *regularFileFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- n, err := fd.PRead(ctx, dst, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- // write(2) specifies that EBADF must be returned if the fd is not open for
- // writing.
- return 0, syserror.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- n, err := fd.PWrite(ctx, src, fd.off, opts)
- fd.offMu.Lock()
- fd.off += n
- fd.offMu.Unlock()
- return n, err
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *regularFileFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return syserror.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- fd.offMu.Lock()
- defer fd.offMu.Unlock()
- switch whence {
- case linux.SEEK_SET:
- // Use offset as specified.
- case linux.SEEK_CUR:
- offset += fd.off
- case linux.SEEK_END:
- offset += int64(fd.inode().diskInode.Size())
- default:
- return 0, syserror.EINVAL
- }
- if offset < 0 {
- return 0, syserror.EINVAL
- }
- fd.off = offset
- return offset, nil
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- // TODO(b/134676337): Implement mmap(2).
- return syserror.ENODEV
-}
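regularFileFD in the deleted regular_file.go above keeps the file offset itself and layers Read on top of the positional PRead. The same offset-tracking wrapper in isolation, over a plain io.ReaderAt (hypothetical type, not the vfs API):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// offsetReader tracks a file offset over an io.ReaderAt, the way
// regularFileFD.Read above layers on top of PRead.
type offsetReader struct {
	r   io.ReaderAt
	mu  sync.Mutex // serializes updates to off
	off int64
}

// Read reads at the current offset and advances it by the number of bytes
// actually read.
func (o *offsetReader) Read(p []byte) (int, error) {
	o.mu.Lock()
	off := o.off
	o.mu.Unlock()
	n, err := o.r.ReadAt(p, off)
	o.mu.Lock()
	o.off += int64(n)
	o.mu.Unlock()
	return n, err
}

func main() {
	r := &offsetReader{r: strings.NewReader("hello, ext")}
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // "hello"
	n, _ = r.Read(buf)
	fmt.Println(string(buf[:n])) // ", ext"
}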
diff --git a/pkg/sentry/fsimpl/ext/symlink.go b/pkg/sentry/fsimpl/ext/symlink.go
deleted file mode 100644
index 5e2bcc837..000000000
--- a/pkg/sentry/fsimpl/ext/symlink.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/pkg/usermem"
-)
-
-// symlink represents a symlink inode.
-//
-// +stateify savable
-type symlink struct {
- inode inode
- target string // immutable
-}
-
-// newSymlink is the symlink constructor. It reads out the symlink target from
-// the inode (however it might have been stored).
-func newSymlink(args inodeArgs) (*symlink, error) {
- var link []byte
-
- // If the symlink target is less than 60 bytes, it is stored in inode.Data().
- // Otherwise either extents or block maps will be used to store the link.
- size := args.diskInode.Size()
- if size < 60 {
- link = args.diskInode.Data()[:size]
- } else {
- // Create a regular file out of this inode and read out the target.
- regFile, err := newRegularFile(args)
- if err != nil {
- return nil, err
- }
-
- link = make([]byte, size)
- if n, err := regFile.impl.ReadAt(link, 0); uint64(n) < size {
- return nil, err
- }
- }
-
- file := &symlink{target: string(link)}
- file.inode.init(args, file)
- return file, nil
-}
-
-func (in *inode) isSymlink() bool {
- _, ok := in.impl.(*symlink)
- return ok
-}
-
-// symlinkFD represents a symlink file description and implements
-// vfs.FileDescriptionImpl. It may only be used if the open options contain
-// O_PATH. For this reason most of the functions return EBADF.
-//
-// +stateify savable
-type symlinkFD struct {
- fileDescription
- vfs.NoLockFD
-}
-
-// Compiles only if symlinkFD implements vfs.FileDescriptionImpl.
-var _ vfs.FileDescriptionImpl = (*symlinkFD)(nil)
-
-// Release implements vfs.FileDescriptionImpl.Release.
-func (fd *symlinkFD) Release(context.Context) {}
-
-// PRead implements vfs.FileDescriptionImpl.PRead.
-func (fd *symlinkFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// Read implements vfs.FileDescriptionImpl.Read.
-func (fd *symlinkFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// PWrite implements vfs.FileDescriptionImpl.PWrite.
-func (fd *symlinkFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// Write implements vfs.FileDescriptionImpl.Write.
-func (fd *symlinkFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
-func (fd *symlinkFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
- return syserror.ENOTDIR
-}
-
-// Seek implements vfs.FileDescriptionImpl.Seek.
-func (fd *symlinkFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- return 0, syserror.EBADF
-}
-
-// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
-func (fd *symlinkFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- return syserror.EBADF
-}
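newSymlink in the deleted symlink.go above keeps short targets inline in the inode and reads long ones through the regular-file machinery (the 60-byte threshold comes from its comment). A self-contained sketch of that split, with hypothetical names:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readSymlinkTarget returns a symlink target that is either stored inline in
// the inode's data area (short targets) or in data blocks reachable through
// the given reader (long targets), echoing newSymlink above.
func readSymlinkTarget(inlineData []byte, size uint64, blocks io.ReaderAt) (string, error) {
	const inlineLimit = 60 // targets shorter than this are stored inline.
	if size < inlineLimit {
		return string(inlineData[:size]), nil
	}
	buf := make([]byte, size)
	if n, err := blocks.ReadAt(buf, 0); uint64(n) < size {
		return "", err
	}
	return string(buf), nil
}

func main() {
	// Short target: read straight out of the inline area.
	short, _ := readSymlinkTarget([]byte("/tmp/x\x00\x00"), 6, nil)
	fmt.Println(short)

	// Long target: read through the file's data blocks instead.
	long := strings.Repeat("a", 80)
	got, _ := readSymlinkTarget(nil, 80, strings.NewReader(long))
	fmt.Println(got == long)
}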
diff --git a/pkg/sentry/fsimpl/ext/utils.go b/pkg/sentry/fsimpl/ext/utils.go
deleted file mode 100644
index 58ef7b9b8..000000000
--- a/pkg/sentry/fsimpl/ext/utils.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ext
-
-import (
- "io"
-
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/sentry/fsimpl/ext/disklayout"
- "gvisor.dev/gvisor/pkg/syserror"
-)
-
-// readFromDisk performs a binary read from disk into the given struct from
-// the absolute offset provided.
-func readFromDisk(dev io.ReaderAt, abOff int64, v marshal.Marshallable) error {
- n := v.SizeBytes()
- buf := make([]byte, n)
- if read, _ := dev.ReadAt(buf, abOff); read < int(n) {
- return syserror.EIO
- }
-
- v.UnmarshalBytes(buf)
- return nil
-}
-
-// readSuperBlock reads the SuperBlock from block group 0 in the underlying
-// device. There are three versions of the superblock. This function identifies
-// and returns the correct version.
-func readSuperBlock(dev io.ReaderAt) (disklayout.SuperBlock, error) {
- var sb disklayout.SuperBlock = &disklayout.SuperBlockOld{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if sb.Revision() == disklayout.OldRev {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock32Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- if !sb.IncompatibleFeatures().Is64Bit {
- return sb, nil
- }
-
- sb = &disklayout.SuperBlock64Bit{}
- if err := readFromDisk(dev, disklayout.SbOffset, sb); err != nil {
- return nil, err
- }
- return sb, nil
-}
-
-// blockGroupsCount returns the number of block groups in the ext fs.
-func blockGroupsCount(sb disklayout.SuperBlock) uint64 {
- blocksCount := sb.BlocksCount()
- blocksPerGroup := uint64(sb.BlocksPerGroup())
-
- // Round up the result. float64 can compromise precision so do it manually.
- return (blocksCount + blocksPerGroup - 1) / blocksPerGroup
-}
-
-// readBlockGroups reads the block group descriptor table from block group 0 in
-// the underlying device.
-func readBlockGroups(dev io.ReaderAt, sb disklayout.SuperBlock) ([]disklayout.BlockGroup, error) {
- bgCount := blockGroupsCount(sb)
- bgdSize := uint64(sb.BgDescSize())
- is64Bit := sb.IncompatibleFeatures().Is64Bit
- bgds := make([]disklayout.BlockGroup, bgCount)
-
- for i, off := uint64(0), uint64(sb.FirstDataBlock()+1)*sb.BlockSize(); i < bgCount; i, off = i+1, off+bgdSize {
- if is64Bit {
- bgds[i] = &disklayout.BlockGroup64Bit{}
- } else {
- bgds[i] = &disklayout.BlockGroup32Bit{}
- }
-
- if err := readFromDisk(dev, int64(off), bgds[i]); err != nil {
- return nil, err
- }
- }
- return bgds, nil
-}
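blockGroupsCount in the deleted utils.go above rounds up with integer arithmetic instead of float64. The same ceil-division idiom in isolation, with made-up block counts:

package main

import "fmt"

// ceilDiv returns ceil(a/b) using only integer arithmetic, the idiom used by
// blockGroupsCount above to avoid float64 precision loss.
func ceilDiv(a, b uint64) uint64 {
	return (a + b - 1) / b
}

func main() {
	const blocksPerGroup = 32768
	for _, blocks := range []uint64{32768, 32769, 65536} {
		fmt.Printf("%d blocks -> %d block group(s)\n", blocks, ceilDiv(blocks, blocksPerGroup))
	}
}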
diff --git a/pkg/sentry/fsimpl/fuse/BUILD b/pkg/sentry/fsimpl/fuse/BUILD
index 3a4777fbe..05c4fbeb2 100644
--- a/pkg/sentry/fsimpl/fuse/BUILD
+++ b/pkg/sentry/fsimpl/fuse/BUILD
@@ -46,6 +46,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/marshal",
@@ -58,7 +59,6 @@ go_library(
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -76,13 +76,13 @@ go_test(
library = ":fuse",
deps = [
"//pkg/abi/linux",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/marshal",
"//pkg/sentry/fsimpl/testutil",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
diff --git a/pkg/sentry/fsimpl/fuse/connection.go b/pkg/sentry/fsimpl/fuse/connection.go
index 077bf9307..d404edaf0 100644
--- a/pkg/sentry/fsimpl/fuse/connection.go
+++ b/pkg/sentry/fsimpl/fuse/connection.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -252,11 +252,11 @@ func (conn *connection) Call(t *kernel.Task, r *Request) (*Response, error) {
}
if !conn.connected {
- return nil, syserror.ENOTCONN
+ return nil, linuxerr.ENOTCONN
}
if conn.connInitError {
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
fut, err := conn.callFuture(t, r)
@@ -306,7 +306,7 @@ func (conn *connection) callFutureLocked(t *kernel.Task, r *Request) (*futureRes
conn.mu.Unlock()
// we checked connected before,
// this must be due to aborted connection.
- return nil, syserror.ECONNABORTED
+ return nil, linuxerr.ECONNABORTED
}
conn.mu.Unlock()
diff --git a/pkg/sentry/fsimpl/fuse/connection_test.go b/pkg/sentry/fsimpl/fuse/connection_test.go
index 78ea6a31e..1fddd858e 100644
--- a/pkg/sentry/fsimpl/fuse/connection_test.go
+++ b/pkg/sentry/fsimpl/fuse/connection_test.go
@@ -19,9 +19,9 @@ import (
"testing"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
)
// TestConnectionInitBlock tests if initialization
@@ -104,7 +104,7 @@ func TestConnectionAbort(t *testing.T) {
// After abort, Call() should return directly with ENOTCONN.
req := conn.NewRequest(creds, 0, 0, 0, testObj)
_, err = conn.Call(task, req)
- if err != syserror.ENOTCONN {
+ if !linuxerr.Equals(linuxerr.ENOTCONN, err) {
t.Fatalf("Incorrect error code received for Call() after connection aborted")
}
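The connection_test.go hunk above switches from comparing err directly against a syserror value to linuxerr.Equals. A rough sketch of that style of errno comparison with stand-in types (not the actual linuxerr implementation, whose internals are not shown in this diff):

package main

import (
	"errors"
	"fmt"
)

// errno is a stand-in for a linuxerr-style sentinel error.
type errno struct {
	code int
	msg  string
}

func (e *errno) Error() string { return e.msg }

var errNotConn = &errno{code: 107, msg: "transport endpoint is not connected"}

// equals reports whether err is, or wraps, a sentinel with the same code, so
// callers compare error codes rather than relying on pointer identity.
func equals(sentinel *errno, err error) bool {
	var e *errno
	if !errors.As(err, &e) {
		return false
	}
	return e.code == sentinel.code
}

func main() {
	wrapped := fmt.Errorf("Call failed: %w", errNotConn)
	fmt.Println(equals(errNotConn, errNotConn)) // true
	fmt.Println(equals(errNotConn, wrapped))    // true, despite the wrapping
}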
diff --git a/pkg/sentry/fsimpl/fuse/dev.go b/pkg/sentry/fsimpl/fuse/dev.go
index 5d2bae14e..0f855ac59 100644
--- a/pkg/sentry/fsimpl/fuse/dev.go
+++ b/pkg/sentry/fsimpl/fuse/dev.go
@@ -18,11 +18,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -37,7 +37,7 @@ type fuseDevice struct{}
// Open implements vfs.Device.Open.
func (fuseDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
if !kernel.FUSEEnabled {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
var fd DeviceFD
@@ -122,17 +122,17 @@ func (fd *DeviceFD) Release(ctx context.Context) {
func (fd *DeviceFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.
if fd.fs == nil {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// Read implements vfs.FileDescriptionImpl.Read.
func (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.
if fd.fs == nil {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
// We require that any Read done on this filesystem have a sane minimum
@@ -149,7 +149,7 @@ func (fd *DeviceFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.R
// If the read buffer is too small, error out.
if dst.NumBytes() < int64(minBuffSize) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
fd.mu.Lock()
@@ -191,7 +191,7 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts
}
if req == nil {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// We already checked the size: dst must be able to fit the whole request.
@@ -204,7 +204,7 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts
return 0, err
}
if n != len(req.data) {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
if req.hdr.Opcode == linux.FUSE_WRITE {
@@ -213,7 +213,7 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts
return 0, err
}
if written != len(req.payload) {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
n += int(written)
}
@@ -234,10 +234,10 @@ func (fd *DeviceFD) readLocked(ctx context.Context, dst usermem.IOSequence, opts
func (fd *DeviceFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.
if fd.fs == nil {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// Write implements vfs.FileDescriptionImpl.Write.
@@ -251,12 +251,12 @@ func (fd *DeviceFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.
func (fd *DeviceFD) writeLocked(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.
if fd.fs == nil {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
// Return ENODEV if the filesystem is umounted.
if fd.fs.umounted {
- return 0, syserror.ENODEV
+ return 0, linuxerr.ENODEV
}
var cn, n int64
@@ -293,7 +293,7 @@ func (fd *DeviceFD) writeLocked(ctx context.Context, src usermem.IOSequence, opt
// Assert that the header isn't read into the writeBuf yet.
if fd.writeCursor >= hdrLen {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// We don't have the full common response header yet.
@@ -322,7 +322,7 @@ func (fd *DeviceFD) writeLocked(ctx context.Context, src usermem.IOSequence, opt
if !ok {
// Server sent us a response for a request we never sent,
// or for which we already received a reply (e.g. aborted), an unlikely event.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
delete(fd.completions, hdr.Unique)
@@ -391,10 +391,10 @@ func (fd *DeviceFD) EventUnregister(e *waiter.Entry) {
func (fd *DeviceFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
// Operations on /dev/fuse don't make sense until a FUSE filesystem is mounted.
if fd.fs == nil {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
// sendResponse sends a response to the waiting task (if any).
@@ -434,7 +434,7 @@ func (fd *DeviceFD) sendError(ctx context.Context, errno int32, unique linux.FUS
if !ok {
// A response for a request we never sent,
// or for which we already received a reply (e.g. aborted).
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
delete(fd.completions, respHdr.Unique)
diff --git a/pkg/sentry/fsimpl/fuse/dev_test.go b/pkg/sentry/fsimpl/fuse/dev_test.go
index 04250d796..8951b5ba8 100644
--- a/pkg/sentry/fsimpl/fuse/dev_test.go
+++ b/pkg/sentry/fsimpl/fuse/dev_test.go
@@ -20,11 +20,11 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -186,7 +186,7 @@ func ReadTest(serverTask *kernel.Task, fd *vfs.FileDescription, inIOseq usermem.
// "would block".
n, err = dev.Read(serverTask, inIOseq, vfs.ReadOptions{})
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
diff --git a/pkg/sentry/fsimpl/fuse/directory.go b/pkg/sentry/fsimpl/fuse/directory.go
index fcc5d9a2a..9611edd5a 100644
--- a/pkg/sentry/fsimpl/fuse/directory.go
+++ b/pkg/sentry/fsimpl/fuse/directory.go
@@ -19,10 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -32,27 +32,27 @@ type directoryFD struct {
// Allocate implements directoryFD.Allocate.
func (*directoryFD) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
// PRead implements vfs.FileDescriptionImpl.PRead.
func (*directoryFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Read implements vfs.FileDescriptionImpl.Read.
func (*directoryFD) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// PWrite implements vfs.FileDescriptionImpl.PWrite.
func (*directoryFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Write implements vfs.FileDescriptionImpl.Write.
func (*directoryFD) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
diff --git a/pkg/sentry/fsimpl/fuse/fusefs.go b/pkg/sentry/fsimpl/fuse/fusefs.go
index 167c899e2..af16098d2 100644
--- a/pkg/sentry/fsimpl/fuse/fusefs.go
+++ b/pkg/sentry/fsimpl/fuse/fusefs.go
@@ -23,13 +23,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -121,30 +121,30 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
deviceDescriptorStr, ok := mopts["fd"]
if !ok {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option fd missing")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
delete(mopts, "fd")
deviceDescriptor, err := strconv.ParseInt(deviceDescriptorStr, 10 /* base */, 32 /* bitSize */)
if err != nil {
ctx.Debugf("fusefs.FilesystemType.GetFilesystem: invalid fd: %q (%v)", deviceDescriptorStr, err)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("%s.GetFilesystem: couldn't get kernel task from context", fsType.Name())
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fuseFDGeneric := kernelTask.GetFileVFS2(int32(deviceDescriptor))
if fuseFDGeneric == nil {
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
defer fuseFDGeneric.DecRef(ctx)
fuseFD, ok := fuseFDGeneric.Impl().(*DeviceFD)
if !ok {
log.Warningf("%s.GetFilesystem: device FD is %T, not a FUSE device", fsType.Name, fuseFDGeneric)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Parse and set all the other supported FUSE mount options.
@@ -154,17 +154,17 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
uid, err := strconv.ParseUint(uidStr, 10, 32)
if err != nil {
log.Warningf("%s.GetFilesystem: invalid user_id: user_id=%s", fsType.Name(), uidStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
kuid := creds.UserNamespace.MapToKUID(auth.UID(uid))
if !kuid.Ok() {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: unmapped uid: %d", uid)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.uid = kuid
} else {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option user_id missing")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if gidStr, ok := mopts["group_id"]; ok {
@@ -172,17 +172,17 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
gid, err := strconv.ParseUint(gidStr, 10, 32)
if err != nil {
log.Warningf("%s.GetFilesystem: invalid group_id: group_id=%s", fsType.Name(), gidStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
kgid := creds.UserNamespace.MapToKGID(auth.GID(gid))
if !kgid.Ok() {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: unmapped gid: %d", gid)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.gid = kgid
} else {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option group_id missing")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if modeStr, ok := mopts["rootmode"]; ok {
@@ -190,12 +190,12 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
log.Warningf("%s.GetFilesystem: invalid mode: %q", fsType.Name(), modeStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.rootMode = linux.FileMode(mode)
} else {
ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option rootmode missing")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Set the maxInFlightRequests option.
@@ -206,7 +206,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
maxRead, err := strconv.ParseUint(maxReadStr, 10, 32)
if err != nil {
log.Warningf("%s.GetFilesystem: invalid max_read: max_read=%s", fsType.Name(), maxReadStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if maxRead < fuseMinMaxRead {
maxRead = fuseMinMaxRead
@@ -229,7 +229,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Check for unparsed options.
if len(mopts) != 0 {
log.Warningf("%s.GetFilesystem: unsupported or unknown options: %v", fsType.Name(), mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Create a new FUSE filesystem.
@@ -258,7 +258,7 @@ func newFUSEFilesystem(ctx context.Context, vfsObj *vfs.VirtualFilesystem, fsTyp
conn, err := newFUSEConnection(ctx, fuseFD, opts)
if err != nil {
log.Warningf("fuse.NewFUSEFilesystem: NewFUSEConnection failed with error: %v", err)
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
fs := &filesystem{
@@ -375,7 +375,7 @@ func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, a
creds.RealKGID != i.fs.opts.gid ||
creds.EffectiveKGID != i.fs.opts.gid ||
creds.SavedKGID != i.fs.opts.gid {
- return syserror.EACCES
+ return linuxerr.EACCES
}
}
@@ -393,10 +393,10 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentr
isDir := i.InodeAttrs.Mode().IsDir()
// Return an error if the caller asked to open a directory but the inode is not one.
if !isDir && opts.Mode.IsDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if opts.Flags&linux.O_LARGEFILE == 0 && atomic.LoadUint64(&i.size) > linux.MAX_NON_LFS {
- return nil, syserror.EOVERFLOW
+ return nil, linuxerr.EOVERFLOW
}
var fd *fileDescription
@@ -418,7 +418,7 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentr
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("fusefs.Inode.Open: couldn't get kernel task from context")
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
// Build the request.
@@ -440,7 +440,7 @@ func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentr
if err != nil {
return nil, err
}
- if err := res.Error(); err == syserror.ENOSYS && !isDir {
+ if err := res.Error(); linuxerr.Equals(linuxerr.ENOSYS, err) && !isDir {
i.fs.conn.noOpen = true
} else if err != nil {
return nil, err
@@ -512,7 +512,7 @@ func (i *inode) NewFile(ctx context.Context, name string, opts vfs.OpenOptions)
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("fusefs.Inode.NewFile: couldn't get kernel task from context", i.nodeID)
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
in := linux.FUSECreateIn{
CreateMeta: linux.FUSECreateMeta{
@@ -552,7 +552,7 @@ func (i *inode) Unlink(ctx context.Context, name string, child kernfs.Inode) err
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("fusefs.Inode.newEntry: couldn't get kernel task from context", i.nodeID)
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
in := linux.FUSEUnlinkIn{Name: name}
req := i.fs.conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(kernelTask.ThreadID()), i.nodeID, linux.FUSE_UNLINK, &in)
@@ -596,7 +596,7 @@ func (i *inode) newEntry(ctx context.Context, name string, fileType linux.FileMo
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("fusefs.Inode.newEntry: couldn't get kernel task from context", i.nodeID)
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
req := i.fs.conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(kernelTask.ThreadID()), i.nodeID, opcode, payload)
res, err := i.fs.conn.Call(kernelTask, req)
@@ -611,7 +611,7 @@ func (i *inode) newEntry(ctx context.Context, name string, fileType linux.FileMo
return nil, err
}
if opcode != linux.FUSE_LOOKUP && ((out.Attr.Mode&linux.S_IFMT)^uint32(fileType) != 0 || out.NodeID == 0 || out.NodeID == linux.FUSE_ROOT_ID) {
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
child := i.fs.newInode(ctx, out.NodeID, out.Attr)
return child, nil
@@ -626,13 +626,13 @@ func (i *inode) Getlink(ctx context.Context, mnt *vfs.Mount) (vfs.VirtualDentry,
// Readlink implements kernfs.Inode.Readlink.
func (i *inode) Readlink(ctx context.Context, mnt *vfs.Mount) (string, error) {
if i.Mode().FileType()&linux.S_IFLNK == 0 {
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
if len(i.link) == 0 {
kernelTask := kernel.TaskFromContext(ctx)
if kernelTask == nil {
log.Warningf("fusefs.Inode.Readlink: couldn't get kernel task from context")
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
req := i.fs.conn.NewRequest(auth.CredentialsFromContext(ctx), uint32(kernelTask.ThreadID()), i.nodeID, linux.FUSE_READLINK, &linux.FUSEEmptyIn{})
res, err := i.fs.conn.Call(kernelTask, req)
@@ -728,7 +728,7 @@ func (i *inode) getAttr(ctx context.Context, fs *vfs.Filesystem, opts vfs.StatOp
task := kernel.TaskFromContext(ctx)
if task == nil {
log.Warningf("couldn't get kernel task from context")
- return linux.FUSEAttr{}, syserror.EINVAL
+ return linux.FUSEAttr{}, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(ctx)
@@ -833,7 +833,7 @@ func (i *inode) setAttr(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre
task := kernel.TaskFromContext(ctx)
if task == nil {
log.Warningf("couldn't get kernel task from context")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// We should retain the original file type when assigning new mode.
diff --git a/pkg/sentry/fsimpl/fuse/read_write.go b/pkg/sentry/fsimpl/fuse/read_write.go
index 66ea889f9..fe119aa43 100644
--- a/pkg/sentry/fsimpl/fuse/read_write.go
+++ b/pkg/sentry/fsimpl/fuse/read_write.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ReadInPages sends FUSE_READ requests for the size after round it up to
@@ -39,7 +39,7 @@ func (fs *filesystem) ReadInPages(ctx context.Context, fd *regularFileFD, off ui
t := kernel.TaskFromContext(ctx)
if t == nil {
log.Warningf("fusefs.Read: couldn't get kernel task from context")
- return nil, 0, syserror.EINVAL
+ return nil, 0, linuxerr.EINVAL
}
// Round up to a multiple of page size.
@@ -155,7 +155,7 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,
t := kernel.TaskFromContext(ctx)
if t == nil {
log.Warningf("fusefs.Read: couldn't get kernel task from context")
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// One request cannot exceed either maxWrite or maxPages.
@@ -220,7 +220,7 @@ func (fs *filesystem) Write(ctx context.Context, fd *regularFileFD, off uint64,
// Write more than requested? EIO.
if out.Size > toWrite {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
written += out.Size
diff --git a/pkg/sentry/fsimpl/fuse/regular_file.go b/pkg/sentry/fsimpl/fuse/regular_file.go
index 5bdd096c3..38cde8208 100644
--- a/pkg/sentry/fsimpl/fuse/regular_file.go
+++ b/pkg/sentry/fsimpl/fuse/regular_file.go
@@ -22,8 +22,8 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -39,14 +39,14 @@ type regularFileFD struct {
// PRead implements vfs.FileDescriptionImpl.PRead.
func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
size := dst.NumBytes()
@@ -56,7 +56,7 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
} else if size > math.MaxUint32 {
// FUSE only supports uint32 for size.
// Overflow.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// TODO(gvisor.dev/issue/3678): Add direct IO support.
@@ -107,7 +107,7 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
return 0, err
}
if int64(cp) != toCopy {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
copied += toCopy
}
@@ -143,14 +143,14 @@ func (fd *regularFileFD) Write(ctx context.Context, src usermem.IOSequence, opts
// final offset should be ignored by PWrite.
func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {
if offset < 0 {
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, offset, syserror.EOPNOTSUPP
+ return 0, offset, linuxerr.EOPNOTSUPP
}
inode := fd.inode()
@@ -171,11 +171,11 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
if srclen > math.MaxUint32 {
// FUSE only supports uint32 for size.
// Overflow.
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
if end := offset + srclen; end < offset {
// Overflow.
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
srclen, err = vfs.CheckLimit(ctx, offset, srclen)
@@ -204,7 +204,7 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
return 0, offset, err
}
if int64(cp) != srclen {
- return 0, offset, syserror.EIO
+ return 0, offset, linuxerr.EIO
}
n, err := fd.inode().fs.Write(ctx, fd, uint64(offset), uint32(srclen), data)
@@ -215,7 +215,7 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
if n == 0 {
// We have checked srclen != 0 previously.
// If err == nil, then it's a short write and we return EIO.
- return 0, offset, syserror.EIO
+ return 0, offset, linuxerr.EIO
}
written = int64(n)
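
The pwrite path above rejects lengths that do not fit the uint32 wire format and offsets whose end position would overflow int64. A minimal, standalone sketch of those two guards (not the fusefs API):

package main

import (
	"errors"
	"fmt"
	"math"
)

var errEINVAL = errors.New("EINVAL")

func checkWriteBounds(offset, srclen int64) error {
	if offset < 0 {
		return errEINVAL
	}
	if srclen > math.MaxUint32 {
		// FUSE carries sizes as uint32; anything larger would overflow.
		return errEINVAL
	}
	if end := offset + srclen; end < offset {
		// int64 overflow of the end position.
		return errEINVAL
	}
	return nil
}

func main() {
	fmt.Println(checkWriteBounds(10, 4096))              // <nil>
	fmt.Println(checkWriteBounds(math.MaxInt64-1, 4096)) // EINVAL (overflow)
}
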
diff --git a/pkg/sentry/fsimpl/gofer/BUILD b/pkg/sentry/fsimpl/gofer/BUILD
index 368272f12..4244f2cf5 100644
--- a/pkg/sentry/fsimpl/gofer/BUILD
+++ b/pkg/sentry/fsimpl/gofer/BUILD
@@ -49,6 +49,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/fdnotifier",
"//pkg/fspath",
@@ -78,7 +79,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/unet",
"//pkg/usermem",
"//pkg/waiter",
diff --git a/pkg/sentry/fsimpl/gofer/directory.go b/pkg/sentry/fsimpl/gofer/directory.go
index 177e42649..5c48a9fee 100644
--- a/pkg/sentry/fsimpl/gofer/directory.go
+++ b/pkg/sentry/fsimpl/gofer/directory.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/refsvfs2"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
func (d *dentry) isDir() bool {
@@ -297,7 +297,7 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in
switch whence {
case linux.SEEK_SET:
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset == 0 {
// Ensure that the next call to fd.IterDirents() calls
@@ -309,13 +309,13 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in
case linux.SEEK_CUR:
offset += fd.off
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Don't clear fd.dirents in this case, even if offset == 0.
fd.off = offset
return fd.off, nil
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
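
As context for the Seek hunk above, a standalone sketch of the whence handling it implements: absolute and relative seeks must land at a non-negative offset, an absolute seek back to 0 signals that cached dirents should be refilled, and any other whence is rejected. Names and constants here are illustrative.

package main

import (
	"errors"
	"fmt"
)

const (
	seekSet = 0
	seekCur = 1
)

var errEINVAL = errors.New("EINVAL")

// seekDir returns the new offset and whether cached dirents should be
// refilled, mirroring directoryFD.Seek's rules.
func seekDir(cur, offset int64, whence int) (newOff int64, refill bool, err error) {
	switch whence {
	case seekSet:
		if offset < 0 {
			return 0, false, errEINVAL
		}
		return offset, offset == 0, nil
	case seekCur:
		offset += cur
		if offset < 0 {
			return 0, false, errEINVAL
		}
		return offset, false, nil
	default:
		return 0, false, errEINVAL
	}
}

func main() {
	fmt.Println(seekDir(10, -20, seekCur)) // 0 false EINVAL
	fmt.Println(seekDir(10, 0, seekSet))   // 0 true <nil>
}
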
diff --git a/pkg/sentry/fsimpl/gofer/filesystem.go b/pkg/sentry/fsimpl/gofer/filesystem.go
index eb09d54c3..00228c469 100644
--- a/pkg/sentry/fsimpl/gofer/filesystem.go
+++ b/pkg/sentry/fsimpl/gofer/filesystem.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/host"
@@ -32,32 +33,19 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Sync implements vfs.FilesystemImpl.Sync.
func (fs *filesystem) Sync(ctx context.Context) error {
// Snapshot current syncable dentries and special file FDs.
- fs.renameMu.RLock()
fs.syncMu.Lock()
ds := make([]*dentry, 0, len(fs.syncableDentries))
for d := range fs.syncableDentries {
- // It's safe to use IncRef here even though fs.syncableDentries doesn't
- // hold references since we hold fs.renameMu. Note that we can't use
- // TryIncRef since cached dentries at zero references should still be
- // synced.
- d.IncRef()
ds = append(ds, d)
}
- fs.renameMu.RUnlock()
sffds := make([]*specialFileFD, 0, len(fs.specialFileFDs))
for sffd := range fs.specialFileFDs {
- // As above, fs.specialFileFDs doesn't hold references. However, unlike
- // dentries, an FD that has reached zero references can't be
- // resurrected, so we can use TryIncRef.
- if sffd.vfsfd.TryIncRef() {
- sffds = append(sffds, sffd)
- }
+ sffds = append(sffds, sffd)
}
fs.syncMu.Unlock()
@@ -67,9 +55,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {
// Sync syncable dentries.
for _, d := range ds {
- err := d.syncCachedFile(ctx, true /* forFilesystemSync */)
- d.DecRef(ctx)
- if err != nil {
+ if err := d.syncCachedFile(ctx, true /* forFilesystemSync */); err != nil {
ctx.Infof("gofer.filesystem.Sync: dentry.syncCachedFile failed: %v", err)
if retErr == nil {
retErr = err
@@ -80,9 +66,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {
// Sync special files, which may be writable but do not use dentry shared
// handles (so they won't be synced by the above).
for _, sffd := range sffds {
- err := sffd.sync(ctx, true /* forFilesystemSync */)
- sffd.vfsfd.DecRef(ctx)
- if err != nil {
+ if err := sffd.sync(ctx, true /* forFilesystemSync */); err != nil {
ctx.Infof("gofer.filesystem.Sync: specialFileFD.sync failed: %v", err)
if retErr == nil {
retErr = err
@@ -146,6 +130,7 @@ func putDentrySlice(ds *[]*dentry) {
// but dentry slices are allocated lazily, and it's much easier to say "defer
// fs.renameMuRUnlockAndCheckCaching(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckCaching(ds) }()" to work around this.
+// +checklocksrelease:fs.renameMu
func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
if *dsp == nil {
@@ -158,6 +143,7 @@ func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, dsp **
putDentrySlice(*dsp)
}
+// +checklocksrelease:fs.renameMu
func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) {
if *ds == nil {
fs.renameMu.Unlock()
@@ -186,7 +172,7 @@ func (fs *filesystem) renameMuUnlockAndCheckCaching(ctx context.Context, ds **[]
// Postconditions: The returned dentry's cached metadata is up to date.
func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, bool, error) {
if !d.isDir() {
- return nil, false, syserror.ENOTDIR
+ return nil, false, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, false, err
@@ -244,18 +230,18 @@ afterSymlink:
// * dentry at name has been revalidated
func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name string, ds **[]*dentry) (*dentry, error) {
if len(name) > maxFilenameLen {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
if child, ok := parent.children[name]; ok || parent.isSynthetic() {
if child == nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
return child, nil
}
qid, file, attrMask, attr, err := parent.file.walkGetAttrOne(ctx, name)
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
parent.cacheNegativeLookupLocked(name)
}
return nil, err
@@ -302,7 +288,7 @@ func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving
}
}
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -330,7 +316,7 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
}
}
if rp.MustBeDir() && !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -359,10 +345,10 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
}
name := rp.Component()
if name == "." || name == ".." {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if parent.isDeleted() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := fs.revalidateOne(ctx, rp.VirtualFilesystem(), parent, name, &ds); err != nil {
return err
@@ -372,20 +358,20 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
defer parent.dirMu.Unlock()
if len(name) > maxFilenameLen {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
// Check for existence only if caching information is available. Otherwise,
// don't check for existence just yet. We will check for existence if the
// checks for writability fail below. Existence check is done by the creation
// RPCs themselves.
if child, ok := parent.children[name]; ok && child != nil {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
checkExistence := func() error {
- if child, err := fs.getChildLocked(ctx, parent, name, &ds); err != nil && err != syserror.ENOENT {
+ if child, err := fs.getChildLocked(ctx, parent, name, &ds); err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
return err
} else if child != nil {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
return nil
}
@@ -408,11 +394,11 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
return err
}
if !dir && rp.MustBeDir() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if parent.isSynthetic() {
if createInSyntheticDir == nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := createInSyntheticDir(parent, name); err != nil {
return err
@@ -469,14 +455,14 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
name := rp.Component()
if dir {
if name == "." {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if name == ".." {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
} else {
if name == "." || name == ".." {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
}
@@ -499,7 +485,7 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
child, ok = parent.children[name]
if ok && child == nil {
// Hit a negative cached entry, child doesn't exist.
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
} else {
child, _, err = fs.stepLocked(ctx, rp, parent, false /* mayFollowSymlinks */, &ds)
@@ -539,8 +525,8 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
if child.syntheticChildren != 0 {
// This is definitely not an empty directory, irrespective of
// fs.opts.interop.
- vfsObj.AbortDeleteDentry(&child.vfsd)
- return syserror.ENOTEMPTY
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: PrepareDeleteDentry called if child != nil.
+ return linuxerr.ENOTEMPTY
}
// If InteropModeShared is in effect and the first call to
// PrepareDeleteDentry above succeeded, then child wasn't
@@ -549,13 +535,13 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
// still exist) would be a waste of time.
if child.cachedMetadataAuthoritative() {
if !child.isDir() {
- vfsObj.AbortDeleteDentry(&child.vfsd)
- return syserror.ENOTDIR
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above.
+ return linuxerr.ENOTDIR
}
for _, grandchild := range child.children {
if grandchild != nil {
- vfsObj.AbortDeleteDentry(&child.vfsd)
- return syserror.ENOTEMPTY
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above.
+ return linuxerr.ENOTEMPTY
}
}
}
@@ -564,25 +550,25 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
} else {
// child must be a non-directory file.
if child != nil && child.isDir() {
- vfsObj.AbortDeleteDentry(&child.vfsd)
- return syserror.EISDIR
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above.
+ return linuxerr.EISDIR
}
if rp.MustBeDir() {
if child != nil {
- vfsObj.AbortDeleteDentry(&child.vfsd)
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above.
}
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
if parent.isSynthetic() {
if child == nil {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
} else if child == nil || !child.isSynthetic() {
err = parent.file.unlinkAt(ctx, name, flags)
if err != nil {
if child != nil {
- vfsObj.AbortDeleteDentry(&child.vfsd)
+ vfsObj.AbortDeleteDentry(&child.vfsd) // +checklocksforce: see above.
}
return err
}
@@ -600,7 +586,7 @@ func (fs *filesystem) unlinkAt(ctx context.Context, rp *vfs.ResolvingPath, dir b
}
if child != nil {
- vfsObj.CommitDeleteDentry(ctx, &child.vfsd)
+ vfsObj.CommitDeleteDentry(ctx, &child.vfsd) // +checklocksforce: see above.
child.setDeleted()
if child.isSynthetic() {
parent.syntheticChildren--
@@ -642,7 +628,7 @@ func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, op
}
if opts.CheckSearchable {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -674,11 +660,11 @@ func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa
func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, _ **[]*dentry) error {
if rp.Mount() != vd.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
d := vd.Dentry().Impl().(*dentry)
if d.isDir() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
gid := auth.KGID(atomic.LoadUint32(&d.gid))
uid := auth.KUID(atomic.LoadUint32(&d.uid))
@@ -687,10 +673,10 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.
return err
}
if d.nlink == 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if d.nlink == math.MaxUint32 {
- return syserror.EMLINK
+ return linuxerr.EMLINK
}
if err := parent.file.link(ctx, d.file, childName); err != nil {
return err
@@ -715,7 +701,7 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
mode |= linux.S_ISGID
}
if _, err := parent.file.mkdir(ctx, name, p9.FileMode(mode), (p9.UID)(creds.EffectiveKUID), p9.GID(kgid)); err != nil {
- if !opts.ForSyntheticMountpoint || err == syserror.EEXIST {
+ if !opts.ForSyntheticMountpoint || linuxerr.Equals(linuxerr.EEXIST, err) {
return err
}
ctx.Infof("Failed to create remote directory %q: %v; falling back to synthetic directory", name, err)
@@ -734,7 +720,7 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
}, func(parent *dentry, name string) error {
if !opts.ForSyntheticMountpoint {
// Can't create non-synthetic files in synthetic directories.
- return syserror.EPERM
+ return linuxerr.EPERM
}
parent.createSyntheticChildLocked(&createSyntheticOpts{
name: name,
@@ -752,7 +738,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, name string, ds **[]*dentry) error {
creds := rp.Credentials()
_, err := parent.file.mknod(ctx, name, (p9.FileMode)(opts.Mode), opts.DevMajor, opts.DevMinor, (p9.UID)(creds.EffectiveKUID), (p9.GID)(creds.EffectiveKGID))
- if err != syserror.EPERM {
+ if !linuxerr.Equals(linuxerr.EPERM, err) {
return err
}
@@ -764,8 +750,8 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
switch {
case err == nil:
// Step succeeded, another file exists.
- return syserror.EEXIST
- case err != syserror.ENOENT:
+ return linuxerr.EEXIST
+ case !linuxerr.Equals(linuxerr.ENOENT, err):
// Unexpected error.
return err
}
@@ -793,7 +779,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
return nil
}
// Retain error from gofer if synthetic file cannot be created internally.
- return syserror.EPERM
+ return linuxerr.EPERM
}, nil)
}
@@ -804,7 +790,7 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
// support, and it isn't clear that there's any way to implement this in
// 9P.
if opts.Flags&linux.O_TMPFILE != 0 {
- return nil, syserror.EOPNOTSUPP
+ return nil, linuxerr.EOPNOTSUPP
}
mayCreate := opts.Flags&linux.O_CREAT != 0
mustCreate := opts.Flags&(linux.O_CREAT|linux.O_EXCL) == (linux.O_CREAT | linux.O_EXCL)
@@ -824,10 +810,10 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
if rp.Done() {
// Reject attempts to open mount root directory with O_CREAT.
if mayCreate && rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
if !start.cachedMetadataAuthoritative() {
// Refresh dentry's attributes before opening.
@@ -854,7 +840,7 @@ afterTrailingSymlink:
}
// Reject attempts to open directories with O_CREAT.
if mayCreate && rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if err := fs.revalidateOne(ctx, rp.VirtualFilesystem(), parent, rp.Component(), &ds); err != nil {
return nil, err
@@ -862,10 +848,10 @@ afterTrailingSymlink:
// Determine whether or not we need to create a file.
parent.dirMu.Lock()
child, _, err := fs.stepLocked(ctx, rp, parent, false /* mayFollowSymlinks */, &ds)
- if err == syserror.ENOENT && mayCreate {
+ if linuxerr.Equals(linuxerr.ENOENT, err) && mayCreate {
if parent.isSynthetic() {
parent.dirMu.Unlock()
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
fd, err := parent.createAndOpenChildLocked(ctx, rp, &opts, &ds)
parent.dirMu.Unlock()
@@ -876,7 +862,7 @@ afterTrailingSymlink:
return nil, err
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
// Open existing child or follow symlink.
if child.isSymlink() && rp.ShouldFollowSymlink() {
@@ -891,7 +877,7 @@ afterTrailingSymlink:
goto afterTrailingSymlink
}
if rp.MustBeDir() && !child.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
child.IncRef()
defer child.DecRef(ctx)
@@ -935,14 +921,14 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open
case linux.S_IFDIR:
// Can't open directories with O_CREAT.
if opts.Flags&linux.O_CREAT != 0 {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
// Can't open directories writably.
if ats&vfs.MayWrite != 0 {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if opts.Flags&linux.O_DIRECT != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
if !d.isSynthetic() {
if err := d.ensureSharedHandle(ctx, ats&vfs.MayRead != 0, false /* write */, false /* trunc */); err != nil {
@@ -962,10 +948,10 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open
return &fd.vfsfd, nil
case linux.S_IFLNK:
// Can't open symlinks without O_PATH, which is handled at the VFS layer.
- return nil, syserror.ELOOP
+ return nil, linuxerr.ELOOP
case linux.S_IFSOCK:
if d.isSynthetic() {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
if d.fs.iopts.OpenSocketsByConnecting {
return d.openSocketByConnecting(ctx, opts)
@@ -998,7 +984,7 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open
func (d *dentry) openSocketByConnecting(ctx context.Context, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
if opts.Flags&linux.O_DIRECT != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
fdObj, err := d.file.connect(ctx, p9.AnonymousSocket)
if err != nil {
@@ -1019,7 +1005,7 @@ func (d *dentry) openSocketByConnecting(ctx context.Context, opts *vfs.OpenOptio
func (d *dentry) openSpecialFile(ctx context.Context, mnt *vfs.Mount, opts *vfs.OpenOptions) (*vfs.FileDescription, error) {
ats := vfs.AccessTypesForOpenFlags(opts)
if opts.Flags&linux.O_DIRECT != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
// We assume that the server silently inserts O_NONBLOCK in the open flags
// for all named pipes (because all existing gofers do this).
@@ -1033,7 +1019,7 @@ func (d *dentry) openSpecialFile(ctx context.Context, mnt *vfs.Mount, opts *vfs.
retry:
h, err := openHandle(ctx, d.file, ats.MayRead(), ats.MayWrite(), opts.Flags&linux.O_TRUNC != 0)
if err != nil {
- if isBlockingOpenOfNamedPipe && ats == vfs.MayWrite && err == syserror.ENXIO {
+ if isBlockingOpenOfNamedPipe && ats == vfs.MayWrite && linuxerr.Equals(linuxerr.ENXIO, err) {
// An attempt to open a named pipe with O_WRONLY|O_NONBLOCK fails
// with ENXIO if opening the same named pipe with O_WRONLY would
// block because there are no readers of the pipe.
@@ -1067,7 +1053,7 @@ func (d *dentry) createAndOpenChildLocked(ctx context.Context, rp *vfs.Resolving
return nil, err
}
if d.isDeleted() {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
mnt := rp.Mount()
if err := mnt.CheckBeginWrite(); err != nil {
@@ -1187,7 +1173,7 @@ func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (st
return "", err
}
if !d.isSymlink() {
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
return d.readlink(ctx, rp.Mount())
}
@@ -1204,24 +1190,24 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
if opts.Flags&^linux.RENAME_NOREPLACE != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if fs.opts.interop == InteropModeShared && opts.Flags&linux.RENAME_NOREPLACE != 0 {
// Requires 9P support to synchronize with other remote filesystem
// users.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
newName := rp.Component()
if newName == "." || newName == ".." {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
mnt := rp.Mount()
if mnt != oldParentVD.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := mnt.CheckBeginWrite(); err != nil {
return err
@@ -1260,7 +1246,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
if renamed.isDir() {
if renamed == newParent || genericIsAncestorDentry(renamed, newParent) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if oldParent != newParent {
if err := renamed.checkPermissions(creds, vfs.MayWrite); err != nil {
@@ -1269,7 +1255,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
} else {
if opts.MustBeDir || rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
@@ -1281,28 +1267,28 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
defer newParent.dirMu.Unlock()
}
if newParent.isDeleted() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
replaced, err := fs.getChildLocked(ctx, newParent, newName, &ds)
- if err != nil && err != syserror.ENOENT {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
return err
}
var replacedVFSD *vfs.Dentry
if replaced != nil {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
replacedVFSD = &replaced.vfsd
if replaced.isDir() {
if !renamed.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if genericIsAncestorDentry(replaced, renamed) {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
} else {
if rp.MustBeDir() || renamed.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
}
@@ -1507,7 +1493,7 @@ func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath
return d.endpoint, nil
}
}
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
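
Several hunks above document lock hand-offs for the checklocks analyzer: +checklocks marks a held-lock precondition, +checklocksrelease marks a function that returns with the lock released, and +checklocksforce / +checklocksignore suppress the check where the analyzer cannot follow the flow. A small self-contained sketch of the first two annotations, using made-up types rather than the gofer ones:

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu      sync.Mutex
	entries map[string]int
}

// addLocked requires c.mu to be held, mirroring the precondition
// annotations added above.
// +checklocks:c.mu
func (c *cache) addLocked(k string, v int) {
	c.entries[k] = v
}

// addAndUnlock releases c.mu before returning, which is the contract that
// +checklocksrelease documents to the analyzer.
// +checklocksrelease:c.mu
func (c *cache) addAndUnlock(k string, v int) {
	c.addLocked(k, v)
	c.mu.Unlock()
}

func main() {
	c := &cache{entries: map[string]int{}}
	c.mu.Lock()
	c.addAndUnlock("x", 1)
	fmt.Println(c.entries["x"]) // 1
}
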
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index cf69e1b7a..43440ec19 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -46,6 +46,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/p9"
@@ -61,7 +62,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/unet"
)
@@ -318,7 +318,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
mfp := pgalloc.MemoryFileProviderFromContext(ctx)
if mfp == nil {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: context does not provide a pgalloc.MemoryFileProvider")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
mopts := vfs.GenericParseMountOptions(opts.Data)
@@ -354,7 +354,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
fsopts.interop = InteropModeShared
default:
ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid cache policy: %s=%s", moptCache, cache)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
}
@@ -365,7 +365,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
dfltuid, err := strconv.ParseUint(dfltuidstr, 10, 32)
if err != nil {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: %s=%s", moptDfltUID, dfltuidstr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// In Linux, dfltuid is interpreted as a UID and is converted to a KUID
// in the caller's user namespace, but goferfs isn't
@@ -378,7 +378,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
dfltgid, err := strconv.ParseUint(dfltgidstr, 10, 32)
if err != nil {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: %s=%s", moptDfltGID, dfltgidstr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.dfltgid = auth.KGID(dfltgid)
}
@@ -390,7 +390,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
msize, err := strconv.ParseUint(msizestr, 10, 32)
if err != nil {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid message size: %s=%s", moptMsize, msizestr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.msize = uint32(msize)
}
@@ -409,7 +409,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
maxCachedDentries, err := strconv.ParseUint(str, 10, 64)
if err != nil {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid dentry cache limit: %s=%s", moptDentryCacheLimit, str)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
fsopts.maxCachedDentries = maxCachedDentries
}
@@ -433,14 +433,14 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Check for unparsed options.
if len(mopts) != 0 {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: unknown options: %v", mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Handle internal options.
iopts, ok := opts.InternalData.(InternalFilesystemOptions)
if opts.InternalData != nil && !ok {
ctx.Warningf("gofer.FilesystemType.GetFilesystem: GetFilesystemOptions.InternalData has type %T, wanted gofer.InternalFilesystemOptions", opts.InternalData)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// If !ok, iopts being the zero value is correct.
@@ -503,7 +503,7 @@ func getFDFromMountOptionsMap(ctx context.Context, mopts map[string]string) (int
trans, ok := mopts[moptTransport]
if !ok || trans != transportModeFD {
ctx.Warningf("gofer.getFDFromMountOptionsMap: transport must be specified as '%s=%s'", moptTransport, transportModeFD)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
delete(mopts, moptTransport)
@@ -511,28 +511,28 @@ func getFDFromMountOptionsMap(ctx context.Context, mopts map[string]string) (int
rfdstr, ok := mopts[moptReadFD]
if !ok {
ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD must be specified as '%s=<file descriptor>'", moptReadFD)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
delete(mopts, moptReadFD)
rfd, err := strconv.Atoi(rfdstr)
if err != nil {
ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid read FD: %s=%s", moptReadFD, rfdstr)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
wfdstr, ok := mopts[moptWriteFD]
if !ok {
ctx.Warningf("gofer.getFDFromMountOptionsMap: write FD must be specified as '%s=<file descriptor>'", moptWriteFD)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
delete(mopts, moptWriteFD)
wfd, err := strconv.Atoi(wfdstr)
if err != nil {
ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid write FD: %s=%s", moptWriteFD, wfdstr)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
if rfd != wfd {
ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD (%d) and write FD (%d) must be equal", rfd, wfd)
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
return rfd, nil
}
@@ -581,10 +581,10 @@ func (fs *filesystem) Release(ctx context.Context) {
d.dataMu.Unlock()
// Close host FDs if they exist.
if d.readFD >= 0 {
- unix.Close(int(d.readFD))
+ _ = unix.Close(int(d.readFD))
}
if d.writeFD >= 0 && d.readFD != d.writeFD {
- unix.Close(int(d.writeFD))
+ _ = unix.Close(int(d.writeFD))
}
d.readFD = -1
d.writeFD = -1
@@ -864,11 +864,11 @@ func dentryAttrMask() p9.AttrMask {
func (fs *filesystem) newDentry(ctx context.Context, file p9file, qid p9.QID, mask p9.AttrMask, attr *p9.Attr) (*dentry, error) {
if !mask.Mode {
ctx.Warningf("can't create gofer.dentry without file type")
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
if attr.Mode.FileType() == p9.ModeRegular && !mask.Size {
ctx.Warningf("can't create regular file gofer.dentry without file size")
- return nil, syserror.EIO
+ return nil, linuxerr.EIO
}
d := &dentry{
@@ -946,10 +946,10 @@ func (d *dentry) cachedMetadataAuthoritative() bool {
// updateFromP9Attrs is called to update d's metadata after an update from the
// remote filesystem.
// Precondition: d.metadataMu must be locked.
+// +checklocks:d.metadataMu
func (d *dentry) updateFromP9AttrsLocked(mask p9.AttrMask, attr *p9.Attr) {
if mask.Mode {
if got, want := uint32(attr.Mode.FileType()), d.fileType(); got != want {
- d.metadataMu.Unlock()
panic(fmt.Sprintf("gofer.dentry file type changed from %#o to %#o", want, got))
}
atomic.StoreUint32(&d.mode, uint32(attr.Mode))
@@ -988,13 +988,14 @@ func (d *dentry) updateFromP9AttrsLocked(mask p9.AttrMask, attr *p9.Attr) {
// Preconditions: !d.isSynthetic().
// Preconditions: d.metadataMu is locked.
+// +checklocks:d.metadataMu
func (d *dentry) refreshSizeLocked(ctx context.Context) error {
d.handleMu.RLock()
if d.writeFD < 0 {
d.handleMu.RUnlock()
// Ask the gofer if we don't have a host FD.
- return d.updateFromGetattrLocked(ctx)
+ return d.updateFromGetattrLocked(ctx, p9file{})
}
var stat unix.Statx_t
@@ -1013,37 +1014,41 @@ func (d *dentry) updateFromGetattr(ctx context.Context) error {
// updating stale attributes in d.updateFromP9AttrsLocked().
d.metadataMu.Lock()
defer d.metadataMu.Unlock()
- return d.updateFromGetattrLocked(ctx)
+ return d.updateFromGetattrLocked(ctx, p9file{})
}
// Preconditions:
// * !d.isSynthetic().
// * d.metadataMu is locked.
-func (d *dentry) updateFromGetattrLocked(ctx context.Context) error {
- // Use d.readFile or d.writeFile, which represent 9P FIDs that have been
- // opened, in preference to d.file, which represents a 9P fid that has not.
- // This may be significantly more efficient in some implementations. Prefer
- // d.writeFile over d.readFile since some filesystem implementations may
- // update a writable handle's metadata after writes to that handle, without
- // making metadata updates immediately visible to read-only handles
- // representing the same file.
- d.handleMu.RLock()
- handleMuRLocked := true
- var file p9file
- switch {
- case !d.writeFile.isNil():
- file = d.writeFile
- case !d.readFile.isNil():
- file = d.readFile
- default:
- file = d.file
- d.handleMu.RUnlock()
- handleMuRLocked = false
+// +checklocks:d.metadataMu
+func (d *dentry) updateFromGetattrLocked(ctx context.Context, file p9file) error {
+ handleMuRLocked := false
+ if file.isNil() {
+ // Use d.readFile or d.writeFile, which represent 9P FIDs that have
+ // been opened, in preference to d.file, which represents a 9P fid that
+ // has not. This may be significantly more efficient in some
+ // implementations. Prefer d.writeFile over d.readFile since some
+ // filesystem implementations may update a writable handle's metadata
+ // after writes to that handle, without making metadata updates
+ // immediately visible to read-only handles representing the same file.
+ d.handleMu.RLock()
+ switch {
+ case !d.writeFile.isNil():
+ file = d.writeFile
+ handleMuRLocked = true
+ case !d.readFile.isNil():
+ file = d.readFile
+ handleMuRLocked = true
+ default:
+ file = d.file
+ d.handleMu.RUnlock()
+ }
}
_, attrMask, attr, err := file.getAttr(ctx, dentryAttrMask())
if handleMuRLocked {
- d.handleMu.RUnlock() // must be released before updateFromP9AttrsLocked()
+ // handleMu must be released before updateFromP9AttrsLocked().
+ d.handleMu.RUnlock() // +checklocksforce: complex case.
}
if err != nil {
return err
@@ -1090,7 +1095,7 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
return nil
}
if stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID|linux.STATX_ATIME|linux.STATX_MTIME|linux.STATX_SIZE) != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
mode := linux.FileMode(atomic.LoadUint32(&d.mode))
if err := vfs.CheckSetStat(ctx, creds, opts, mode, auth.KUID(atomic.LoadUint32(&d.uid)), auth.KGID(atomic.LoadUint32(&d.gid))); err != nil {
@@ -1108,9 +1113,9 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
case linux.S_IFREG:
// ok
case linux.S_IFDIR:
- return syserror.EISDIR
+ return linuxerr.EISDIR
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -1157,6 +1162,13 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
if !d.isSynthetic() {
if stat.Mask != 0 {
+ if stat.Mask&linux.STATX_SIZE != 0 {
+ // d.dataMu must be held around the update to both the remote
+ // file's size and d.size to serialize with writeback (which
+ // might otherwise write data back up to the old d.size after
+ // the remote file has been truncated).
+ d.dataMu.Lock()
+ }
if err := d.file.setAttr(ctx, p9.SetAttrMask{
Permissions: stat.Mask&linux.STATX_MODE != 0,
UID: stat.Mask&linux.STATX_UID != 0,
@@ -1176,13 +1188,16 @@ func (d *dentry) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs
MTimeSeconds: uint64(stat.Mtime.Sec),
MTimeNanoSeconds: uint64(stat.Mtime.Nsec),
}); err != nil {
+ if stat.Mask&linux.STATX_SIZE != 0 {
+ d.dataMu.Unlock() // +checklocksforce: locked conditionally above
+ }
return err
}
if stat.Mask&linux.STATX_SIZE != 0 {
// d.size should be kept up to date, and privatized
// copy-on-write mappings of truncated pages need to be
// invalidated, even if InteropModeShared is in effect.
- d.updateSizeLocked(stat.Size)
+ d.updateSizeAndUnlockDataMuLocked(stat.Size) // +checklocksforce: locked conditionally above
}
}
if d.fs.opts.interop == InteropModeShared {
@@ -1245,6 +1260,14 @@ func (d *dentry) doAllocate(ctx context.Context, offset, length uint64, allocate
// Preconditions: d.metadataMu must be locked.
func (d *dentry) updateSizeLocked(newSize uint64) {
d.dataMu.Lock()
+ d.updateSizeAndUnlockDataMuLocked(newSize)
+}
+
+// Preconditions: d.metadataMu and d.dataMu must be locked.
+//
+// Postconditions: d.dataMu is unlocked.
+// +checklocksrelease:d.dataMu
+func (d *dentry) updateSizeAndUnlockDataMuLocked(newSize uint64) {
oldSize := d.size
atomic.StoreUint64(&d.size, newSize)
// d.dataMu must be unlocked to lock d.mapsMu and invalidate mappings
@@ -1253,9 +1276,9 @@ func (d *dentry) updateSizeLocked(newSize uint64) {
// contents beyond the new d.size. (We are still holding d.metadataMu,
// so we can't race with Write or another truncate.)
d.dataMu.Unlock()
- if d.size < oldSize {
+ if newSize < oldSize {
oldpgend, _ := hostarch.PageRoundUp(oldSize)
- newpgend, _ := hostarch.PageRoundUp(d.size)
+ newpgend, _ := hostarch.PageRoundUp(newSize)
if oldpgend != newpgend {
d.mapsMu.Lock()
d.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
@@ -1271,8 +1294,8 @@ func (d *dentry) updateSizeLocked(newSize uint64) {
// truncated pages have been removed from the remote file, they
// should be dropped without being written back.
d.dataMu.Lock()
- d.cache.Truncate(d.size, d.fs.mfp.MemoryFile())
- d.dirty.KeepClean(memmap.MappableRange{d.size, oldpgend})
+ d.cache.Truncate(newSize, d.fs.mfp.MemoryFile())
+ d.dirty.KeepClean(memmap.MappableRange{newSize, oldpgend})
d.dataMu.Unlock()
}
}
@@ -1288,7 +1311,7 @@ func (d *dentry) checkXattrPermissions(creds *auth.Credentials, name string, ats
// to the remote filesystem. This is inconsistent with Linux's 9p client,
// but consistent with other filesystems (e.g. FUSE).
if strings.HasPrefix(name, linux.XATTR_SECURITY_PREFIX) || strings.HasPrefix(name, linux.XATTR_SYSTEM_PREFIX) {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
mode := linux.FileMode(atomic.LoadUint32(&d.mode))
kuid := auth.KUID(atomic.LoadUint32(&d.uid))
@@ -1469,7 +1492,7 @@ func (d *dentry) checkCachingLocked(ctx context.Context, renameMuWriteLocked boo
if d.isDeleted() {
d.watches.HandleDeletion(ctx)
}
- d.destroyLocked(ctx)
+ d.destroyLocked(ctx) // +checklocksforce: renameMu must be acquired at this point.
return
}
// If d still has inotify watches and it is not deleted or invalidated, it
@@ -1497,7 +1520,7 @@ func (d *dentry) checkCachingLocked(ctx context.Context, renameMuWriteLocked boo
delete(d.parent.children, d.name)
d.parent.dirMu.Unlock()
}
- d.destroyLocked(ctx)
+ d.destroyLocked(ctx) // +checklocksforce: see above.
return
}
@@ -1526,7 +1549,7 @@ func (d *dentry) checkCachingLocked(ctx context.Context, renameMuWriteLocked boo
d.fs.renameMu.Lock()
defer d.fs.renameMu.Unlock()
}
- d.fs.evictCachedDentryLocked(ctx)
+ d.fs.evictCachedDentryLocked(ctx) // +checklocksforce: see above.
}
}
@@ -1543,6 +1566,7 @@ func (d *dentry) removeFromCacheLocked() {
// Precondition: fs.renameMu must be locked for writing; it may be temporarily
// unlocked.
+// +checklocks:fs.renameMu
func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {
for fs.cachedDentriesLen != 0 {
fs.evictCachedDentryLocked(ctx)
@@ -1551,6 +1575,7 @@ func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {
// Preconditions:
// * fs.renameMu must be locked for writing; it may be temporarily unlocked.
+// +checklocks:fs.renameMu
func (fs *filesystem) evictCachedDentryLocked(ctx context.Context) {
fs.cacheMu.Lock()
victim := fs.cachedDentries.Back()
@@ -1587,7 +1612,7 @@ func (fs *filesystem) evictCachedDentryLocked(ctx context.Context) {
// will try to acquire fs.renameMu (which we have already acquired). Hence,
// fs.renameMu will synchronize the destroy attempts.
victim.cachingMu.Unlock()
- victim.destroyLocked(ctx)
+ victim.destroyLocked(ctx) // +checklocksforce: owned as precondition, victim.fs == fs.
}
// destroyLocked destroys the dentry.
@@ -1597,6 +1622,7 @@ func (fs *filesystem) evictCachedDentryLocked(ctx context.Context) {
// * d.refs == 0.
// * d.parent.children[d.name] != d, i.e. d is not reachable by path traversal
// from its former parent dentry.
+// +checklocks:d.fs.renameMu
func (d *dentry) destroyLocked(ctx context.Context) {
switch atomic.LoadInt64(&d.refs) {
case 0:
@@ -1630,18 +1656,18 @@ func (d *dentry) destroyLocked(ctx context.Context) {
d.dataMu.Unlock()
// Clunk open fids and close open host FDs.
if !d.readFile.isNil() {
- d.readFile.close(ctx)
+ _ = d.readFile.close(ctx)
}
if !d.writeFile.isNil() && d.readFile != d.writeFile {
- d.writeFile.close(ctx)
+ _ = d.writeFile.close(ctx)
}
d.readFile = p9file{}
d.writeFile = p9file{}
if d.readFD >= 0 {
- unix.Close(int(d.readFD))
+ _ = unix.Close(int(d.readFD))
}
if d.writeFD >= 0 && d.readFD != d.writeFD {
- unix.Close(int(d.writeFD))
+ _ = unix.Close(int(d.writeFD))
}
d.readFD = -1
d.writeFD = -1
@@ -1703,7 +1729,7 @@ func (d *dentry) listXattr(ctx context.Context, creds *auth.Credentials, size ui
func (d *dentry) getXattr(ctx context.Context, creds *auth.Credentials, opts *vfs.GetXattrOptions) (string, error) {
if d.file.isNil() {
- return "", syserror.ENODATA
+ return "", linuxerr.ENODATA
}
if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayRead); err != nil {
return "", err
@@ -1713,7 +1739,7 @@ func (d *dentry) getXattr(ctx context.Context, creds *auth.Credentials, opts *vf
func (d *dentry) setXattr(ctx context.Context, creds *auth.Credentials, opts *vfs.SetXattrOptions) error {
if d.file.isNil() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := d.checkXattrPermissions(creds, opts.Name, vfs.MayWrite); err != nil {
return err
@@ -1723,7 +1749,7 @@ func (d *dentry) setXattr(ctx context.Context, creds *auth.Credentials, opts *vf
func (d *dentry) removeXattr(ctx context.Context, creds *auth.Credentials, name string) error {
if d.file.isNil() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := d.checkXattrPermissions(creds, name, vfs.MayWrite); err != nil {
return err
@@ -1763,7 +1789,7 @@ func (d *dentry) ensureSharedHandle(ctx context.Context, read, write, trunc bool
openReadable := !d.readFile.isNil() || read
openWritable := !d.writeFile.isNil() || write
h, err := openHandle(ctx, d.file, openReadable, openWritable, trunc)
- if err == syserror.EACCES && (openReadable != read || openWritable != write) {
+ if linuxerr.Equals(linuxerr.EACCES, err) && (openReadable != read || openWritable != write) {
// It may not be possible to use a single handle for both
// reading and writing, since permissions on the file may have
// changed to e.g. disallow reading after previously being
@@ -2020,9 +2046,17 @@ func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linu
d := fd.dentry()
const validMask = uint32(linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME | linux.STATX_SIZE | linux.STATX_BLOCKS | linux.STATX_BTIME)
if !d.cachedMetadataAuthoritative() && opts.Mask&validMask != 0 && opts.Sync != linux.AT_STATX_DONT_SYNC {
- // TODO(jamieliu): Use specialFileFD.handle.file for the getattr if
- // available?
- if err := d.updateFromGetattr(ctx); err != nil {
+ // Use specialFileFD.handle.file for the getattr if available, for the
+ // same reason that we try to use open file handles in
+ // dentry.updateFromGetattrLocked().
+ var file p9file
+ if sffd, ok := fd.vfsfd.Impl().(*specialFileFD); ok {
+ file = sffd.handle.file
+ }
+ d.metadataMu.Lock()
+ err := d.updateFromGetattrLocked(ctx, file)
+ d.metadataMu.Unlock()
+ if err != nil {
return linux.Statx{}, err
}
}
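
The setStat/updateSizeLocked changes above conditionally take d.dataMu before the remote truncate and hand the unlock off to a helper (updateSizeAndUnlockDataMuLocked, tagged +checklocksrelease) so writeback cannot push stale data past the new size. A minimal sketch of that conditional lock hand-off, with stand-in names rather than the gofer API:

package main

import (
	"fmt"
	"sync"
)

type file struct {
	dataMu sync.Mutex
	size   uint64
}

// setSizeAndUnlock updates the cached size and releases dataMu, matching
// the +checklocksrelease:d.dataMu contract in the diff.
func (f *file) setSizeAndUnlock(newSize uint64) {
	f.size = newSize
	f.dataMu.Unlock()
}

func (f *file) setAttr(truncateTo *uint64) error {
	if truncateTo != nil {
		// Hold dataMu across the remote update so concurrent writeback
		// cannot write data past the new size.
		f.dataMu.Lock()
	}
	if err := remoteSetAttr(truncateTo); err != nil {
		if truncateTo != nil {
			f.dataMu.Unlock()
		}
		return err
	}
	if truncateTo != nil {
		f.setSizeAndUnlock(*truncateTo)
	}
	return nil
}

// remoteSetAttr stands in for the setattr RPC.
func remoteSetAttr(truncateTo *uint64) error { return nil }

func main() {
	f := &file{size: 100}
	sz := uint64(10)
	_ = f.setAttr(&sz)
	fmt.Println(f.size) // 10
}
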
diff --git a/pkg/sentry/fsimpl/gofer/handle.go b/pkg/sentry/fsimpl/gofer/handle.go
index 5c57f6fea..02540a754 100644
--- a/pkg/sentry/fsimpl/gofer/handle.go
+++ b/pkg/sentry/fsimpl/gofer/handle.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/hostfd"
+ "gvisor.dev/gvisor/pkg/sync"
)
// handle represents a remote "open file descriptor", consisting of an opened
@@ -130,3 +131,43 @@ func (h *handle) writeFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, o
}
return uint64(n), cperr
}
+
+type handleReadWriter struct {
+ ctx context.Context
+ h *handle
+ off uint64
+}
+
+var handleReadWriterPool = sync.Pool{
+ New: func() interface{} {
+ return &handleReadWriter{}
+ },
+}
+
+func getHandleReadWriter(ctx context.Context, h *handle, offset int64) *handleReadWriter {
+ rw := handleReadWriterPool.Get().(*handleReadWriter)
+ rw.ctx = ctx
+ rw.h = h
+ rw.off = uint64(offset)
+ return rw
+}
+
+func putHandleReadWriter(rw *handleReadWriter) {
+ rw.ctx = nil
+ rw.h = nil
+ handleReadWriterPool.Put(rw)
+}
+
+// ReadToBlocks implements safemem.Reader.ReadToBlocks.
+func (rw *handleReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
+ n, err := rw.h.readToBlocksAt(rw.ctx, dsts, rw.off)
+ rw.off += n
+ return n, err
+}
+
+// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
+func (rw *handleReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
+ n, err := rw.h.writeFromBlocksAt(rw.ctx, srcs, rw.off)
+ rw.off += n
+ return n, err
+}
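
handleReadWriter above is recycled through a sync.Pool so the hot read/write paths do not allocate a fresh adapter per call, and putHandleReadWriter clears its fields so pooled objects do not pin the context or handle. The same pattern in a standalone form, with the gofer types swapped for a strings.Reader:

package main

import (
	"fmt"
	"strings"
	"sync"
)

type offsetReader struct {
	r   *strings.Reader
	off int64
}

var offsetReaderPool = sync.Pool{
	New: func() interface{} { return &offsetReader{} },
}

func getOffsetReader(r *strings.Reader, off int64) *offsetReader {
	or := offsetReaderPool.Get().(*offsetReader)
	or.r = r
	or.off = off
	return or
}

func putOffsetReader(or *offsetReader) {
	or.r = nil // drop references so pooled objects don't pin memory
	offsetReaderPool.Put(or)
}

// Read implements io.Reader by delegating to ReadAt at the tracked offset.
func (or *offsetReader) Read(p []byte) (int, error) {
	n, err := or.r.ReadAt(p, or.off)
	or.off += int64(n)
	return n, err
}

func main() {
	or := getOffsetReader(strings.NewReader("hello world"), 6)
	buf := make([]byte, 5)
	n, _ := or.Read(buf)
	putOffsetReader(or)
	fmt.Println(string(buf[:n])) // world
}
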
diff --git a/pkg/sentry/fsimpl/gofer/host_named_pipe.go b/pkg/sentry/fsimpl/gofer/host_named_pipe.go
index c7bf10007..505916a57 100644
--- a/pkg/sentry/fsimpl/gofer/host_named_pipe.go
+++ b/pkg/sentry/fsimpl/gofer/host_named_pipe.go
@@ -21,7 +21,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// Global pipe used by blockUntilNonblockingPipeHasWriter since we can't create
@@ -78,7 +78,7 @@ func nonblockingPipeHasWriter(fd int32) (bool, error) {
defer tempPipeMu.Unlock()
// Copy 1 byte from fd into the temporary pipe.
n, err := unix.Tee(int(fd), tempPipeWriteFD, 1, unix.SPLICE_F_NONBLOCK)
- if err == syserror.EAGAIN {
+ if linuxerr.Equals(linuxerr.EAGAIN, err) {
// The pipe represented by fd is empty, but has a writer.
return true, nil
}
@@ -108,6 +108,6 @@ func sleepBetweenNamedPipeOpenChecks(ctx context.Context) error {
return nil
case <-cancel:
ctx.SleepFinish(false)
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
}
}
diff --git a/pkg/sentry/fsimpl/gofer/p9file.go b/pkg/sentry/fsimpl/gofer/p9file.go
index b0a429d42..5a3ddfc9d 100644
--- a/pkg/sentry/fsimpl/gofer/p9file.go
+++ b/pkg/sentry/fsimpl/gofer/p9file.go
@@ -16,9 +16,9 @@ package gofer
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/p9"
- "gvisor.dev/gvisor/pkg/syserror"
)
// p9file is a wrapper around p9.File that provides methods that are
@@ -59,7 +59,7 @@ func (f p9file) walkGetAttrOne(ctx context.Context, name string) (p9.QID, p9file
if newfile != nil {
p9file{newfile}.close(ctx)
}
- return p9.QID{}, p9file{}, p9.AttrMask{}, p9.Attr{}, syserror.EIO
+ return p9.QID{}, p9file{}, p9.AttrMask{}, p9.Attr{}, linuxerr.EIO
}
return qids[0], p9file{newfile}, attrMask, attr, nil
}
diff --git a/pkg/sentry/fsimpl/gofer/regular_file.go b/pkg/sentry/fsimpl/gofer/regular_file.go
index eed05e369..947dbe05f 100644
--- a/pkg/sentry/fsimpl/gofer/regular_file.go
+++ b/pkg/sentry/fsimpl/gofer/regular_file.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
@@ -34,7 +35,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -79,17 +79,22 @@ func (fd *regularFileFD) OnClose(ctx context.Context) error {
if !fd.vfsfd.IsWritable() {
return nil
}
- // Skip flushing if there are client-buffered writes, since (as with the
- // VFS1 client) we don't flush buffered writes on close anyway.
d := fd.dentry()
- if d.fs.opts.interop != InteropModeExclusive {
- return nil
- }
- d.dataMu.RLock()
- haveDirtyPages := !d.dirty.IsEmpty()
- d.dataMu.RUnlock()
- if haveDirtyPages {
- return nil
+ if d.fs.opts.interop == InteropModeExclusive {
+ // d may have dirty pages that we won't write back now (and wouldn't
+ // have in VFS1), making a flushf RPC ineffective. If this is the case,
+ // skip the flushf.
+ //
+ // Note that it's also possible to have dirty pages under other interop
+ // modes if forcePageCache is in effect; we conservatively assume that
+ // applications have some way of tolerating this and still want the
+ // flushf.
+ d.dataMu.RLock()
+ haveDirtyPages := !d.dirty.IsEmpty()
+ d.dataMu.RUnlock()
+ if haveDirtyPages {
+ return nil
+ }
}
d.handleMu.RLock()
defer d.handleMu.RUnlock()
@@ -124,14 +129,14 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
}()
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
// Check for reading at EOF before calling into MM (but not under
@@ -194,14 +199,14 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off
// offset should be ignored by PWrite.
func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {
if offset < 0 {
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select pwritev2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, offset, syserror.EOPNOTSUPP
+ return 0, offset, linuxerr.EOPNOTSUPP
}
d := fd.dentry()
@@ -297,7 +302,7 @@ func (fd *regularFileFD) writeCache(ctx context.Context, d *dentry, offset int64
pgstart := hostarch.PageRoundDown(uint64(offset))
pgend, ok := hostarch.PageRoundUp(uint64(offset + src.NumBytes()))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mr := memmap.MappableRange{pgstart, pgend}
var freed []memmap.FileRange
@@ -652,20 +657,20 @@ func regularFileSeekLocked(ctx context.Context, d *dentry, fdOffset, offset int6
offset += size
case linux.SEEK_DATA:
if offset > size {
- return 0, syserror.ENXIO
+ return 0, linuxerr.ENXIO
}
// Use offset as specified.
case linux.SEEK_HOLE:
if offset > size {
- return 0, syserror.ENXIO
+ return 0, linuxerr.ENXIO
}
offset = size
}
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
return offset, nil
}
@@ -678,28 +683,28 @@ func (fd *regularFileFD) Sync(ctx context.Context) error {
// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
d := fd.dentry()
- switch d.fs.opts.interop {
- case InteropModeExclusive:
- // Any mapping is fine.
- case InteropModeWritethrough:
- // Shared writable mappings require a host FD, since otherwise we can't
- // synchronously flush memory-mapped writes to the remote file.
- if opts.Private || !opts.MaxPerms.Write {
- break
- }
- fallthrough
- case InteropModeShared:
- // All mappings require a host FD to be coherent with other filesystem
- // users.
- if d.fs.opts.forcePageCache {
- // Whether or not we have a host FD, we're not allowed to use it.
- return syserror.ENODEV
- }
- if atomic.LoadInt32(&d.mmapFD) < 0 {
- return syserror.ENODEV
+ // With forcePageCache, mappings don't require a host FD; coherence with other filesystem users is the caller's risk.
+ if !d.fs.opts.forcePageCache {
+ switch d.fs.opts.interop {
+ case InteropModeExclusive:
+ // Any mapping is fine.
+ case InteropModeWritethrough:
+ // Shared writable mappings require a host FD, since otherwise we
+ // can't synchronously flush memory-mapped writes to the remote
+ // file.
+ if opts.Private || !opts.MaxPerms.Write {
+ break
+ }
+ fallthrough
+ case InteropModeShared:
+ // All mappings require a host FD to be coherent with other
+ // filesystem users.
+ if atomic.LoadInt32(&d.mmapFD) < 0 {
+ return linuxerr.ENODEV
+ }
+ default:
+ panic(fmt.Sprintf("unknown InteropMode %v", d.fs.opts.interop))
}
- default:
- panic(fmt.Sprintf("unknown InteropMode %v", d.fs.opts.interop))
}
// After this point, d may be used as a memmap.Mappable.
d.pf.hostFileMapperInitOnce.Do(d.pf.hostFileMapper.Init)
@@ -707,14 +712,8 @@ func (fd *regularFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpt
return vfs.GenericConfigureMMap(&fd.vfsfd, d, opts)
}
-func (d *dentry) mayCachePages() bool {
- if d.fs.opts.interop == InteropModeShared {
- return false
- }
- if d.fs.opts.forcePageCache {
- return true
- }
- return atomic.LoadInt32(&d.mmapFD) >= 0
+func (fs *filesystem) mayCachePagesInMemoryFile() bool {
+ return fs.opts.forcePageCache || fs.opts.interop != InteropModeShared
}
// AddMapping implements memmap.Mappable.AddMapping.
@@ -726,7 +725,7 @@ func (d *dentry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar host
for _, r := range mapped {
d.pf.hostFileMapper.IncRefOn(r)
}
- if d.mayCachePages() {
+ if d.fs.mayCachePagesInMemoryFile() {
// d.Evict() will refuse to evict memory-mapped pages, so tell the
// MemoryFile to not bother trying.
mf := d.fs.mfp.MemoryFile()
@@ -745,7 +744,7 @@ func (d *dentry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar h
for _, r := range unmapped {
d.pf.hostFileMapper.DecRefOn(r)
}
- if d.mayCachePages() {
+ if d.fs.mayCachePagesInMemoryFile() {
// Pages that are no longer referenced by any application memory
// mappings are now considered unused; allow MemoryFile to evict them
// when necessary.
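
The ConfigureMMap rewrite above keeps the same per-interop-mode host-FD requirement but skips it entirely when forcePageCache is set. A standalone sketch of that requirement check (without the forcePageCache bypass), using illustrative names:

package main

import (
	"errors"
	"fmt"
)

type interopMode int

const (
	interopExclusive interopMode = iota
	interopWritethrough
	interopShared
)

var errENODEV = errors.New("ENODEV")

// checkMMap mirrors the switch above: exclusive mode can map anything;
// writethrough only needs a host FD for shared writable mappings; shared
// mode always needs one so other filesystem users see a coherent view.
func checkMMap(interop interopMode, privateMapping, writable, haveHostFD bool) error {
	switch interop {
	case interopExclusive:
		// Any mapping is fine.
	case interopWritethrough:
		if privateMapping || !writable {
			break
		}
		fallthrough
	case interopShared:
		if !haveHostFD {
			return errENODEV
		}
	}
	return nil
}

func main() {
	fmt.Println(checkMMap(interopShared, false, true, false))      // ENODEV
	fmt.Println(checkMMap(interopWritethrough, true, true, false)) // <nil>
}
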
diff --git a/pkg/sentry/fsimpl/gofer/revalidate.go b/pkg/sentry/fsimpl/gofer/revalidate.go
index 8f81f0822..226790a11 100644
--- a/pkg/sentry/fsimpl/gofer/revalidate.go
+++ b/pkg/sentry/fsimpl/gofer/revalidate.go
@@ -247,16 +247,16 @@ func (fs *filesystem) revalidateHelper(ctx context.Context, vfsObj *vfs.VirtualF
if found && !d.isSynthetic() {
// First dentry is where the search is starting, just update attributes
// since it cannot be replaced.
- d.updateFromP9AttrsLocked(stats[i].Valid, &stats[i].Attr)
+ d.updateFromP9AttrsLocked(stats[i].Valid, &stats[i].Attr) // +checklocksforce: acquired by lockAllMetadata.
}
- d.metadataMu.Unlock()
+ d.metadataMu.Unlock() // +checklocksforce: see above.
continue
}
// Note that synthetic dentries will always fail the comparison check

// below.
if !found || d.qidPath != stats[i].QID.Path {
- d.metadataMu.Unlock()
+ d.metadataMu.Unlock() // +checklocksforce: see above.
if !found && d.isSynthetic() {
// We have a synthetic file, and no remote file has arisen to replace
// it.
@@ -298,7 +298,7 @@ func (fs *filesystem) revalidateHelper(ctx context.Context, vfsObj *vfs.VirtualF
}
// The file at this path hasn't changed. Just update cached metadata.
- d.updateFromP9AttrsLocked(stats[i].Valid, &stats[i].Attr)
+ d.updateFromP9AttrsLocked(stats[i].Valid, &stats[i].Attr) // +checklocksforce: see above.
d.metadataMu.Unlock()
}
@@ -354,6 +354,7 @@ func (r *revalidateState) add(name string, d *dentry) {
r.dentries = append(r.dentries, d)
}
+// +checklocksignore
func (r *revalidateState) lockAllMetadata() {
for _, d := range r.dentries {
d.metadataMu.Lock()
@@ -372,6 +373,7 @@ func (r *revalidateState) popFront() *dentry {
// reset releases all metadata locks and resets all fields to allow this
// instance to be reused.
+// +checklocksignore
func (r *revalidateState) reset() {
if r.locked {
// Unlock any remaining dentries.
diff --git a/pkg/sentry/fsimpl/gofer/save_restore.go b/pkg/sentry/fsimpl/gofer/save_restore.go
index 83e841a51..8dcbc61ed 100644
--- a/pkg/sentry/fsimpl/gofer/save_restore.go
+++ b/pkg/sentry/fsimpl/gofer/save_restore.go
@@ -21,13 +21,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
type saveRestoreContextID int
@@ -92,7 +92,7 @@ func (fd *specialFileFD) savePipeData(ctx context.Context) error {
fd.buf = append(fd.buf, buf[:n]...)
}
if err != nil {
- if err == io.EOF || err == syserror.EAGAIN {
+ if err == io.EOF || linuxerr.Equals(linuxerr.EAGAIN, err) {
break
}
return err
@@ -158,6 +158,10 @@ func (d *dentryPlatformFile) afterLoad() {
// afterLoad is invoked by stateify.
func (fd *specialFileFD) afterLoad() {
fd.handle.fd = -1
+ if fd.hostFileMapper.IsInited() {
+ // Ensure that we don't call fd.hostFileMapper.Init() again.
+ fd.hostFileMapperInitOnce.Do(func() {})
+ }
}
// CompleteRestore implements
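
The afterLoad hunk above relies on a small trick worth noting: calling fd.hostFileMapperInitOnce.Do(func() {}) with a no-op consumes the sync.Once, so a later Do(Init) on the restored object will not re-run initialization. A standalone sketch of the same trick, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type mapper struct {
	initOnce sync.Once
	inited   bool
}

func (m *mapper) Init() {
	m.inited = true
	fmt.Println("Init ran")
}

// afterLoad mirrors specialFileFD.afterLoad: if the mapper was already
// initialized before save, consume the Once with a no-op so a later
// initOnce.Do(m.Init) doesn't re-run Init.
func (m *mapper) afterLoad(wasInited bool) {
	if wasInited {
		m.initOnce.Do(func() {})
		m.inited = true
	}
}

func main() {
	m := &mapper{}
	m.afterLoad(true)
	m.initOnce.Do(m.Init) // prints nothing: the Once is already consumed
	fmt.Println("inited:", m.inited)
}
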
diff --git a/pkg/sentry/fsimpl/gofer/special_file.go b/pkg/sentry/fsimpl/gofer/special_file.go
index c12444b7e..a8d47b65b 100644
--- a/pkg/sentry/fsimpl/gofer/special_file.go
+++ b/pkg/sentry/fsimpl/gofer/special_file.go
@@ -20,14 +20,17 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/safemem"
+ "gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
+ "gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -41,6 +44,11 @@ import (
type specialFileFD struct {
fileDescription
+ // releaseMu synchronizes the closing of fd.handle with fd.sync(). It's safe
+ // to access fd.handle without locking for operations that require a ref to
+ // be held by the caller, e.g. vfs.FileDescriptionImpl implementations.
+ releaseMu sync.RWMutex `state:"nosave"`
+
// handle is used for file I/O. handle is immutable.
handle handle `state:"nosave"`
@@ -70,6 +78,16 @@ type specialFileFD struct {
bufMu sync.Mutex `state:"nosave"`
haveBuf uint32
buf []byte
+
+ // If handle.fd >= 0, hostFileMapper caches mappings of handle.fd, and
+ // hostFileMapperInitOnce is used to initialize it on first use.
+ hostFileMapperInitOnce sync.Once `state:"nosave"`
+ hostFileMapper fsutil.HostFileMapper
+
+ // If handle.fd >= 0, fileRefs counts references on memmap.File offsets.
+ // fileRefs is protected by fileRefsMu.
+ fileRefsMu sync.Mutex `state:"nosave"`
+ fileRefs fsutil.FrameRefSet
}
func newSpecialFileFD(h handle, mnt *vfs.Mount, d *dentry, flags uint32) (*specialFileFD, error) {
@@ -116,7 +134,10 @@ func (fd *specialFileFD) Release(ctx context.Context) {
if fd.haveQueue {
fdnotifier.RemoveFD(fd.handle.fd)
}
+ fd.releaseMu.Lock()
fd.handle.close(ctx)
+ fd.releaseMu.Unlock()
+
fs := fd.vfsfd.Mount().Filesystem().Impl().(*filesystem)
fs.syncMu.Lock()
delete(fs.specialFileFDs, fd)
@@ -183,14 +204,14 @@ func (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
}()
if fd.seekable && offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
if d := fd.dentry(); d.cachedMetadataAuthoritative() {
@@ -221,23 +242,13 @@ func (fd *specialFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
}
}
- // Going through dst.CopyOutFrom() would hold MM locks around file
- // operations of unknown duration. For regularFileFD, doing so is necessary
- // to support mmap due to lock ordering; MM locks precede dentry.dataMu.
- // That doesn't hold here since specialFileFD doesn't client-cache data.
- // Just buffer the read instead.
- buf := make([]byte, dst.NumBytes())
- n, err := fd.handle.readToBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)), uint64(offset))
- if err == syserror.EAGAIN {
- err = syserror.ErrWouldBlock
- }
- if n == 0 {
- return bufN, err
+ rw := getHandleReadWriter(ctx, &fd.handle, offset)
+ n, err := dst.CopyOutFrom(ctx, rw)
+ putHandleReadWriter(rw)
+ if linuxerr.Equals(linuxerr.EAGAIN, err) {
+ err = linuxerr.ErrWouldBlock
}
- if cp, cperr := dst.CopyOut(ctx, buf[:n]); cperr != nil {
- return bufN + int64(cp), cperr
- }
- return bufN + int64(n), err
+ return bufN + n, err
}
// Read implements vfs.FileDescriptionImpl.Read.
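
The rewritten read path above streams through a pooled handle read/writer (getHandleReadWriter/putHandleReadWriter) instead of allocating an intermediate buffer sized to the request. A standalone sketch of that sync.Pool reuse pattern, with a hypothetical readWriter type rather than the gofer's real one:

package main

import (
	"fmt"
	"sync"
)

// readWriter is a hypothetical positional I/O adapter, reset on each use.
type readWriter struct {
	fd  int
	off int64
}

var rwPool = sync.Pool{
	New: func() interface{} { return &readWriter{} },
}

// getReadWriter returns a pooled readWriter configured for fd/offset.
func getReadWriter(fd int, off int64) *readWriter {
	rw := rwPool.Get().(*readWriter)
	rw.fd = fd
	rw.off = off
	return rw
}

// putReadWriter clears the adapter and returns it to the pool.
func putReadWriter(rw *readWriter) {
	*rw = readWriter{}
	rwPool.Put(rw)
}

func main() {
	rw := getReadWriter(3, 4096)
	fmt.Printf("using fd=%d at offset=%d\n", rw.fd, rw.off)
	putReadWriter(rw)
}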
@@ -263,14 +274,14 @@ func (fd *specialFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off
// offset should be ignored by PWrite.
func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {
if fd.seekable && offset < 0 {
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
// Check that flags are supported.
//
// TODO(gvisor.dev/issue/2601): Support select pwritev2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, offset, syserror.EOPNOTSUPP
+ return 0, offset, linuxerr.EOPNOTSUPP
}
d := fd.dentry()
@@ -308,20 +319,15 @@ func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
}
}
- // Do a buffered write. See rationale in PRead.
- buf := make([]byte, src.NumBytes())
- copied, copyErr := src.CopyIn(ctx, buf)
- if copied == 0 && copyErr != nil {
- // Only return the error if we didn't get any data.
- return 0, offset, copyErr
- }
- n, err := fd.handle.writeFromBlocksAt(ctx, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:copied])), uint64(offset))
- if err == syserror.EAGAIN {
- err = syserror.ErrWouldBlock
+ rw := getHandleReadWriter(ctx, &fd.handle, offset)
+ n, err := src.CopyInTo(ctx, rw)
+ putHandleReadWriter(rw)
+ if linuxerr.Equals(linuxerr.EAGAIN, err) {
+ err = linuxerr.ErrWouldBlock
}
// Update offset if the offset is valid.
if offset >= 0 {
- offset += int64(n)
+ offset += n
}
// Update file size for regular files.
if fd.isRegularFile {
@@ -332,10 +338,7 @@ func (fd *specialFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
atomic.StoreUint64(&d.size, uint64(offset))
}
}
- if err != nil {
- return int64(n), offset, err
- }
- return int64(n), offset, copyErr
+ return int64(n), offset, err
}
// Write implements vfs.FileDescriptionImpl.Write.
@@ -354,7 +357,7 @@ func (fd *specialFileFD) Write(ctx context.Context, src usermem.IOSequence, opts
// Seek implements vfs.FileDescriptionImpl.Seek.
func (fd *specialFileFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
if !fd.seekable {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
fd.mu.Lock()
defer fd.mu.Unlock()
@@ -372,6 +375,13 @@ func (fd *specialFileFD) Sync(ctx context.Context) error {
}
func (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool) error {
+ // Lock to ensure that this sync does not race with fd.Release().
+ fd.releaseMu.RLock()
+ defer fd.releaseMu.RUnlock()
+
+ if !fd.handle.isOpen() {
+ return nil
+ }
err := func() error {
// If we have a host FD, fsyncing it is likely to be faster than an fsync
// RPC.
@@ -396,3 +406,85 @@ func (fd *specialFileFD) sync(ctx context.Context, forFilesystemSync bool) error
}
return nil
}
+
+// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
+func (fd *specialFileFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
+ if fd.handle.fd < 0 || fd.filesystem().opts.forcePageCache {
+ return linuxerr.ENODEV
+ }
+ // After this point, fd may be used as a memmap.Mappable and memmap.File.
+ fd.hostFileMapperInitOnce.Do(fd.hostFileMapper.Init)
+ return vfs.GenericConfigureMMap(&fd.vfsfd, fd, opts)
+}
+
+// AddMapping implements memmap.Mappable.AddMapping.
+func (fd *specialFileFD) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
+ fd.hostFileMapper.IncRefOn(memmap.MappableRange{offset, offset + uint64(ar.Length())})
+ return nil
+}
+
+// RemoveMapping implements memmap.Mappable.RemoveMapping.
+func (fd *specialFileFD) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
+ fd.hostFileMapper.DecRefOn(memmap.MappableRange{offset, offset + uint64(ar.Length())})
+}
+
+// CopyMapping implements memmap.Mappable.CopyMapping.
+func (fd *specialFileFD) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
+ return fd.AddMapping(ctx, ms, dstAR, offset, writable)
+}
+
+// Translate implements memmap.Mappable.Translate.
+func (fd *specialFileFD) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
+ mr := optional
+ if fd.filesystem().opts.limitHostFDTranslation {
+ mr = maxFillRange(required, optional)
+ }
+ return []memmap.Translation{
+ {
+ Source: mr,
+ File: fd,
+ Offset: mr.Start,
+ Perms: hostarch.AnyAccess,
+ },
+ }, nil
+}
+
+// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
+func (fd *specialFileFD) InvalidateUnsavable(ctx context.Context) error {
+ return nil
+}
+
+// IncRef implements memmap.File.IncRef.
+func (fd *specialFileFD) IncRef(fr memmap.FileRange) {
+ fd.fileRefsMu.Lock()
+ defer fd.fileRefsMu.Unlock()
+ fd.fileRefs.IncRefAndAccount(fr)
+}
+
+// DecRef implements memmap.File.DecRef.
+func (fd *specialFileFD) DecRef(fr memmap.FileRange) {
+ fd.fileRefsMu.Lock()
+ defer fd.fileRefsMu.Unlock()
+ fd.fileRefs.DecRefAndAccount(fr)
+}
+
+// MapInternal implements memmap.File.MapInternal.
+func (fd *specialFileFD) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (safemem.BlockSeq, error) {
+ fd.requireHostFD()
+ return fd.hostFileMapper.MapInternal(fr, int(fd.handle.fd), at.Write)
+}
+
+// FD implements memmap.File.FD.
+func (fd *specialFileFD) FD() int {
+ fd.requireHostFD()
+ return int(fd.handle.fd)
+}
+
+func (fd *specialFileFD) requireHostFD() {
+ if fd.handle.fd < 0 {
+ // This is possible if fd was successfully mmapped before saving, then
+ // was restored without a host FD. This is unrecoverable: without a
+ // host FD, we can't mmap this file post-restore.
+ panic("gofer.specialFileFD can no longer be memory-mapped without a host FD")
+ }
+}
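
releaseMu, added above, lets concurrent sync() calls share a read lock while Release() takes the write lock around closing the handle, so sync never touches a closed FD. A minimal sketch of that guard with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type file struct {
	releaseMu sync.RWMutex
	fd        int // -1 once closed
}

// Close closes the descriptor under the write lock so no sync is in flight.
func (f *file) Close() {
	f.releaseMu.Lock()
	defer f.releaseMu.Unlock()
	f.fd = -1
}

// Sync takes the read lock and bails out if the file was already closed,
// mirroring the isOpen() check added to specialFileFD.sync.
func (f *file) Sync() error {
	f.releaseMu.RLock()
	defer f.releaseMu.RUnlock()
	if f.fd < 0 {
		return nil // closed; nothing to do
	}
	// ... an fsync of f.fd would go here ...
	return nil
}

func main() {
	f := &file{fd: 3}
	fmt.Println(f.Sync())
	f.Close()
	fmt.Println(f.Sync())
}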
diff --git a/pkg/sentry/fsimpl/gofer/symlink.go b/pkg/sentry/fsimpl/gofer/symlink.go
index 2ec819f86..dbd834c67 100644
--- a/pkg/sentry/fsimpl/gofer/symlink.go
+++ b/pkg/sentry/fsimpl/gofer/symlink.go
@@ -41,7 +41,7 @@ func (d *dentry) readlink(ctx context.Context, mnt *vfs.Mount) (string, error) {
d.haveTarget = true
d.target = target
}
- d.dataMu.Unlock()
+ d.dataMu.Unlock() // +checklocksforce: guaranteed locked from above.
}
return target, err
}
diff --git a/pkg/sentry/fsimpl/host/BUILD b/pkg/sentry/fsimpl/host/BUILD
index b94dfeb7f..180a35583 100644
--- a/pkg/sentry/fsimpl/host/BUILD
+++ b/pkg/sentry/fsimpl/host/BUILD
@@ -45,10 +45,10 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fdnotifier",
"//pkg/fspath",
"//pkg/hostarch",
- "//pkg/iovec",
"//pkg/log",
"//pkg/marshal/primitive",
"//pkg/refs",
@@ -70,7 +70,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/unet",
"//pkg/usermem",
diff --git a/pkg/sentry/fsimpl/host/host.go b/pkg/sentry/fsimpl/host/host.go
index a81f550b1..984c6e8ee 100644
--- a/pkg/sentry/fsimpl/host/host.go
+++ b/pkg/sentry/fsimpl/host/host.go
@@ -24,6 +24,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
@@ -36,11 +37,40 @@ import (
unixsocket "gvisor.dev/gvisor/pkg/sentry/socket/unix"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
+// These are the modes that are stored with virtualOwner.
+const virtualOwnerModes = linux.STATX_MODE | linux.STATX_UID | linux.STATX_GID
+
+// +stateify savable
+type virtualOwner struct {
+ // This field is initialized at creation time and is immutable.
+ enabled bool
+
+ // mu protects the fields below. They may also be read using atomic memory
+ // operations.
+ mu sync.Mutex `state:"nosave"`
+ uid uint32
+ gid uint32
+ // mode is also stored; otherwise, setting the host file to `0000` could remove
+ // access to the file.
+ mode uint32
+}
+
+func (v *virtualOwner) atomicUID() uint32 {
+ return atomic.LoadUint32(&v.uid)
+}
+
+func (v *virtualOwner) atomicGID() uint32 {
+ return atomic.LoadUint32(&v.gid)
+}
+
+func (v *virtualOwner) atomicMode() uint32 {
+ return atomic.LoadUint32(&v.mode)
+}
+
// inode implements kernfs.Inode.
//
// +stateify savable
@@ -97,6 +127,11 @@ type inode struct {
// Event queue for blocking operations.
queue waiter.Queue
+ // virtualOwner caches ownership and permission information to override the
+ // underlying file owner and permissions. This is used to allow the untrusted
+ // application to change these fields without affecting the host.
+ virtualOwner virtualOwner
+
// If haveBuf is non-zero, hostFD represents a pipe, and buf contains data
// read from the pipe from previous calls to inode.beforeSave(). haveBuf
// and buf are protected by bufMu. haveBuf is accessed using atomic memory
@@ -109,12 +144,12 @@ type inode struct {
func newInode(ctx context.Context, fs *filesystem, hostFD int, savable bool, fileType linux.FileMode, isTTY bool) (*inode, error) {
// Determine if hostFD is seekable.
_, err := unix.Seek(hostFD, 0, linux.SEEK_CUR)
- seekable := err != syserror.ESPIPE
+ seekable := !linuxerr.Equals(linuxerr.ESPIPE, err)
// We expect regular files to be seekable, as this is required for them to
// be memory-mappable.
if !seekable && fileType == unix.S_IFREG {
ctx.Infof("host.newInode: host FD %d is a non-seekable regular file", hostFD)
- return nil, syserror.ESPIPE
+ return nil, linuxerr.ESPIPE
}
i := &inode{
@@ -146,7 +181,7 @@ func newInode(ctx context.Context, fs *filesystem, hostFD int, savable bool, fil
type NewFDOptions struct {
// If Savable is true, the host file descriptor may be saved/restored by
// numeric value; the sandbox API requires a corresponding host FD with the
- // same numeric value to be provieded at time of restore.
+ // same numeric value to be provided at time of restore.
Savable bool
// If IsTTY is true, the file descriptor is a TTY.
@@ -156,6 +191,12 @@ type NewFDOptions struct {
// the new file description will inherit flags from hostFD.
HaveFlags bool
Flags uint32
+
+ // VirtualOwner allows the host file to have an owner and permissions that
+ // differ from those of the underlying host file.
+ VirtualOwner bool
+ UID auth.KUID
+ GID auth.KGID
}
// NewFD returns a vfs.FileDescription representing the given host file
@@ -167,8 +208,8 @@ func NewFD(ctx context.Context, mnt *vfs.Mount, hostFD int, opts *NewFDOptions)
}
// Retrieve metadata.
- var s unix.Stat_t
- if err := unix.Fstat(hostFD, &s); err != nil {
+ var stat unix.Stat_t
+ if err := unix.Fstat(hostFD, &stat); err != nil {
return nil, err
}
@@ -182,11 +223,19 @@ func NewFD(ctx context.Context, mnt *vfs.Mount, hostFD int, opts *NewFDOptions)
flags = uint32(flagsInt)
}
- d := &kernfs.Dentry{}
- i, err := newInode(ctx, fs, hostFD, opts.Savable, linux.FileMode(s.Mode).FileType(), opts.IsTTY)
+ fileType := linux.FileMode(stat.Mode).FileType()
+ i, err := newInode(ctx, fs, hostFD, opts.Savable, fileType, opts.IsTTY)
if err != nil {
return nil, err
}
+ if opts.VirtualOwner {
+ i.virtualOwner.enabled = true
+ i.virtualOwner.uid = uint32(opts.UID)
+ i.virtualOwner.gid = uint32(opts.GID)
+ i.virtualOwner.mode = stat.Mode
+ }
+
+ d := &kernfs.Dentry{}
d.Init(&fs.Filesystem, i)
// i.open will take a reference on d.
@@ -195,15 +244,7 @@ func NewFD(ctx context.Context, mnt *vfs.Mount, hostFD int, opts *NewFDOptions)
// For simplicity, fileDescription.offset is set to 0. Technically, we
// should only set to 0 on files that are not seekable (sockets, pipes,
// etc.), and use the offset from the host fd otherwise when importing.
- return i.open(ctx, d, mnt, flags)
-}
-
-// ImportFD sets up and returns a vfs.FileDescription from a donated fd.
-func ImportFD(ctx context.Context, mnt *vfs.Mount, hostFD int, isTTY bool) (*vfs.FileDescription, error) {
- return NewFD(ctx, mnt, hostFD, &NewFDOptions{
- Savable: true,
- IsTTY: isTTY,
- })
+ return i.open(ctx, d, mnt, fileType, flags)
}
// filesystemType implements vfs.FilesystemType.
@@ -269,7 +310,7 @@ func (fs *filesystem) MountOptions() string {
// CheckPermissions implements kernfs.Inode.CheckPermissions.
func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, ats vfs.AccessTypes) error {
var s unix.Stat_t
- if err := unix.Fstat(i.hostFD, &s); err != nil {
+ if err := i.stat(&s); err != nil {
return err
}
return vfs.GenericCheckPermissions(creds, ats, linux.FileMode(s.Mode), auth.KUID(s.Uid), auth.KGID(s.Gid))
@@ -278,7 +319,7 @@ func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, a
// Mode implements kernfs.Inode.Mode.
func (i *inode) Mode() linux.FileMode {
var s unix.Stat_t
- if err := unix.Fstat(i.hostFD, &s); err != nil {
+ if err := i.stat(&s); err != nil {
// Retrieving the mode from the host fd using fstat(2) should not fail.
// If the syscall does not succeed, something is fundamentally wrong.
panic(fmt.Sprintf("failed to retrieve mode from host fd %d: %v", i.hostFD, err))
@@ -289,10 +330,10 @@ func (i *inode) Mode() linux.FileMode {
// Stat implements kernfs.Inode.Stat.
func (i *inode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOptions) (linux.Statx, error) {
if opts.Mask&linux.STATX__RESERVED != 0 {
- return linux.Statx{}, syserror.EINVAL
+ return linux.Statx{}, linuxerr.EINVAL
}
if opts.Sync&linux.AT_STATX_SYNC_TYPE == linux.AT_STATX_SYNC_TYPE {
- return linux.Statx{}, syserror.EINVAL
+ return linux.Statx{}, linuxerr.EINVAL
}
fs := vfsfs.Impl().(*filesystem)
@@ -301,11 +342,11 @@ func (i *inode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOp
mask := opts.Mask & linux.STATX_ALL
var s unix.Statx_t
err := unix.Statx(i.hostFD, "", int(unix.AT_EMPTY_PATH|opts.Sync), int(mask), &s)
- if err == syserror.ENOSYS {
+ if linuxerr.Equals(linuxerr.ENOSYS, err) {
// Fallback to fstat(2), if statx(2) is not supported on the host.
//
// TODO(b/151263641): Remove fallback.
- return i.fstat(fs)
+ return i.statxFromStat(fs)
}
if err != nil {
return linux.Statx{}, err
@@ -329,19 +370,35 @@ func (i *inode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOp
// device numbers.
ls.Mask |= s.Mask & linux.STATX_ALL
if s.Mask&linux.STATX_TYPE != 0 {
- ls.Mode |= s.Mode & linux.S_IFMT
+ if i.virtualOwner.enabled {
+ ls.Mode |= uint16(i.virtualOwner.atomicMode()) & linux.S_IFMT
+ } else {
+ ls.Mode |= s.Mode & linux.S_IFMT
+ }
}
if s.Mask&linux.STATX_MODE != 0 {
- ls.Mode |= s.Mode &^ linux.S_IFMT
+ if i.virtualOwner.enabled {
+ ls.Mode |= uint16(i.virtualOwner.atomicMode()) &^ linux.S_IFMT
+ } else {
+ ls.Mode |= s.Mode &^ linux.S_IFMT
+ }
}
if s.Mask&linux.STATX_NLINK != 0 {
ls.Nlink = s.Nlink
}
if s.Mask&linux.STATX_UID != 0 {
- ls.UID = s.Uid
+ if i.virtualOwner.enabled {
+ ls.UID = i.virtualOwner.atomicUID()
+ } else {
+ ls.UID = s.Uid
+ }
}
if s.Mask&linux.STATX_GID != 0 {
- ls.GID = s.Gid
+ if i.virtualOwner.enabled {
+ ls.GID = i.virtualOwner.atomicGID()
+ } else {
+ ls.GID = s.Gid
+ }
}
if s.Mask&linux.STATX_ATIME != 0 {
ls.Atime = unixToLinuxStatxTimestamp(s.Atime)
@@ -365,7 +422,7 @@ func (i *inode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOp
return ls, nil
}
-// fstat is a best-effort fallback for inode.Stat() if the host does not
+// statxFromStat is a best-effort fallback for inode.Stat() if the host does not
// support statx(2).
//
// We ignore the mask and sync flags in opts and simply supply
@@ -373,9 +430,9 @@ func (i *inode) Stat(ctx context.Context, vfsfs *vfs.Filesystem, opts vfs.StatOp
// of a mask or sync flags. fstat(2) does not provide any metadata
// equivalent to Statx.Attributes, Statx.AttributesMask, or Statx.Btime, so
// those fields remain empty.
-func (i *inode) fstat(fs *filesystem) (linux.Statx, error) {
+func (i *inode) statxFromStat(fs *filesystem) (linux.Statx, error) {
var s unix.Stat_t
- if err := unix.Fstat(i.hostFD, &s); err != nil {
+ if err := i.stat(&s); err != nil {
return linux.Statx{}, err
}
@@ -399,7 +456,21 @@ func (i *inode) fstat(fs *filesystem) (linux.Statx, error) {
}, nil
}
+func (i *inode) stat(stat *unix.Stat_t) error {
+ if err := unix.Fstat(i.hostFD, stat); err != nil {
+ return err
+ }
+ if i.virtualOwner.enabled {
+ stat.Uid = i.virtualOwner.atomicUID()
+ stat.Gid = i.virtualOwner.atomicGID()
+ stat.Mode = i.virtualOwner.atomicMode()
+ }
+ return nil
+}
+
// SetStat implements kernfs.Inode.SetStat.
+//
+// +checklocksignore
func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {
s := &opts.Stat
@@ -407,11 +478,22 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre
if m == 0 {
return nil
}
- if m&^(linux.STATX_MODE|linux.STATX_SIZE|linux.STATX_ATIME|linux.STATX_MTIME) != 0 {
- return syserror.EPERM
+ supportedModes := uint32(linux.STATX_MODE | linux.STATX_SIZE | linux.STATX_ATIME | linux.STATX_MTIME)
+ if i.virtualOwner.enabled {
+ if m&virtualOwnerModes != 0 {
+ // Take lock if any of the virtual owner fields will be updated.
+ i.virtualOwner.mu.Lock()
+ defer i.virtualOwner.mu.Unlock()
+ }
+
+ supportedModes |= virtualOwnerModes
}
+ if m&^supportedModes != 0 {
+ return linuxerr.EPERM
+ }
+
var hostStat unix.Stat_t
- if err := unix.Fstat(i.hostFD, &hostStat); err != nil {
+ if err := i.stat(&hostStat); err != nil {
return err
}
if err := vfs.CheckSetStat(ctx, creds, &opts, linux.FileMode(hostStat.Mode), auth.KUID(hostStat.Uid), auth.KGID(hostStat.Gid)); err != nil {
@@ -419,13 +501,17 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre
}
if m&linux.STATX_MODE != 0 {
- if err := unix.Fchmod(i.hostFD, uint32(s.Mode)); err != nil {
- return err
+ if i.virtualOwner.enabled {
+ i.virtualOwner.mode = uint32(opts.Stat.Mode)
+ } else {
+ if err := unix.Fchmod(i.hostFD, uint32(s.Mode)); err != nil {
+ return err
+ }
}
}
if m&linux.STATX_SIZE != 0 {
if hostStat.Mode&linux.S_IFMT != linux.S_IFREG {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if err := unix.Ftruncate(i.hostFD, int64(s.Size)); err != nil {
return err
@@ -448,6 +534,14 @@ func (i *inode) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *auth.Cre
return err
}
}
+ if i.virtualOwner.enabled {
+ if m&linux.STATX_UID != 0 {
+ i.virtualOwner.uid = opts.Stat.UID
+ }
+ if m&linux.STATX_GID != 0 {
+ i.virtualOwner.gid = opts.Stat.GID
+ }
+ }
return nil
}
@@ -470,18 +564,17 @@ func (i *inode) DecRef(ctx context.Context) {
func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
// Once created, we cannot re-open a socket fd through /proc/[pid]/fd/.
if i.Mode().FileType() == linux.S_IFSOCK {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
- return i.open(ctx, d, rp.Mount(), opts.Flags)
-}
-
-func (i *inode) open(ctx context.Context, d *kernfs.Dentry, mnt *vfs.Mount, flags uint32) (*vfs.FileDescription, error) {
- var s unix.Stat_t
- if err := unix.Fstat(i.hostFD, &s); err != nil {
+ var stat unix.Stat_t
+ if err := i.stat(&stat); err != nil {
return nil, err
}
- fileType := s.Mode & linux.FileTypeMask
+ fileType := linux.FileMode(stat.Mode).FileType()
+ return i.open(ctx, d, rp.Mount(), fileType, opts.Flags)
+}
+func (i *inode) open(ctx context.Context, d *kernfs.Dentry, mnt *vfs.Mount, fileType linux.FileMode, flags uint32) (*vfs.FileDescription, error) {
// Constrain flags to a subset we can handle.
//
// TODO(gvisor.dev/issue/2601): Support O_NONBLOCK by adding RWF_NOWAIT to pread/pwrite calls.
@@ -491,7 +584,7 @@ func (i *inode) open(ctx context.Context, d *kernfs.Dentry, mnt *vfs.Mount, flag
case unix.S_IFSOCK:
if i.isTTY {
log.Warningf("cannot use host socket fd %d as TTY", i.hostFD)
- return nil, syserror.ENOTTY
+ return nil, linuxerr.ENOTTY
}
ep, err := newEndpoint(ctx, i.hostFD, &i.queue)
@@ -529,7 +622,7 @@ func (i *inode) open(ctx context.Context, d *kernfs.Dentry, mnt *vfs.Mount, flag
default:
log.Warningf("cannot import host fd %d with file type %o", i.hostFD, fileType)
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
}
@@ -584,12 +677,12 @@ func (f *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, off
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
i := f.inode
if !i.seekable {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
return readFromHostFD(ctx, i.hostFD, dst, offset, opts.Flags)
@@ -601,7 +694,7 @@ func (f *fileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^linux.RWF_HIPRI != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
i := f.inode
@@ -618,7 +711,7 @@ func (f *fileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts
if total != 0 {
err = nil
} else {
- err = syserror.ErrWouldBlock
+ err = linuxerr.ErrWouldBlock
}
}
return total, err
@@ -660,7 +753,7 @@ func readFromHostFD(ctx context.Context, hostFD int, dst usermem.IOSequence, off
// PWrite implements vfs.FileDescriptionImpl.PWrite.
func (f *fileDescription) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
if !f.inode.seekable {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
return f.writeToHostFD(ctx, src, offset, opts.Flags)
@@ -672,7 +765,7 @@ func (f *fileDescription) Write(ctx context.Context, src usermem.IOSequence, opt
if !i.seekable {
n, err := f.writeToHostFD(ctx, src, -1, opts.Flags)
if isBlockError(err) {
- err = syserror.ErrWouldBlock
+ err = linuxerr.ErrWouldBlock
}
return n, err
}
@@ -700,7 +793,7 @@ func (f *fileDescription) writeToHostFD(ctx context.Context, src usermem.IOSeque
hostFD := f.inode.hostFD
// TODO(gvisor.dev/issue/2601): Support select pwritev2 flags.
if flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
writer := hostfd.GetReadWriterAt(int32(hostFD), offset, flags)
n, err := src.CopyInTo(ctx, writer)
@@ -721,7 +814,7 @@ func (f *fileDescription) writeToHostFD(ctx context.Context, src usermem.IOSeque
func (f *fileDescription) Seek(_ context.Context, offset int64, whence int32) (int64, error) {
i := f.inode
if !i.seekable {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
f.offsetMu.Lock()
@@ -730,17 +823,17 @@ func (f *fileDescription) Seek(_ context.Context, offset int64, whence int32) (i
switch whence {
case linux.SEEK_SET:
if offset < 0 {
- return f.offset, syserror.EINVAL
+ return f.offset, linuxerr.EINVAL
}
f.offset = offset
case linux.SEEK_CUR:
// Check for overflow. Note that underflow cannot occur, since f.offset >= 0.
if offset > math.MaxInt64-f.offset {
- return f.offset, syserror.EOVERFLOW
+ return f.offset, linuxerr.EOVERFLOW
}
if f.offset+offset < 0 {
- return f.offset, syserror.EINVAL
+ return f.offset, linuxerr.EINVAL
}
f.offset += offset
@@ -753,10 +846,10 @@ func (f *fileDescription) Seek(_ context.Context, offset int64, whence int32) (i
// Check for overflow. Note that underflow cannot occur, since size >= 0.
if offset > math.MaxInt64-size {
- return f.offset, syserror.EOVERFLOW
+ return f.offset, linuxerr.EOVERFLOW
}
if size+offset < 0 {
- return f.offset, syserror.EINVAL
+ return f.offset, linuxerr.EINVAL
}
f.offset = size + offset
@@ -773,7 +866,7 @@ func (f *fileDescription) Seek(_ context.Context, offset int64, whence int32) (i
default:
// Invalid whence.
- return f.offset, syserror.EINVAL
+ return f.offset, linuxerr.EINVAL
}
return f.offset, nil
@@ -790,7 +883,7 @@ func (f *fileDescription) ConfigureMMap(_ context.Context, opts *memmap.MMapOpts
// NOTE(b/38213152): Technically, some obscure char devices can be memory
// mapped, but we only allow regular files.
if f.inode.ftype != unix.S_IFREG {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
i := f.inode
i.CachedMappable.InitFileMapperOnce()
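
virtualOwner overlays sandbox-visible uid/gid/mode on the host file: Stat reads the cached values with atomic loads, while SetStat takes virtualOwner.mu before updating them and leaves the host file untouched. A standalone sketch of the same overlay idea (illustrative only, not the host package's API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// owner overlays uid/gid/mode without touching the underlying host file.
type owner struct {
	enabled bool
	mu      sync.Mutex // serializes updates
	uid     uint32
	gid     uint32
	mode    uint32
}

func (o *owner) UID() uint32  { return atomic.LoadUint32(&o.uid) }
func (o *owner) Mode() uint32 { return atomic.LoadUint32(&o.mode) }

// chmod updates only the cached mode; the host file is left untouched.
func (o *owner) chmod(mode uint32) {
	o.mu.Lock()
	defer o.mu.Unlock()
	atomic.StoreUint32(&o.mode, mode)
}

func main() {
	o := &owner{enabled: true, uid: 1000, mode: 0o644}
	o.chmod(0o600)
	fmt.Printf("uid=%d mode=%o\n", o.UID(), o.Mode())
}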
diff --git a/pkg/sentry/fsimpl/host/socket.go b/pkg/sentry/fsimpl/host/socket.go
index ca85f5601..709d5747d 100644
--- a/pkg/sentry/fsimpl/host/socket.go
+++ b/pkg/sentry/fsimpl/host/socket.go
@@ -21,6 +21,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/socket/control"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/unet"
"gvisor.dev/gvisor/pkg/waiter"
@@ -158,9 +158,9 @@ func (c *ConnectedEndpoint) Send(ctx context.Context, data [][]byte, controlMess
if n < totalLen && err == nil {
// The host only returns a short write if it would otherwise
// block (and only for stream sockets).
- err = syserror.EAGAIN
+ err = linuxerr.EAGAIN
}
- if n > 0 && err != syserror.EAGAIN {
+ if n > 0 && !linuxerr.Equals(linuxerr.EAGAIN, err) {
// The caller may need to block to send more data, but
// otherwise there isn't anything that can be done about an
// error with a partial write.
diff --git a/pkg/sentry/fsimpl/host/socket_iovec.go b/pkg/sentry/fsimpl/host/socket_iovec.go
index b123a63ee..292b44c43 100644
--- a/pkg/sentry/fsimpl/host/socket_iovec.go
+++ b/pkg/sentry/fsimpl/host/socket_iovec.go
@@ -16,8 +16,8 @@ package host
import (
"golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/iovec"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/sentry/hostfd"
)
// copyToMulti copies as many bytes from src to dst as possible.
@@ -64,13 +64,13 @@ func buildIovec(bufs [][]byte, maxlen int64, truncate bool) (length int64, iovec
if length > maxlen {
if truncate {
stopLen = maxlen
- err = syserror.EAGAIN
+ err = linuxerr.EAGAIN
} else {
- return 0, nil, nil, syserror.EMSGSIZE
+ return 0, nil, nil, linuxerr.EMSGSIZE
}
}
- if iovsRequired > iovec.MaxIovs {
+ if iovsRequired > hostfd.MaxSendRecvMsgIov {
// The kernel will reject our call if we pass this many iovs.
// Use a single intermediate buffer instead.
b := make([]byte, stopLen)
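
The hunk above swaps iovec.MaxIovs for hostfd.MaxSendRecvMsgIov as the cap on iovecs handed to the host; past that limit the buffers are coalesced into one intermediate slice so the host call is not rejected. A sketch of that build-or-coalesce decision using golang.org/x/sys/unix, where maxIovs is a hypothetical stand-in for the real limit:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// maxIovs is an illustrative stand-in for the host's UIO_MAXIOV-style limit.
const maxIovs = 1024

// buildIovecs returns one iovec per buffer, or a single intermediate buffer
// when the host would reject that many iovecs.
func buildIovecs(bufs [][]byte) (iovs []unix.Iovec, intermediate []byte) {
	if len(bufs) > maxIovs {
		total := 0
		for _, b := range bufs {
			total += len(b)
		}
		// Coalesce into one buffer; the caller scatters the data afterwards.
		intermediate = make([]byte, total)
		if total > 0 {
			iovs = append(iovs, newIovec(intermediate))
		}
		return iovs, intermediate
	}
	for _, b := range bufs {
		if len(b) == 0 {
			continue
		}
		iovs = append(iovs, newIovec(b))
	}
	return iovs, nil
}

func newIovec(b []byte) unix.Iovec {
	iov := unix.Iovec{Base: &b[0]}
	iov.SetLen(len(b))
	return iov
}

func main() {
	iovs, _ := buildIovecs([][]byte{make([]byte, 8), make([]byte, 16)})
	fmt.Println("iovecs:", len(iovs))
}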
diff --git a/pkg/sentry/fsimpl/host/tty.go b/pkg/sentry/fsimpl/host/tty.go
index 0f9e20a84..04ac73255 100644
--- a/pkg/sentry/fsimpl/host/tty.go
+++ b/pkg/sentry/fsimpl/host/tty.go
@@ -17,13 +17,13 @@ package host
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -147,7 +147,7 @@ func (t *TTYFileDescription) Write(ctx context.Context, src usermem.IOSequence,
func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
task := kernel.TaskFromContext(ctx)
if task == nil {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// Ignore arg[0]. This is the real FD:
@@ -188,7 +188,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch
pidns := kernel.PIDNamespaceFromContext(ctx)
if pidns == nil {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
t.mu.Lock()
@@ -211,15 +211,15 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch
if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
// drivers/tty/tty_io.c:tiocspgrp() converts -EIO from tty_check_change()
// to -ENOTTY.
- if err == syserror.EIO {
- return 0, syserror.ENOTTY
+ if linuxerr.Equals(linuxerr.EIO, err) {
+ return 0, linuxerr.ENOTTY
}
return 0, err
}
// Check that calling task's process group is in the TTY session.
if task.ThreadGroup().Session() != t.session {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
var pgIDP primitive.Int32
@@ -230,19 +230,19 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch
// pgID must be non-negative.
if pgID < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Process group with pgID must exist in this PID namespace.
pidns := task.PIDNamespace()
pg := pidns.ProcessGroupWithID(pgID)
if pg == nil {
- return 0, syserror.ESRCH
+ return 0, linuxerr.ESRCH
}
// Check that new process group is in the TTY session.
if pg.Session() != t.session {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
t.fgProcessGroup = pg
@@ -302,7 +302,7 @@ func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, args arch
unimpl.EmitUnimplementedEvent(ctx)
fallthrough
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
@@ -345,7 +345,7 @@ func (t *TTYFileDescription) checkChange(ctx context.Context, sig linux.Signal)
// If the signal is SIGTTIN, then we are attempting to read
// from the TTY. Don't send the signal and return EIO.
if sig == linux.SIGTTIN {
- return syserror.EIO
+ return linuxerr.EIO
}
// Otherwise, we are writing or changing terminal state. This is allowed.
@@ -354,7 +354,7 @@ func (t *TTYFileDescription) checkChange(ctx context.Context, sig linux.Signal)
// If the process group is an orphan, return EIO.
if pg.IsOrphan() {
- return syserror.EIO
+ return linuxerr.EIO
}
// Otherwise, send the signal to the process group and return ERESTARTSYS.
@@ -367,5 +367,5 @@ func (t *TTYFileDescription) checkChange(ctx context.Context, sig linux.Signal)
//
// Linux ignores the result of kill_pgrp().
_ = pg.SendSignal(kernel.SignalInfoPriv(sig))
- return syserror.ERESTARTSYS
+ return linuxerr.ERESTARTSYS
}
diff --git a/pkg/sentry/fsimpl/host/util.go b/pkg/sentry/fsimpl/host/util.go
index 63b465859..9850f3f41 100644
--- a/pkg/sentry/fsimpl/host/util.go
+++ b/pkg/sentry/fsimpl/host/util.go
@@ -17,7 +17,7 @@ package host
import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
func toTimespec(ts linux.StatxTimestamp, omit bool) unix.Timespec {
@@ -42,7 +42,7 @@ func timespecToStatxTimestamp(ts unix.Timespec) linux.StatxTimestamp {
}
// isBlockError checks if an error is EAGAIN or EWOULDBLOCK.
-// If so, they can be transformed into syserror.ErrWouldBlock.
+// If so, they can be transformed into linuxerr.ErrWouldBlock.
func isBlockError(err error) bool {
- return err == syserror.EAGAIN || err == syserror.EWOULDBLOCK
+ return linuxerr.Equals(linuxerr.EAGAIN, err) || linuxerr.Equals(linuxerr.EWOULDBLOCK, err)
}
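
isBlockError now compares errnos through linuxerr.Equals rather than ==. As a host-level analogue (raw unix errnos, not gVisor's linuxerr helpers), the same "retry later" classification looks like this:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// isBlockError mirrors the helper above using raw host errnos: EAGAIN and
// EWOULDBLOCK both mean "retry when the FD becomes ready".
func isBlockError(err error) bool {
	return err == unix.EAGAIN || err == unix.EWOULDBLOCK
}

func main() {
	// Create a nonblocking pipe and read from the empty end to get EAGAIN.
	var fds [2]int
	if err := unix.Pipe2(fds[:], unix.O_NONBLOCK); err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	buf := make([]byte, 8)
	_, err := unix.Read(fds[0], buf)
	fmt.Println("would block:", isBlockError(err))
}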
diff --git a/pkg/sentry/fsimpl/kernfs/BUILD b/pkg/sentry/fsimpl/kernfs/BUILD
index b7d13cced..4b577ea43 100644
--- a/pkg/sentry/fsimpl/kernfs/BUILD
+++ b/pkg/sentry/fsimpl/kernfs/BUILD
@@ -104,6 +104,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/log",
@@ -118,7 +119,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
@@ -135,6 +135,8 @@ go_test(
":kernfs",
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
+ "//pkg/fspath",
"//pkg/log",
"//pkg/refs",
"//pkg/refsvfs2",
@@ -142,7 +144,6 @@ go_test(
"//pkg/sentry/fsimpl/testutil",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
"@com_github_google_go_cmp//cmp:go_default_library",
],
diff --git a/pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go b/pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go
index 84b1c3745..9d7526e47 100644
--- a/pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go
+++ b/pkg/sentry/fsimpl/kernfs/dynamic_bytes_file.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -71,7 +71,7 @@ func (f *DynamicBytesFile) Open(ctx context.Context, rp *vfs.ResolvingPath, d *D
// inode attributes to be changed. Override SetStat() making it call
// f.InodeAttrs to allow it.
func (*DynamicBytesFile) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// DynamicBytesFD implements vfs.FileDescriptionImpl for an FD backed by a
@@ -137,5 +137,5 @@ func (fd *DynamicBytesFD) Stat(ctx context.Context, opts vfs.StatOptions) (linux
// SetStat implements vfs.FileDescriptionImpl.SetStat.
func (fd *DynamicBytesFD) SetStat(context.Context, vfs.SetStatOptions) error {
// DynamicBytesFiles are immutable.
- return syserror.EPERM
+ return linuxerr.EPERM
}
diff --git a/pkg/sentry/fsimpl/kernfs/fd_impl_util.go b/pkg/sentry/fsimpl/kernfs/fd_impl_util.go
index e55111af0..7db1473c4 100644
--- a/pkg/sentry/fsimpl/kernfs/fd_impl_util.go
+++ b/pkg/sentry/fsimpl/kernfs/fd_impl_util.go
@@ -19,11 +19,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -98,7 +98,7 @@ func NewGenericDirectoryFD(m *vfs.Mount, d *Dentry, children *OrderedChildren, l
func (fd *GenericDirectoryFD) Init(children *OrderedChildren, locks *vfs.FileLocks, opts *vfs.OpenOptions, fdOpts GenericDirectoryFDOptions) error {
if vfs.AccessTypesForOpenFlags(opts)&vfs.MayWrite != 0 {
// Can't open directories for writing.
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
fd.LockFD.Init(locks)
fd.seekEnd = fdOpts.SeekEnd
@@ -248,10 +248,10 @@ func (fd *GenericDirectoryFD) Seek(ctx context.Context, offset int64, whence int
panic(fmt.Sprintf("Invalid GenericDirectoryFD.seekEnd = %v", fd.seekEnd))
}
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
fd.off = offset
return offset, nil
diff --git a/pkg/sentry/fsimpl/kernfs/filesystem.go b/pkg/sentry/fsimpl/kernfs/filesystem.go
index 8fac53c60..363ebc466 100644
--- a/pkg/sentry/fsimpl/kernfs/filesystem.go
+++ b/pkg/sentry/fsimpl/kernfs/filesystem.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// stepExistingLocked resolves rp.Component() in parent directory vfsd.
@@ -39,7 +39,7 @@ import (
// Postcondition: Caller must call fs.processDeferredDecRefs*.
func (fs *Filesystem) stepExistingLocked(ctx context.Context, rp *vfs.ResolvingPath, d *Dentry, mayFollowSymlinks bool) (*Dentry, error) {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// Directory searchable?
if err := d.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayExec); err != nil {
@@ -70,7 +70,7 @@ afterSymlink:
return d.parent, nil
}
if len(name) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
d.dirMu.Lock()
next, err := fs.revalidateChildLocked(ctx, rp.VirtualFilesystem(), d, name, d.children[name])
@@ -169,7 +169,7 @@ func (fs *Filesystem) walkExistingLocked(ctx context.Context, rp *vfs.ResolvingP
}
}
if rp.MustBeDir() && !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -196,7 +196,7 @@ func (fs *Filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving
}
}
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -214,16 +214,16 @@ func checkCreateLocked(ctx context.Context, creds *auth.Credentials, name string
return err
}
if name == "." || name == ".." {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if len(name) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
if _, ok := parent.children[name]; ok {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if parent.VFSDentry().IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := parent.inode.CheckPermissions(ctx, creds, vfs.MayWrite); err != nil {
return err
@@ -237,10 +237,10 @@ func checkCreateLocked(ctx context.Context, creds *auth.Credentials, name string
func checkDeleteLocked(ctx context.Context, rp *vfs.ResolvingPath, d *Dentry) error {
parent := d.parent
if parent == nil {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
if parent.vfsd.IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := parent.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayWrite|vfs.MayExec); err != nil {
return err
@@ -317,7 +317,7 @@ func (fs *Filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, op
if opts.CheckSearchable {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -344,7 +344,7 @@ func (fs *Filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa
// LinkAt implements vfs.FilesystemImpl.LinkAt.
func (fs *Filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
if rp.Done() {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
fs.mu.Lock()
defer fs.processDeferredDecRefs(ctx)
@@ -361,10 +361,10 @@ func (fs *Filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.
return err
}
if rp.MustBeDir() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if rp.Mount() != vd.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := rp.Mount().CheckBeginWrite(); err != nil {
return err
@@ -373,7 +373,7 @@ func (fs *Filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.
d := vd.Dentry().Impl().(*Dentry)
if d.isDir() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
childI, err := parent.inode.NewLink(ctx, pc, d.inode)
@@ -389,7 +389,7 @@ func (fs *Filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.
// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
func (fs *Filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
if rp.Done() {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
fs.mu.Lock()
defer fs.processDeferredDecRefs(ctx)
@@ -411,7 +411,7 @@ func (fs *Filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
defer rp.Mount().EndWrite()
childI, err := parent.inode.NewDir(ctx, pc, opts)
if err != nil {
- if !opts.ForSyntheticMountpoint || err == syserror.EEXIST {
+ if !opts.ForSyntheticMountpoint || linuxerr.Equals(linuxerr.EEXIST, err) {
return err
}
childI = newSyntheticDirectory(ctx, rp.Credentials(), opts.Mode)
@@ -425,7 +425,7 @@ func (fs *Filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
// MknodAt implements vfs.FilesystemImpl.MknodAt.
func (fs *Filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
if rp.Done() {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
fs.mu.Lock()
defer fs.processDeferredDecRefs(ctx)
@@ -442,7 +442,7 @@ func (fs *Filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
return err
}
if rp.MustBeDir() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := rp.Mount().CheckBeginWrite(); err != nil {
return err
@@ -508,10 +508,10 @@ func (fs *Filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
defer unlock()
if rp.Done() {
if rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
if err := d.inode.CheckPermissions(ctx, rp.Credentials(), ats); err != nil {
return nil, err
@@ -535,18 +535,18 @@ afterTrailingSymlink:
}
// Reject attempts to open directories with O_CREAT.
if rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
pc := rp.Component()
if pc == "." || pc == ".." {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if len(pc) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
// Determine whether or not we need to create a file.
child, err := fs.stepExistingLocked(ctx, rp, parent, false /* mayFollowSymlinks */)
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
// Already checked for searchability above; now check for writability.
if err := parent.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayWrite); err != nil {
return nil, err
@@ -576,7 +576,7 @@ afterTrailingSymlink:
}
// Open existing file or follow symlink.
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
if rp.ShouldFollowSymlink() && child.isSymlink() {
targetVD, targetPathname, err := child.inode.Getlink(ctx, rp.Mount())
@@ -622,7 +622,7 @@ func (fs *Filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (st
}
if !d.isSymlink() {
fs.mu.RUnlock()
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
// Inode.Readlink() cannot be called holding fs locks.
@@ -648,13 +648,13 @@ func (fs *Filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
// Only RENAME_NOREPLACE is supported.
if opts.Flags&^linux.RENAME_NOREPLACE != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
noReplace := opts.Flags&linux.RENAME_NOREPLACE != 0
mnt := rp.Mount()
if mnt != oldParentVD.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := mnt.CheckBeginWrite(); err != nil {
return err
@@ -680,17 +680,19 @@ func (fs *Filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
newName := rp.Component()
if newName == "." || newName == ".." {
if noReplace {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
- switch err := checkCreateLocked(ctx, rp.Credentials(), newName, dstDir); err {
- case nil:
+
+ err = checkCreateLocked(ctx, rp.Credentials(), newName, dstDir)
+ switch {
+ case err == nil:
// Ok, continue with rename as replacement.
- case syserror.EEXIST:
+ case linuxerr.Equals(linuxerr.EEXIST, err):
if noReplace {
// Won't overwrite existing node since RENAME_NOREPLACE was requested.
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
dst = dstDir.children[newName]
if dst == nil {
@@ -749,7 +751,7 @@ func (fs *Filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
fs.deferDecRef(replaced)
replaceVFSD = replaced.VFSDentry()
}
- virtfs.CommitRenameReplaceDentry(ctx, srcVFSD, replaceVFSD)
+ virtfs.CommitRenameReplaceDentry(ctx, srcVFSD, replaceVFSD) // +checklocksforce: to may be nil, that's okay.
return nil
}
@@ -771,10 +773,10 @@ func (fs *Filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
return err
}
if !d.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
if d.inode.HasChildren() {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
virtfs := rp.VirtualFilesystem()
parentDentry := d.parent
@@ -785,7 +787,7 @@ func (fs *Filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
defer mntns.DecRef(ctx)
vfsd := d.VFSDentry()
if err := virtfs.PrepareDeleteDentry(mntns, vfsd); err != nil {
- return err
+ return err // +checklocksforce: vfsd is not locked.
}
if err := parentDentry.inode.RmDir(ctx, d.name, d.inode); err != nil {
@@ -841,7 +843,7 @@ func (fs *Filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu
// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
func (fs *Filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
if rp.Done() {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
fs.mu.Lock()
defer fs.processDeferredDecRefs(ctx)
@@ -858,7 +860,7 @@ func (fs *Filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, targ
return err
}
if rp.MustBeDir() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := rp.Mount().CheckBeginWrite(); err != nil {
return err
@@ -892,7 +894,7 @@ func (fs *Filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error
return err
}
if d.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
virtfs := rp.VirtualFilesystem()
parentDentry := d.parent
@@ -927,7 +929,7 @@ func (fs *Filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath
if err := d.inode.CheckPermissions(ctx, rp.Credentials(), vfs.MayWrite); err != nil {
return nil, err
}
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
@@ -940,7 +942,7 @@ func (fs *Filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, si
return nil, err
}
// kernfs currently does not support extended attributes.
- return nil, syserror.ENOTSUP
+ return nil, linuxerr.ENOTSUP
}
// GetXattrAt implements vfs.FilesystemImpl.GetXattrAt.
@@ -953,7 +955,7 @@ func (fs *Filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt
return "", err
}
// kernfs currently does not support extended attributes.
- return "", syserror.ENOTSUP
+ return "", linuxerr.ENOTSUP
}
// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
@@ -966,7 +968,7 @@ func (fs *Filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt
return err
}
// kernfs currently does not support extended attributes.
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
}
// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
@@ -979,7 +981,7 @@ func (fs *Filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath,
return err
}
// kernfs currently does not support extended attributes.
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
}
// PrependPath implements vfs.FilesystemImpl.PrependPath.
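
The RenameAt hunk above replaces a value switch on the error with a tagless switch driven by linuxerr.Equals, since errno matches after the syserror removal go through a helper rather than direct equality. The standard library's errors.Is gives the same shape; the snippet below is an analogue only, not gVisor's helper:

package main

import (
	"errors"
	"fmt"
)

var errExist = errors.New("file exists")

// insert is a placeholder that fails with a wrapped errExist on duplicates.
func insert(set map[string]bool, name string) error {
	if set[name] {
		return fmt.Errorf("insert %q: %w", name, errExist)
	}
	set[name] = true
	return nil
}

func main() {
	set := map[string]bool{"old": true}
	err := insert(set, "old")
	switch {
	case err == nil:
		fmt.Println("created")
	case errors.Is(err, errExist):
		// Comparable to the linuxerr.Equals(linuxerr.EEXIST, err) branch above:
		// wrapped errors defeat a plain value switch, so compare via a helper.
		fmt.Println("already exists")
	default:
		fmt.Println("unexpected error:", err)
	}
}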
diff --git a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
index 3d0866ecf..b96dc9ef7 100644
--- a/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
+++ b/pkg/sentry/fsimpl/kernfs/inode_impl_util.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// InodeNoopRefCount partially implements the Inode interface, specifically the
@@ -61,27 +61,27 @@ type InodeDirectoryNoNewChildren struct{}
// NewFile implements Inode.NewFile.
func (InodeDirectoryNoNewChildren) NewFile(context.Context, string, vfs.OpenOptions) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewDir implements Inode.NewDir.
func (InodeDirectoryNoNewChildren) NewDir(context.Context, string, vfs.MkdirOptions) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewLink implements Inode.NewLink.
func (InodeDirectoryNoNewChildren) NewLink(context.Context, string, Inode) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewSymlink implements Inode.NewSymlink.
func (InodeDirectoryNoNewChildren) NewSymlink(context.Context, string, string) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewNode implements Inode.NewNode.
func (InodeDirectoryNoNewChildren) NewNode(context.Context, string, vfs.MknodOptions) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// InodeNotDirectory partially implements the Inode interface, specifically the
@@ -158,12 +158,12 @@ type InodeNotSymlink struct{}
// Readlink implements Inode.Readlink.
func (InodeNotSymlink) Readlink(context.Context, *vfs.Mount) (string, error) {
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
// Getlink implements Inode.Getlink.
func (InodeNotSymlink) Getlink(context.Context, *vfs.Mount) (vfs.VirtualDentry, string, error) {
- return vfs.VirtualDentry{}, "", syserror.EINVAL
+ return vfs.VirtualDentry{}, "", linuxerr.EINVAL
}
// InodeAttrs partially implements the Inode interface, specifically the
@@ -233,6 +233,11 @@ func (a *InodeAttrs) Mode() linux.FileMode {
return linux.FileMode(atomic.LoadUint32(&a.mode))
}
+// Links returns the link count.
+func (a *InodeAttrs) Links() uint32 {
+ return atomic.LoadUint32(&a.nlink)
+}
+
// TouchAtime updates a.atime to the current time.
func (a *InodeAttrs) TouchAtime(ctx context.Context, mnt *vfs.Mount) {
if mnt.Flags.NoATime || mnt.ReadOnly() {
@@ -285,10 +290,10 @@ func (a *InodeAttrs) SetStat(ctx context.Context, fs *vfs.Filesystem, creds *aut
// allowed by kernfs files but does not do anything. If some other behavior is
// needed, the embedder should consider extending SetStat.
if opts.Stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID|linux.STATX_ATIME|linux.STATX_MTIME|linux.STATX_SIZE) != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if opts.Stat.Mask&linux.STATX_SIZE != 0 && a.Mode().IsDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if err := vfs.CheckSetStat(ctx, creds, &opts, a.Mode(), auth.KUID(atomic.LoadUint32(&a.uid)), auth.KGID(atomic.LoadUint32(&a.gid))); err != nil {
return err
@@ -474,7 +479,7 @@ func (o *OrderedChildren) Lookup(ctx context.Context, name string) (Inode, error
s, ok := o.set[name]
if !ok {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
s.inode.IncRef() // This ref is passed to the dentry upon creation via Init.
@@ -501,6 +506,30 @@ func (o *OrderedChildren) Insert(name string, child Inode) error {
return o.insert(name, child, false)
}
+// Inserter is like Insert, but obtains the child to insert by calling
+// makeChild. makeChild is only called if the insert will succeed. This allows
+// the caller to atomically check and insert a child without having to
+// clean up the child on failure.
+func (o *OrderedChildren) Inserter(name string, makeChild func() Inode) (Inode, error) {
+ o.mu.Lock()
+ defer o.mu.Unlock()
+ if _, ok := o.set[name]; ok {
+ return nil, linuxerr.EEXIST
+ }
+
+ // Note: We must not fail after we call makeChild().
+
+ child := makeChild()
+ s := &slot{
+ name: name,
+ inode: child,
+ static: false,
+ }
+ o.order.PushBack(s)
+ o.set[name] = s
+ return child, nil
+}
+
// insert inserts child into o.
//
// Precondition: Caller must be holding a ref on child if static is true.
@@ -510,7 +539,7 @@ func (o *OrderedChildren) insert(name string, child Inode, static bool) error {
o.mu.Lock()
defer o.mu.Unlock()
if _, ok := o.set[name]; ok {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
s := &slot{
name: name,
@@ -558,7 +587,7 @@ func (o *OrderedChildren) replaceChildLocked(ctx context.Context, name string, n
func (o *OrderedChildren) checkExistingLocked(name string, child Inode) error {
s, ok := o.set[name]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if s.inode != child {
panic(fmt.Sprintf("Inode doesn't match what kernfs thinks! OrderedChild: %+v, kernfs: %+v", s.inode, child))
@@ -569,7 +598,7 @@ func (o *OrderedChildren) checkExistingLocked(name string, child Inode) error {
// Unlink implements Inode.Unlink.
func (o *OrderedChildren) Unlink(ctx context.Context, name string, child Inode) error {
if !o.writable {
- return syserror.EPERM
+ return linuxerr.EPERM
}
o.mu.Lock()
defer o.mu.Unlock()
@@ -599,15 +628,15 @@ func (o *OrderedChildren) RmDir(ctx context.Context, name string, child Inode) e
// Postcondition: reference on any replaced dentry transferred to caller.
func (o *OrderedChildren) Rename(ctx context.Context, oldname, newname string, child, dstDir Inode) error {
if !o.writable {
- return syserror.EPERM
+ return linuxerr.EPERM
}
dst, ok := dstDir.(interface{}).(*OrderedChildren)
if !ok {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if !dst.writable {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Note: There's a potential deadlock below if concurrent calls to Rename
@@ -653,7 +682,7 @@ type InodeSymlink struct {
// Open implements Inode.Open.
func (InodeSymlink) Open(ctx context.Context, rp *vfs.ResolvingPath, d *Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- return nil, syserror.ELOOP
+ return nil, linuxerr.ELOOP
}
// StaticDirectory is a standard implementation of a directory with static
@@ -709,7 +738,7 @@ func (s *StaticDirectory) Open(ctx context.Context, rp *vfs.ResolvingPath, d *De
// SetStat implements Inode.SetStat not allowing inode attributes to be changed.
func (*StaticDirectory) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// DecRef implements Inode.DecRef.
@@ -745,5 +774,5 @@ type InodeNoStatFS struct{}
// StatFS implements Inode.StatFS.
func (*InodeNoStatFS) StatFS(context.Context, *vfs.Filesystem) (linux.Statfs, error) {
- return linux.Statfs{}, syserror.ENOSYS
+ return linux.Statfs{}, linuxerr.ENOSYS
}
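
Inserter checks for a name collision while holding the lock and only then calls makeChild, so a failed insert never constructs a child that would need cleanup. A standalone sketch of that check-then-construct pattern with a hypothetical registry type:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errExists = errors.New("name already exists")

type registry struct {
	mu    sync.Mutex
	items map[string]interface{}
}

// Inserter constructs the value only after the name check has passed, so
// callers never have to undo a construction on EEXIST-style failures.
func (r *registry) Inserter(name string, makeItem func() interface{}) (interface{}, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, ok := r.items[name]; ok {
		return nil, errExists
	}
	// Must not fail past this point, mirroring the note in the patch.
	v := makeItem()
	r.items[name] = v
	return v, nil
}

func main() {
	r := &registry{items: map[string]interface{}{}}
	if v, err := r.Inserter("child", func() interface{} { return "inode" }); err == nil {
		fmt.Println("inserted:", v)
	}
	if _, err := r.Inserter("child", func() interface{} { return "inode" }); err != nil {
		fmt.Println("second insert:", err)
	}
}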
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs.go b/pkg/sentry/fsimpl/kernfs/kernfs.go
index 6f699c9cd..544698694 100644
--- a/pkg/sentry/fsimpl/kernfs/kernfs.go
+++ b/pkg/sentry/fsimpl/kernfs/kernfs.go
@@ -52,7 +52,7 @@
// vfs.VirtualFilesystem.mountMu
// vfs.Dentry.mu
// (inode implementation locks, if any)
-// kernfs.Filesystem.droppedDentriesMu
+// kernfs.Filesystem.deferredDecRefsMu
package kernfs
import (
@@ -61,6 +61,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -76,12 +77,12 @@ import (
type Filesystem struct {
vfsfs vfs.Filesystem
- droppedDentriesMu sync.Mutex `state:"nosave"`
+ deferredDecRefsMu sync.Mutex `state:"nosave"`
- // droppedDentries is a list of dentries waiting to be DecRef()ed. This is
+ // deferredDecRefs is a list of dentries waiting to be DecRef()ed. This is
// used to defer dentry destruction until mu can be acquired for
- // writing. Protected by droppedDentriesMu.
- droppedDentries []*Dentry
+ // writing. Protected by deferredDecRefsMu.
+ deferredDecRefs []refsvfs2.RefCounter
// mu synchronizes the lifetime of Dentries on this filesystem. Holding it
// for reading guarantees continued existence of any resolved dentries, but
@@ -131,25 +132,49 @@ type Filesystem struct {
// deferDecRef defers dropping a dentry ref until the next call to
// processDeferredDecRefs{,Locked}. See comment on Filesystem.mu.
// This may be called while Filesystem.mu or Dentry.dirMu is locked.
-func (fs *Filesystem) deferDecRef(d *Dentry) {
- fs.droppedDentriesMu.Lock()
- fs.droppedDentries = append(fs.droppedDentries, d)
- fs.droppedDentriesMu.Unlock()
+func (fs *Filesystem) deferDecRef(d refsvfs2.RefCounter) {
+ fs.deferredDecRefsMu.Lock()
+ fs.deferredDecRefs = append(fs.deferredDecRefs, d)
+ fs.deferredDecRefsMu.Unlock()
+}
+
+// SafeDecRefFD safely DecRefs the FileDescription, deferring the DecRef in
+// case Filesystem.mu is held. See comment on Filesystem.mu.
+func (fs *Filesystem) SafeDecRefFD(ctx context.Context, fd *vfs.FileDescription) {
+ if d, ok := fd.Dentry().Impl().(*Dentry); ok && d.fs == fs {
+ // Only defer if dentry belongs to this filesystem, since locks cannot cross
+ // filesystems.
+ fs.deferDecRef(fd)
+ return
+ }
+ fd.DecRef(ctx)
+}
+
+// SafeDecRef safely DecRefs the virtual dentry, deferring the DecRef in case
+// Filesystem.mu is held. See comment on Filesystem.mu.
+func (fs *Filesystem) SafeDecRef(ctx context.Context, vd vfs.VirtualDentry) {
+ if d, ok := vd.Dentry().Impl().(*Dentry); ok && d.fs == fs {
+ // Only defer if dentry belongs to this filesystem, since locks cannot cross
+ // filesystems.
+ fs.deferDecRef(&vd)
+ return
+ }
+ vd.DecRef(ctx)
}
// processDeferredDecRefs calls vfs.Dentry.DecRef on all dentries in the
-// droppedDentries list. See comment on Filesystem.mu.
+// deferredDecRefs list. See comment on Filesystem.mu.
//
// Precondition: Filesystem.mu or Dentry.dirMu must NOT be locked.
func (fs *Filesystem) processDeferredDecRefs(ctx context.Context) {
- fs.droppedDentriesMu.Lock()
- for _, d := range fs.droppedDentries {
- // Defer the DecRef call so that we are not holding droppedDentriesMu
+ fs.deferredDecRefsMu.Lock()
+ for _, d := range fs.deferredDecRefs {
+ // Defer the DecRef call so that we are not holding deferredDecRefsMu
// when DecRef is called.
defer d.DecRef(ctx)
}
- fs.droppedDentries = fs.droppedDentries[:0] // Keep slice memory for reuse.
- fs.droppedDentriesMu.Unlock()
+ fs.deferredDecRefs = fs.deferredDecRefs[:0] // Keep slice memory for reuse.
+ fs.deferredDecRefsMu.Unlock()
}
// VFSFilesystem returns the generic vfs filesystem object.
@@ -518,6 +543,63 @@ func (d *Dentry) FSLocalPath() string {
return b.String()
}
+// WalkDentryTree traverses p in the dentry tree for this filesystem. Note that
+// this only traverses the dentry tree and is not a general path traversal;
+// symlinks and dynamic children are not resolved, and no permission checks are
+// performed. The caller is responsible for ensuring the returned Dentry exists
+// for an appropriate lifetime.
+//
+// p is interpreted starting at d and may be absolute or relative; both forms
+// resolve relative to d, so they refer to the same target. p may contain "."
+// and "..", but traversal above d is not permitted (".." at d behaves like
+// ".." at the root dentry).
+//
+// This is useful for filesystem internals, where the filesystem may not be
+// mounted yet. For a mounted filesystem, use GetDentryAt.
+func (d *Dentry) WalkDentryTree(ctx context.Context, vfsObj *vfs.VirtualFilesystem, p fspath.Path) (*Dentry, error) {
+ d.fs.mu.RLock()
+ defer d.fs.processDeferredDecRefs(ctx)
+ defer d.fs.mu.RUnlock()
+
+ target := d
+
+ for pit := p.Begin; pit.Ok(); pit = pit.Next() {
+ pc := pit.String()
+
+ switch {
+ case target == nil:
+ return nil, linuxerr.ENOENT
+ case pc == ".":
+ // No-op, consume component and continue.
+ case pc == "..":
+ if target == d {
+ // Don't let .. traverse above the start point of the walk.
+ continue
+ }
+ target = target.parent
+ // Parent doesn't need revalidation since we revalidated it on the
+ // way to the child, and we're still holding fs.mu.
+ default:
+ var err error
+
+ d.dirMu.Lock()
+ target, err = d.fs.revalidateChildLocked(ctx, vfsObj, target, pc, target.children[pc])
+ d.dirMu.Unlock()
+
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if target == nil {
+ return nil, linuxerr.ENOENT
+ }
+
+ target.IncRef()
+ return target, nil
+}
+
// The Inode interface maps filesystem-level operations that operate on paths to
// equivalent operations on specific filesystem nodes.
//
@@ -643,12 +725,15 @@ type inodeDirectory interface {
// RmDir removes an empty child directory from this directory
// inode. Implementations must update the parent directory's link count,
// if required. Implementations are not responsible for checking that child
- // is a directory, checking for an empty directory.
+ // is a directory, or checking for an empty directory.
RmDir(ctx context.Context, name string, child Inode) error
// Rename is called on the source directory containing an inode being
- // renamed. child should point to the resolved child in the source
- // directory.
+ // renamed. child points to the resolved child in the source directory.
+ // dstDir is guaranteed to be a directory inode.
+ //
+ // On a successful call to Rename, the caller updates the dentry tree to
+ // reflect the name change.
//
// Precondition: Caller must serialize concurrent calls to Rename.
Rename(ctx context.Context, oldname, newname string, child, dstDir Inode) error
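WalkDentryTree above resolves a path purely within the in-memory dentry tree, which is useful for filesystem internals that run before the filesystem is mounted. A minimal usage sketch, assuming package-internal access; findInternal is hypothetical, while the signature and the caller-held reference come from the change above.

func findInternal(ctx context.Context, vfsObj *vfs.VirtualFilesystem, root *Dentry) error {
	d, err := root.WalkDentryTree(ctx, vfsObj, fspath.Parse("dir2/dir3"))
	if err != nil {
		return err // linuxerr.ENOENT if any component is missing
	}
	// The returned dentry carries a reference for the caller.
	defer d.DecRef(ctx)
	// ... use d ...
	return nil
}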
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_test.go b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
index 1cd3137e6..a2aba9321 100644
--- a/pkg/sentry/fsimpl/kernfs/kernfs_test.go
+++ b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
@@ -22,12 +22,13 @@ import (
"github.com/google/go-cmp/cmp"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -94,7 +95,7 @@ type attrs struct {
}
func (*attrs) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
type readonlyDir struct {
@@ -196,15 +197,15 @@ func (d *dir) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (k
}
func (*dir) NewLink(context.Context, string, kernfs.Inode) (kernfs.Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
func (*dir) NewSymlink(context.Context, string, string) (kernfs.Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
func (*dir) NewNode(context.Context, string, vfs.MknodOptions) (kernfs.Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
func (fsType) Name() string {
@@ -318,10 +319,10 @@ func TestDirFDReadWrite(t *testing.T) {
defer fd.DecRef(sys.Ctx)
// Read/Write should fail for directory FDs.
- if _, err := fd.Read(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.ReadOptions{}); err != syserror.EISDIR {
+ if _, err := fd.Read(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.ReadOptions{}); !linuxerr.Equals(linuxerr.EISDIR, err) {
t.Fatalf("Read for directory FD failed with unexpected error: %v", err)
}
- if _, err := fd.Write(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.WriteOptions{}); err != syserror.EBADF {
+ if _, err := fd.Write(sys.Ctx, usermem.BytesIOSequence([]byte{}), vfs.WriteOptions{}); !linuxerr.Equals(linuxerr.EBADF, err) {
t.Fatalf("Write for directory FD failed with unexpected error: %v", err)
}
}
@@ -346,3 +347,63 @@ func TestDirFDIterDirents(t *testing.T) {
"file1": linux.DT_REG,
})
}
+
+func TestDirWalkDentryTree(t *testing.T) {
+ sys := newTestSystem(t, func(ctx context.Context, creds *auth.Credentials, fs *filesystem) kernfs.Inode {
+ return fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
+ "dir1": fs.newDir(ctx, creds, 0755, nil),
+ "dir2": fs.newDir(ctx, creds, 0755, map[string]kernfs.Inode{
+ "file1": fs.newFile(ctx, creds, staticFileContent),
+ "dir3": fs.newDir(ctx, creds, 0755, nil),
+ }),
+ })
+ })
+ defer sys.Destroy()
+
+ testWalk := func(from *kernfs.Dentry, getDentryPath, walkPath string, expectedErr error) {
+ var d *kernfs.Dentry
+ if getDentryPath != "" {
+ pop := sys.PathOpAtRoot(getDentryPath)
+ vd := sys.GetDentryOrDie(pop)
+ defer vd.DecRef(sys.Ctx)
+ d = vd.Dentry().Impl().(*kernfs.Dentry)
+ }
+
+ match, err := from.WalkDentryTree(sys.Ctx, sys.VFS, fspath.Parse(walkPath))
+ if err == nil {
+ defer match.DecRef(sys.Ctx)
+ }
+
+ if err != expectedErr {
+ t.Fatalf("WalkDentryTree from %q to %q returned unexpected error, want: %v, got: %v", from.FSLocalPath(), walkPath, expectedErr, err)
+ }
+ if expectedErr != nil {
+ return
+ }
+
+ if d != match {
+ t.Fatalf("WalkDentryTree from %q to %q returned unexpected dentry, want: %v, got: %v", from.FSLocalPath(), walkPath, d, match)
+ }
+ }
+
+ rootD := sys.Root.Dentry().Impl().(*kernfs.Dentry)
+
+ testWalk(rootD, "dir1", "/dir1", nil)
+ testWalk(rootD, "", "/dir-non-existent", linuxerr.ENOENT)
+ testWalk(rootD, "", "/dir1/child-non-existent", linuxerr.ENOENT)
+ testWalk(rootD, "", "/dir2/inner-non-existent/dir3", linuxerr.ENOENT)
+
+ testWalk(rootD, "dir2/dir3", "/dir2/../dir2/dir3", nil)
+ testWalk(rootD, "dir2/dir3", "/dir2/././dir3", nil)
+ testWalk(rootD, "dir2/dir3", "/dir2/././dir3/.././dir3", nil)
+
+ pop := sys.PathOpAtRoot("dir2")
+ dir2VD := sys.GetDentryOrDie(pop)
+ defer dir2VD.DecRef(sys.Ctx)
+ dir2D := dir2VD.Dentry().Impl().(*kernfs.Dentry)
+
+ testWalk(dir2D, "dir2/dir3", "/dir3", nil)
+ testWalk(dir2D, "dir2/dir3", "/../../../dir3", nil)
+ testWalk(dir2D, "dir2/file1", "/file1", nil)
+ testWalk(dir2D, "dir2/file1", "file1", nil)
+}
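The test also switches to the error-matching idiom used throughout this change: rather than comparing err against a syserror sentinel with ==, callers ask linuxerr whether err carries the given errno. A tiny sketch of the idiom; the note that Equals also matches an equivalent unix.Errno is my understanding, not stated in the diff.

func isIsDir(err error) bool {
	// Old idiom: err == syserror.EISDIR (identity comparison).
	// New idiom: errno comparison, which should also match an equivalent
	// unix.Errno value rather than only the sentinel itself.
	return linuxerr.Equals(linuxerr.EISDIR, err)
}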
diff --git a/pkg/sentry/fsimpl/kernfs/symlink.go b/pkg/sentry/fsimpl/kernfs/symlink.go
index a0736c0d6..4adf76ce6 100644
--- a/pkg/sentry/fsimpl/kernfs/symlink.go
+++ b/pkg/sentry/fsimpl/kernfs/symlink.go
@@ -17,9 +17,9 @@ package kernfs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// StaticSymlink provides an Inode implementation for symlinks that point to
@@ -62,5 +62,5 @@ func (s *StaticSymlink) Getlink(context.Context, *vfs.Mount) (vfs.VirtualDentry,
// SetStat implements Inode.SetStat not allowing inode attributes to be changed.
func (*StaticSymlink) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
diff --git a/pkg/sentry/fsimpl/kernfs/synthetic_directory.go b/pkg/sentry/fsimpl/kernfs/synthetic_directory.go
index 11694c392..c91d23b56 100644
--- a/pkg/sentry/fsimpl/kernfs/synthetic_directory.go
+++ b/pkg/sentry/fsimpl/kernfs/synthetic_directory.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// syntheticDirectory implements kernfs.Inode for a directory created by
@@ -65,13 +65,13 @@ func (dir *syntheticDirectory) Open(ctx context.Context, rp *vfs.ResolvingPath,
// NewFile implements Inode.NewFile.
func (dir *syntheticDirectory) NewFile(ctx context.Context, name string, opts vfs.OpenOptions) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewDir implements Inode.NewDir.
func (dir *syntheticDirectory) NewDir(ctx context.Context, name string, opts vfs.MkdirOptions) (Inode, error) {
if !opts.ForSyntheticMountpoint {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
subdirI := newSyntheticDirectory(ctx, auth.CredentialsFromContext(ctx), opts.Mode&linux.PermissionsMask)
if err := dir.OrderedChildren.Insert(name, subdirI); err != nil {
@@ -84,17 +84,17 @@ func (dir *syntheticDirectory) NewDir(ctx context.Context, name string, opts vfs
// NewLink implements Inode.NewLink.
func (dir *syntheticDirectory) NewLink(ctx context.Context, name string, target Inode) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewSymlink implements Inode.NewSymlink.
func (dir *syntheticDirectory) NewSymlink(ctx context.Context, name, target string) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// NewNode implements Inode.NewNode.
func (dir *syntheticDirectory) NewNode(ctx context.Context, name string, opts vfs.MknodOptions) (Inode, error) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
// DecRef implements Inode.DecRef.
diff --git a/pkg/sentry/fsimpl/overlay/BUILD b/pkg/sentry/fsimpl/overlay/BUILD
index 5504476c8..d16dfef9b 100644
--- a/pkg/sentry/fsimpl/overlay/BUILD
+++ b/pkg/sentry/fsimpl/overlay/BUILD
@@ -29,6 +29,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/log",
@@ -41,7 +42,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fsimpl/overlay/copy_up.go b/pkg/sentry/fsimpl/overlay/copy_up.go
index 45aa5a494..520487066 100644
--- a/pkg/sentry/fsimpl/overlay/copy_up.go
+++ b/pkg/sentry/fsimpl/overlay/copy_up.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
func (d *dentry) isCopiedUp() bool {
@@ -36,6 +36,10 @@ func (d *dentry) isCopiedUp() bool {
//
// Preconditions: filesystem.renameMu must be locked.
func (d *dentry) copyUpLocked(ctx context.Context) error {
+ return d.copyUpMaybeSyntheticMountpointLocked(ctx, false /* forSyntheticMountpoint */)
+}
+
+func (d *dentry) copyUpMaybeSyntheticMountpointLocked(ctx context.Context, forSyntheticMountpoint bool) error {
// Fast path.
if d.isCopiedUp() {
return nil
@@ -51,15 +55,15 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {
// Can be copied-up.
default:
// Can't be copied-up.
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Ensure that our parent directory is copied-up.
if d.parent == nil {
// d is a filesystem root with no upper layer.
- return syserror.EROFS
+ return linuxerr.EROFS
}
- if err := d.parent.copyUpLocked(ctx); err != nil {
+ if err := d.parent.copyUpMaybeSyntheticMountpointLocked(ctx, forSyntheticMountpoint); err != nil {
return err
}
@@ -71,7 +75,7 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {
}
if d.vfsd.IsDead() {
// Raced with deletion of d.
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// Obtain settable timestamps from the lower layer.
@@ -168,7 +172,8 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {
case linux.S_IFDIR:
if err := vfsObj.MkdirAt(ctx, d.fs.creds, &newpop, &vfs.MkdirOptions{
- Mode: linux.FileMode(d.mode &^ linux.S_IFMT),
+ Mode: linux.FileMode(d.mode &^ linux.S_IFMT),
+ ForSyntheticMountpoint: forSyntheticMountpoint,
}); err != nil {
return err
}
@@ -271,7 +276,7 @@ func (d *dentry) copyUpLocked(ctx context.Context) error {
}
if upperStat.Mask&linux.STATX_INO == 0 {
cleanupUndoCopyUp()
- return syserror.EREMOTE
+ return linuxerr.EREMOTE
}
atomic.StoreUint32(&d.devMajor, upperStat.DevMajor)
atomic.StoreUint32(&d.devMinor, upperStat.DevMinor)
@@ -349,7 +354,7 @@ func (d *dentry) copyXattrsLocked(ctx context.Context) error {
lowerXattrs, err := vfsObj.ListXattrAt(ctx, d.fs.creds, lowerPop, 0)
if err != nil {
- if err == syserror.EOPNOTSUPP {
+ if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
// There are no guarantees as to the contents of lowerXattrs.
return nil
}
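The copy-up change threads forSyntheticMountpoint from the dentry being copied up through all of its ancestors, so any upper-layer directories created along the way get MkdirOptions.ForSyntheticMountpoint. A minimal sketch of the entry point, under the documented precondition; the wrapper name is hypothetical.

// Precondition: d.fs.renameMu must be locked.
func copyUpForSyntheticMountpoint(ctx context.Context, d *dentry) error {
	// Ancestors copied up along the way are created on the upper layer with
	// ForSyntheticMountpoint set as well.
	return d.copyUpMaybeSyntheticMountpointLocked(ctx, true /* forSyntheticMountpoint */)
}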
diff --git a/pkg/sentry/fsimpl/overlay/directory.go b/pkg/sentry/fsimpl/overlay/directory.go
index df4492346..ad3cdbb56 100644
--- a/pkg/sentry/fsimpl/overlay/directory.go
+++ b/pkg/sentry/fsimpl/overlay/directory.go
@@ -19,10 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
func (d *dentry) isDir() bool {
@@ -69,7 +69,7 @@ func (d *dentry) collectWhiteoutsForRmdirLocked(ctx context.Context) (map[string
return nil
}
// Non-whiteout file in the directory prevents rmdir.
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}))
if err != nil {
readdirErr = err
@@ -88,7 +88,7 @@ func (d *dentry) collectWhiteoutsForRmdirLocked(ctx context.Context) (map[string
}
if stat.RdevMajor != 0 || stat.RdevMinor != 0 {
// This file is a real character device, not a whiteout.
- readdirErr = syserror.ENOTEMPTY
+ readdirErr = linuxerr.ENOTEMPTY
return false
}
whiteouts[maybeWhiteoutName] = isUpper
@@ -256,7 +256,7 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in
switch whence {
case linux.SEEK_SET:
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset == 0 {
// Ensure that the next call to fd.IterDirents() calls
@@ -268,13 +268,13 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in
case linux.SEEK_CUR:
offset += fd.off
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Don't clear fd.dirents in this case, even if offset == 0.
fd.off = offset
return fd.off, nil
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index 6b6fa0bd5..3b3dcf836 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -21,13 +21,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// _OVL_XATTR_PREFIX is an extended attribute key prefix to identify overlayfs
@@ -86,7 +86,7 @@ func putDentrySlice(ds *[]*dentry) {
// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
//
-// +checklocks:fs.renameMu
+// +checklocksrelease:fs.renameMu
func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
if *dsp == nil {
@@ -112,7 +112,7 @@ func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*
putDentrySlice(*dsp)
}
-// +checklocks:fs.renameMu
+// +checklocksrelease:fs.renameMu
func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
if *ds == nil {
fs.renameMu.Unlock()
@@ -137,7 +137,7 @@ func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*de
// * !rp.Done().
func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, lookupLayer, error) {
if !d.isDir() {
- return nil, lookupLayerNone, syserror.ENOTDIR
+ return nil, lookupLayerNone, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, lookupLayerNone, err
@@ -218,7 +218,7 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str
Start: parentVD,
Path: childPath,
}, &vfs.GetDentryOptions{})
- if err == syserror.ENOENT || err == syserror.ENAMETOOLONG {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENAMETOOLONG, err) {
// The file doesn't exist on this layer. Proceed to the next one.
return true
}
@@ -245,7 +245,7 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str
return false
}
if stat.Mask&mask != mask {
- lookupErr = syserror.EREMOTE
+ lookupErr = linuxerr.EREMOTE
return false
}
@@ -313,7 +313,7 @@ func (fs *filesystem) lookupLocked(ctx context.Context, parent *dentry, name str
}
if !topLookupLayer.existsInOverlay() {
child.destroyLocked(ctx)
- return nil, topLookupLayer, syserror.ENOENT
+ return nil, topLookupLayer, linuxerr.ENOENT
}
// Device and inode numbers were copied from the topmost layer above. Remap
@@ -352,7 +352,7 @@ func (fs *filesystem) lookupLayerLocked(ctx context.Context, parent *dentry, nam
}, &vfs.StatOptions{
Mask: linux.STATX_TYPE,
})
- if err == syserror.ENOENT || err == syserror.ENAMETOOLONG {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENAMETOOLONG, err) {
// The file doesn't exist on this layer. Proceed to the next
// one.
return true
@@ -365,7 +365,7 @@ func (fs *filesystem) lookupLayerLocked(ctx context.Context, parent *dentry, nam
// Linux's overlayfs tends to return EREMOTE in cases where a file
// is unusable for reasons that are not better captured by another
// errno.
- lookupErr = syserror.EREMOTE
+ lookupErr = linuxerr.EREMOTE
return false
}
if isWhiteout(&stat) {
@@ -437,7 +437,7 @@ func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving
d = next
}
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -457,18 +457,26 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
d = next
}
if rp.MustBeDir() && !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
+type createType int
+
+const (
+ createNonDirectory createType = iota
+ createDirectory
+ createSyntheticMountpoint
+)
+
// doCreateAt checks that creating a file at rp is permitted, then invokes
// create to do so.
//
// Preconditions:
// * !rp.Done().
// * For the final path component in rp, !rp.ShouldFollowSymlink().
-func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir bool, create func(parent *dentry, name string, haveUpperWhiteout bool) error) error {
+func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, ct createType, create func(parent *dentry, name string, haveUpperWhiteout bool) error) error {
var ds *[]*dentry
fs.renameMu.RLock()
defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
@@ -479,10 +487,10 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
}
name := rp.Component()
if name == "." || name == ".." {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if parent.vfsd.IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := parent.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
@@ -494,18 +502,18 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
// Determine if a file already exists at name.
if _, ok := parent.children[name]; ok {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
childLayer, err := fs.lookupLayerLocked(ctx, parent, name)
if err != nil {
return err
}
if childLayer.existsInOverlay() {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- if !dir && rp.MustBeDir() {
- return syserror.ENOENT
+ if ct == createNonDirectory && rp.MustBeDir() {
+ return linuxerr.ENOENT
}
mnt := rp.Mount()
@@ -518,7 +526,7 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
}
// Ensure that the parent directory is copied-up so that we can create the
// new file in the upper layer.
- if err := parent.copyUpLocked(ctx); err != nil {
+ if err := parent.copyUpMaybeSyntheticMountpointLocked(ctx, ct == createSyntheticMountpoint); err != nil {
return err
}
@@ -529,7 +537,7 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
parent.dirents = nil
ev := linux.IN_CREATE
- if dir {
+ if ct != createNonDirectory {
ev |= linux.IN_ISDIR
}
parent.watches.Notify(ctx, name, uint32(ev), 0 /* cookie */, vfs.InodeEvent, false /* unlinked */)
@@ -592,7 +600,7 @@ func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, op
}
if opts.CheckSearchable {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -618,13 +626,13 @@ func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa
// LinkAt implements vfs.FilesystemImpl.LinkAt.
func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
- return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
+ return fs.doCreateAt(ctx, rp, createNonDirectory, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
if rp.Mount() != vd.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
old := vd.Dentry().Impl().(*dentry)
if old.isDir() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := old.copyUpLocked(ctx); err != nil {
return err
@@ -671,7 +679,11 @@ func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.
// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
- return fs.doCreateAt(ctx, rp, true /* dir */, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
+ ct := createDirectory
+ if opts.ForSyntheticMountpoint {
+ ct = createSyntheticMountpoint
+ }
+ return fs.doCreateAt(ctx, rp, ct, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
vfsObj := fs.vfsfs.VirtualFilesystem()
pop := vfs.PathOperation{
Root: parent.upperVD,
@@ -722,10 +734,10 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
// MknodAt implements vfs.FilesystemImpl.MknodAt.
func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
- return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
+ return fs.doCreateAt(ctx, rp, createNonDirectory, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
// Disallow attempts to create whiteouts.
if opts.Mode&linux.S_IFMT == linux.S_IFCHR && opts.DevMajor == 0 && opts.DevMinor == 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
vfsObj := fs.vfsfs.VirtualFilesystem()
pop := vfs.PathOperation{
@@ -779,10 +791,10 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
start := rp.Start().Impl().(*dentry)
if rp.Done() {
if mayCreate && rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
if start.isRegularFile() && mayWrite {
if err := start.copyUpLocked(ctx); err != nil {
@@ -806,12 +818,12 @@ afterTrailingSymlink:
}
// Reject attempts to open directories with O_CREAT.
if mayCreate && rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
// Determine whether or not we need to create a file.
parent.dirMu.Lock()
child, topLookupLayer, err := fs.stepLocked(ctx, rp, parent, false /* mayFollowSymlinks */, &ds)
- if err == syserror.ENOENT && mayCreate {
+ if linuxerr.Equals(linuxerr.ENOENT, err) && mayCreate {
fd, err := fs.createAndOpenLocked(ctx, rp, parent, &opts, &ds, topLookupLayer == lookupLayerUpperWhiteout)
parent.dirMu.Unlock()
return fd, err
@@ -822,7 +834,7 @@ afterTrailingSymlink:
}
// Open existing child or follow symlink.
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
if child.isSymlink() && rp.ShouldFollowSymlink() {
target, err := child.readlink(ctx)
@@ -836,7 +848,7 @@ afterTrailingSymlink:
goto afterTrailingSymlink
}
if rp.MustBeDir() && !child.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if child.isRegularFile() && mayWrite {
if err := child.copyUpLocked(ctx); err != nil {
@@ -864,14 +876,14 @@ func (d *dentry) openCopiedUp(ctx context.Context, rp *vfs.ResolvingPath, opts *
if ftype == linux.S_IFDIR {
// Can't open directories with O_CREAT.
if opts.Flags&linux.O_CREAT != 0 {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
// Can't open directories writably.
if ats.MayWrite() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if opts.Flags&linux.O_DIRECT != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
fd := &directoryFD{}
fd.LockFD.Init(&d.locks)
@@ -918,7 +930,7 @@ func (fs *filesystem) createAndOpenLocked(ctx context.Context, rp *vfs.Resolving
return nil, err
}
if parent.vfsd.IsDead() {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
mnt := rp.Mount()
if err := mnt.CheckBeginWrite(); err != nil {
@@ -1027,19 +1039,19 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
if opts.Flags&^linux.RENAME_NOREPLACE != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
newName := rp.Component()
if newName == "." || newName == ".." {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
mnt := rp.Mount()
if mnt != oldParentVD.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := mnt.CheckBeginWrite(); err != nil {
return err
@@ -1064,7 +1076,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
if renamed.isDir() {
if renamed == newParent || genericIsAncestorDentry(renamed, newParent) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if oldParent != newParent {
if err := renamed.checkPermissions(creds, vfs.MayWrite); err != nil {
@@ -1073,7 +1085,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
} else {
if opts.MustBeDir || rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
@@ -1085,7 +1097,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
defer newParent.dirMu.Unlock()
}
if newParent.vfsd.IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
var (
replaced *dentry
@@ -1094,20 +1106,20 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
whiteouts map[string]bool
)
replaced, replacedLayer, err = fs.getChildLocked(ctx, newParent, newName, &ds)
- if err != nil && err != syserror.ENOENT {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOENT, err) {
return err
}
if replaced != nil {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
replacedVFSD = &replaced.vfsd
if replaced.isDir() {
if !renamed.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if genericIsAncestorDentry(replaced, renamed) {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
replaced.dirMu.Lock()
defer replaced.dirMu.Unlock()
@@ -1117,7 +1129,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
} else {
if rp.MustBeDir() || renamed.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
}
@@ -1177,7 +1189,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
Root: replaced.upperVD,
Start: replaced.upperVD,
Path: fspath.Parse(whiteoutName),
- }); err != nil && err != syserror.EEXIST {
+ }); err != nil && !linuxerr.Equals(linuxerr.EEXIST, err) {
panic(fmt.Sprintf("unrecoverable overlayfs inconsistency: failed to recreate deleted whiteout after RenameAt failure: %v", err))
}
}
@@ -1285,10 +1297,10 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
defer rp.Mount().EndWrite()
name := rp.Component()
if name == "." {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if name == ".." {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
vfsObj := rp.VirtualFilesystem()
mntns := vfs.MountNamespaceFromContext(ctx)
@@ -1309,7 +1321,7 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
return err
}
if !child.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
if err := parent.mayDelete(rp.Credentials(), child); err != nil {
return err
@@ -1344,7 +1356,7 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
Root: child.upperVD,
Start: child.upperVD,
Path: fspath.Parse(whiteoutName),
- }); err != nil && err != syserror.EEXIST {
+ }); err != nil && !linuxerr.Equals(linuxerr.EEXIST, err) {
panic(fmt.Sprintf("unrecoverable overlayfs inconsistency: failed to recreate deleted whiteout after RmdirAt failure: %v", err))
}
}
@@ -1476,7 +1488,7 @@ func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu
// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
- return fs.doCreateAt(ctx, rp, false /* dir */, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
+ return fs.doCreateAt(ctx, rp, createNonDirectory, func(parent *dentry, childName string, haveUpperWhiteout bool) error {
vfsObj := fs.vfsfs.VirtualFilesystem()
pop := vfs.PathOperation{
Root: parent.upperVD,
@@ -1532,10 +1544,10 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error
defer rp.Mount().EndWrite()
name := rp.Component()
if name == "." || name == ".." {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
vfsObj := rp.VirtualFilesystem()
mntns := vfs.MountNamespaceFromContext(ctx)
@@ -1556,7 +1568,7 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error
return err
}
if child.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if err := parent.mayDelete(rp.Credentials(), child); err != nil {
return err
@@ -1658,7 +1670,7 @@ func (fs *filesystem) getXattr(ctx context.Context, d *dentry, creds *auth.Crede
// Return EOPNOTSUPP when fetching an overlay attribute.
// See fs/overlayfs/super.c:ovl_own_xattr_get().
if isOverlayXattr(opts.Name) {
- return "", syserror.EOPNOTSUPP
+ return "", linuxerr.EOPNOTSUPP
}
// Analogous to fs/overlayfs/super.c:ovl_other_xattr_get().
@@ -1696,7 +1708,7 @@ func (fs *filesystem) setXattrLocked(ctx context.Context, d *dentry, mnt *vfs.Mo
// Return EOPNOTSUPP when setting an overlay attribute.
// See fs/overlayfs/super.c:ovl_own_xattr_set().
if isOverlayXattr(opts.Name) {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
// Analogous to fs/overlayfs/super.c:ovl_other_xattr_set().
@@ -1741,7 +1753,7 @@ func (fs *filesystem) removeXattrLocked(ctx context.Context, d *dentry, mnt *vfs
// Linux passes the remove request to xattr_handler->set.
// See fs/xattr.c:vfs_removexattr().
if isOverlayXattr(name) {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
if err := mnt.CheckBeginWrite(); err != nil {
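The annotation change from +checklocks to +checklocksrelease tells gVisor's checklocks analyzer that these helpers release fs.renameMu rather than merely requiring it to be held. A minimal sketch of the convention as I understand it; the type and method below are illustrative only.

type guarded struct {
	mu sync.RWMutex
}

// finishRLocked drops the read lock taken by the caller.
//
// +checklocksrelease:g.mu
func (g *guarded) finishRLocked() {
	g.mu.RUnlock()
}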
diff --git a/pkg/sentry/fsimpl/overlay/overlay.go b/pkg/sentry/fsimpl/overlay/overlay.go
index 454c20d4f..46d9f1f1d 100644
--- a/pkg/sentry/fsimpl/overlay/overlay.go
+++ b/pkg/sentry/fsimpl/overlay/overlay.go
@@ -40,13 +40,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Name is the default filesystem name.
@@ -135,7 +135,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
fsopts, ok := fsoptsRaw.(FilesystemOptions)
if fsoptsRaw != nil && !ok {
ctx.Infof("overlay.FilesystemType.GetFilesystem: GetFilesystemOptions.InternalData has type %T, wanted overlay.FilesystemOptions or nil", fsoptsRaw)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
vfsroot := vfs.RootFromContext(ctx)
if vfsroot.Ok() {
@@ -145,7 +145,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if upperPathname, ok := mopts["upperdir"]; ok {
if fsopts.UpperRoot.Ok() {
ctx.Infof("overlay.FilesystemType.GetFilesystem: both upperdir and FilesystemOptions.UpperRoot are specified")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
delete(mopts, "upperdir")
// Linux overlayfs also requires a workdir when upperdir is
@@ -154,7 +154,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
upperPath := fspath.Parse(upperPathname)
if !upperPath.Absolute {
ctx.Infof("overlay.FilesystemType.GetFilesystem: upperdir %q must be absolute", upperPathname)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
upperRoot, err := vfsObj.GetDentryAt(ctx, creds, &vfs.PathOperation{
Root: vfsroot,
@@ -181,7 +181,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if lowerPathnamesStr, ok := mopts["lowerdir"]; ok {
if len(fsopts.LowerRoots) != 0 {
ctx.Infof("overlay.FilesystemType.GetFilesystem: both lowerdir and FilesystemOptions.LowerRoots are specified")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
delete(mopts, "lowerdir")
lowerPathnames := strings.Split(lowerPathnamesStr, ":")
@@ -189,7 +189,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
lowerPath := fspath.Parse(lowerPathname)
if !lowerPath.Absolute {
ctx.Infof("overlay.FilesystemType.GetFilesystem: lowerdir %q must be absolute", lowerPathname)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
lowerRoot, err := vfsObj.GetDentryAt(ctx, creds, &vfs.PathOperation{
Root: vfsroot,
@@ -216,21 +216,21 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if len(mopts) != 0 {
ctx.Infof("overlay.FilesystemType.GetFilesystem: unused options: %v", mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if len(fsopts.LowerRoots) == 0 {
ctx.Infof("overlay.FilesystemType.GetFilesystem: at least one lower layer is required")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if len(fsopts.LowerRoots) < 2 && !fsopts.UpperRoot.Ok() {
ctx.Infof("overlay.FilesystemType.GetFilesystem: at least two lower layers are required when no upper layer is present")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
const maxLowerLayers = 500 // Linux: fs/overlay/super.c:OVL_MAX_STACK
if len(fsopts.LowerRoots) > maxLowerLayers {
ctx.Infof("overlay.FilesystemType.GetFilesystem: %d lower layers specified, maximum %d", len(fsopts.LowerRoots), maxLowerLayers)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Take extra references held by the filesystem.
@@ -277,13 +277,13 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if rootStat.Mask&rootStatMask != rootStatMask {
root.destroyLocked(ctx)
fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EREMOTE
+ return nil, nil, linuxerr.EREMOTE
}
if isWhiteout(&rootStat) {
ctx.Infof("overlay.FilesystemType.GetFilesystem: filesystem root is a whiteout")
root.destroyLocked(ctx)
fs.vfsfs.DecRef(ctx)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
root.mode = uint32(rootStat.Mode)
root.uid = rootStat.UID
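For reference, the option parsing above follows Linux overlayfs conventions: upperdir and each lowerdir path must be absolute, lower layers are separated by ':', and at most 500 lower layers are accepted. A sketch of constructing such mount data; the exact vfs.MountOptions field layout is an assumption here.

mopts := &vfs.MountOptions{}
// Parsed by overlay.FilesystemType.GetFilesystem above.
mopts.GetFilesystemOptions.Data = "upperdir=/tmp/upper,lowerdir=/tmp/l1:/tmp/l2"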
diff --git a/pkg/sentry/fsimpl/overlay/regular_file.go b/pkg/sentry/fsimpl/overlay/regular_file.go
index 82491a0f8..156ffeaeb 100644
--- a/pkg/sentry/fsimpl/overlay/regular_file.go
+++ b/pkg/sentry/fsimpl/overlay/regular_file.go
@@ -19,6 +19,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -26,7 +27,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -415,7 +415,7 @@ func (fd *regularFileFD) ensureMappable(ctx context.Context, opts *memmap.MMapOp
// Only permit mmap of regular files, since other file types may have
// unpredictable behavior when mmapped (e.g. /dev/zero).
if atomic.LoadUint32(&d.mode)&linux.S_IFMT != linux.S_IFREG {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
// Get a Mappable for the current top layer.
diff --git a/pkg/sentry/fsimpl/pipefs/BUILD b/pkg/sentry/fsimpl/pipefs/BUILD
index 278ee3c92..a50510031 100644
--- a/pkg/sentry/fsimpl/pipefs/BUILD
+++ b/pkg/sentry/fsimpl/pipefs/BUILD
@@ -9,6 +9,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/sentry/fsimpl/kernfs",
@@ -16,6 +17,5 @@ go_library(
"//pkg/sentry/kernel/pipe",
"//pkg/sentry/kernel/time",
"//pkg/sentry/vfs",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/fsimpl/pipefs/pipefs.go b/pkg/sentry/fsimpl/pipefs/pipefs.go
index 08aedc2ad..af09195a7 100644
--- a/pkg/sentry/fsimpl/pipefs/pipefs.go
+++ b/pkg/sentry/fsimpl/pipefs/pipefs.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// +stateify savable
@@ -152,7 +152,7 @@ func (i *inode) SetStat(ctx context.Context, vfsfs *vfs.Filesystem, creds *auth.
if opts.Stat.Mask == 0 {
return nil
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Open implements kernfs.Inode.Open.
diff --git a/pkg/sentry/fsimpl/proc/BUILD b/pkg/sentry/fsimpl/proc/BUILD
index 2b628bd55..95cfbdc42 100644
--- a/pkg/sentry/fsimpl/proc/BUILD
+++ b/pkg/sentry/fsimpl/proc/BUILD
@@ -81,6 +81,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/refs",
@@ -101,7 +102,6 @@ go_library(
"//pkg/sentry/usage",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/tcpip/header",
"//pkg/tcpip/network/ipv4",
"//pkg/usermem",
@@ -119,6 +119,7 @@ go_test(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/sentry/contexttest",
"//pkg/sentry/fsimpl/testutil",
@@ -127,7 +128,6 @@ go_test(
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fsimpl/proc/filesystem.go b/pkg/sentry/fsimpl/proc/filesystem.go
index ce8f55b1f..f2697c12d 100644
--- a/pkg/sentry/fsimpl/proc/filesystem.go
+++ b/pkg/sentry/fsimpl/proc/filesystem.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -76,7 +76,7 @@ func (ft FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.VirtualF
maxCachedDentries, err = strconv.ParseUint(str, 10, 64)
if err != nil {
ctx.Warningf("proc.FilesystemType.GetFilesystem: invalid dentry cache limit: dentry_cache_limit=%s", str)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/fsimpl/proc/subtasks.go b/pkg/sentry/fsimpl/proc/subtasks.go
index c53cc0122..e04ae6660 100644
--- a/pkg/sentry/fsimpl/proc/subtasks.go
+++ b/pkg/sentry/fsimpl/proc/subtasks.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// subtasksInode represents the inode for /proc/[pid]/task/ directory.
@@ -70,15 +70,15 @@ func (fs *filesystem) newSubtasks(ctx context.Context, task *kernel.Task, pidns
func (i *subtasksInode) Lookup(ctx context.Context, name string) (kernfs.Inode, error) {
tid, err := strconv.ParseUint(name, 10, 32)
if err != nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
subTask := i.pidns.TaskWithID(kernel.ThreadID(tid))
if subTask == nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
if subTask.ThreadGroup() != i.task.ThreadGroup() {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
return i.fs.newTaskInode(ctx, subTask, i.pidns, false, i.cgroupControllers)
}
@@ -87,7 +87,7 @@ func (i *subtasksInode) Lookup(ctx context.Context, name string) (kernfs.Inode,
func (i *subtasksInode) IterDirents(ctx context.Context, mnt *vfs.Mount, cb vfs.IterDirentsCallback, offset, relOffset int64) (int64, error) {
tasks := i.task.ThreadGroup().MemberIDs(i.pidns)
if len(tasks) == 0 {
- return offset, syserror.ENOENT
+ return offset, linuxerr.ENOENT
}
if relOffset >= int64(len(tasks)) {
return offset, nil
@@ -123,7 +123,7 @@ type subtasksFD struct {
func (fd *subtasksFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
if fd.task.ExitState() >= kernel.TaskExitZombie {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
return fd.GenericDirectoryFD.IterDirents(ctx, cb)
}
@@ -131,7 +131,7 @@ func (fd *subtasksFD) IterDirents(ctx context.Context, cb vfs.IterDirentsCallbac
// Seek implements vfs.FileDescriptionImpl.Seek.
func (fd *subtasksFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
if fd.task.ExitState() >= kernel.TaskExitZombie {
- return 0, syserror.ENOENT
+ return 0, linuxerr.ENOENT
}
return fd.GenericDirectoryFD.Seek(ctx, offset, whence)
}
@@ -139,7 +139,7 @@ func (fd *subtasksFD) Seek(ctx context.Context, offset int64, whence int32) (int
// Stat implements vfs.FileDescriptionImpl.Stat.
func (fd *subtasksFD) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
if fd.task.ExitState() >= kernel.TaskExitZombie {
- return linux.Statx{}, syserror.ENOENT
+ return linux.Statx{}, linuxerr.ENOENT
}
return fd.GenericDirectoryFD.Stat(ctx, opts)
}
@@ -147,7 +147,7 @@ func (fd *subtasksFD) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Sta
// SetStat implements vfs.FileDescriptionImpl.SetStat.
func (fd *subtasksFD) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
if fd.task.ExitState() >= kernel.TaskExitZombie {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
return fd.GenericDirectoryFD.SetStat(ctx, opts)
}
@@ -180,7 +180,7 @@ func (i *subtasksInode) Stat(ctx context.Context, vsfs *vfs.Filesystem, opts vfs
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*subtasksInode) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// DecRef implements kernfs.Inode.DecRef.
diff --git a/pkg/sentry/fsimpl/proc/task.go b/pkg/sentry/fsimpl/proc/task.go
index d05cc1508..f54811edf 100644
--- a/pkg/sentry/fsimpl/proc/task.go
+++ b/pkg/sentry/fsimpl/proc/task.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// taskInode represents the inode for /proc/PID/ directory.
@@ -49,7 +49,7 @@ var _ kernfs.Inode = (*taskInode)(nil)
func (fs *filesystem) newTaskInode(ctx context.Context, task *kernel.Task, pidns *kernel.PIDNamespace, isThreadGroup bool, fakeCgroupControllers map[string]string) (kernfs.Inode, error) {
if task.ExitState() == kernel.TaskExitDead {
- return nil, syserror.ESRCH
+ return nil, linuxerr.ESRCH
}
contents := map[string]kernfs.Inode{
@@ -65,8 +65,8 @@ func (fs *filesystem) newTaskInode(ctx context.Context, task *kernel.Task, pidns
"io": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0400, newIO(task, isThreadGroup)),
"maps": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mapsData{task: task}),
"mem": fs.newMemInode(ctx, task, fs.NextIno(), 0400),
- "mountinfo": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mountInfoData{task: task}),
- "mounts": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mountsData{task: task}),
+ "mountinfo": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mountInfoData{fs: fs, task: task}),
+ "mounts": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &mountsData{fs: fs, task: task}),
"net": fs.newTaskNetDir(ctx, task),
"ns": fs.newTaskOwnedDir(ctx, task, fs.NextIno(), 0511, map[string]kernfs.Inode{
"net": fs.newNamespaceSymlink(ctx, task, fs.NextIno(), "net"),
@@ -78,7 +78,7 @@ func (fs *filesystem) newTaskInode(ctx context.Context, task *kernel.Task, pidns
"smaps": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &smapsData{task: task}),
"stat": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &taskStatData{task: task, pidns: pidns, tgstats: isThreadGroup}),
"statm": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &statmData{task: task}),
- "status": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0444, &statusData{task: task, pidns: pidns}),
+ "status": fs.newStatusInode(ctx, task, pidns, fs.NextIno(), 0444),
"uid_map": fs.newTaskOwnedInode(ctx, task, fs.NextIno(), 0644, &idMapData{task: task, gids: false}),
}
if isThreadGroup {
@@ -124,7 +124,7 @@ func (i *taskInode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.D
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*taskInode) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// DecRef implements kernfs.Inode.DecRef.
diff --git a/pkg/sentry/fsimpl/proc/task_fds.go b/pkg/sentry/fsimpl/proc/task_fds.go
index 4718fac7a..5c6412fc0 100644
--- a/pkg/sentry/fsimpl/proc/task_fds.go
+++ b/pkg/sentry/fsimpl/proc/task_fds.go
@@ -22,11 +22,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
func getTaskFD(t *kernel.Task, fd int32) (*vfs.FileDescription, kernel.FDFlags) {
@@ -42,12 +42,12 @@ func getTaskFD(t *kernel.Task, fd int32) (*vfs.FileDescription, kernel.FDFlags)
return file, flags
}
-func taskFDExists(ctx context.Context, t *kernel.Task, fd int32) bool {
+func taskFDExists(ctx context.Context, fs *filesystem, t *kernel.Task, fd int32) bool {
file, _ := getTaskFD(t, fd)
if file == nil {
return false
}
- file.DecRef(ctx)
+ fs.SafeDecRefFD(ctx, file)
return true
}
@@ -142,11 +142,11 @@ func (i *fdDirInode) IterDirents(ctx context.Context, mnt *vfs.Mount, cb vfs.Ite
func (i *fdDirInode) Lookup(ctx context.Context, name string) (kernfs.Inode, error) {
fdInt, err := strconv.ParseInt(name, 10, 32)
if err != nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
fd := int32(fdInt)
- if !taskFDExists(ctx, i.task, fd) {
- return nil, syserror.ENOENT
+ if !taskFDExists(ctx, i.fs, i.task, fd) {
+ return nil, linuxerr.ENOENT
}
return i.fs.newFDSymlink(ctx, i.task, fd, i.fs.NextIno()), nil
}
@@ -198,6 +198,7 @@ type fdSymlink struct {
kernfs.InodeNoopRefCount
kernfs.InodeSymlink
+ fs *filesystem
task *kernel.Task
fd int32
}
@@ -206,6 +207,7 @@ var _ kernfs.Inode = (*fdSymlink)(nil)
func (fs *filesystem) newFDSymlink(ctx context.Context, task *kernel.Task, fd int32, ino uint64) kernfs.Inode {
inode := &fdSymlink{
+ fs: fs,
task: task,
fd: fd,
}
@@ -216,11 +218,11 @@ func (fs *filesystem) newFDSymlink(ctx context.Context, task *kernel.Task, fd in
func (s *fdSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error) {
file, _ := getTaskFD(s.task, s.fd)
if file == nil {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
- defer file.DecRef(ctx)
+ defer s.fs.SafeDecRefFD(ctx, file)
root := vfs.RootFromContext(ctx)
- defer root.DecRef(ctx)
+ defer s.fs.SafeDecRef(ctx, root)
// Note: it's safe to reenter kernfs from Readlink if needed to resolve path.
return s.task.Kernel().VFS().PathnameWithDeleted(ctx, root, file.VirtualDentry())
@@ -229,9 +231,9 @@ func (s *fdSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error)
func (s *fdSymlink) Getlink(ctx context.Context, mnt *vfs.Mount) (vfs.VirtualDentry, string, error) {
file, _ := getTaskFD(s.task, s.fd)
if file == nil {
- return vfs.VirtualDentry{}, "", syserror.ENOENT
+ return vfs.VirtualDentry{}, "", linuxerr.ENOENT
}
- defer file.DecRef(ctx)
+ defer s.fs.SafeDecRefFD(ctx, file)
vd := file.VirtualDentry()
vd.IncRef()
return vd, "", nil
@@ -239,7 +241,7 @@ func (s *fdSymlink) Getlink(ctx context.Context, mnt *vfs.Mount) (vfs.VirtualDen
// Valid implements kernfs.Inode.Valid.
func (s *fdSymlink) Valid(ctx context.Context) bool {
- return taskFDExists(ctx, s.task, s.fd)
+ return taskFDExists(ctx, s.fs, s.task, s.fd)
}
// fdInfoDirInode represents the inode for /proc/[pid]/fdinfo directory.
@@ -276,13 +278,14 @@ func (fs *filesystem) newFDInfoDirInode(ctx context.Context, task *kernel.Task)
func (i *fdInfoDirInode) Lookup(ctx context.Context, name string) (kernfs.Inode, error) {
fdInt, err := strconv.ParseInt(name, 10, 32)
if err != nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
fd := int32(fdInt)
- if !taskFDExists(ctx, i.task, fd) {
- return nil, syserror.ENOENT
+ if !taskFDExists(ctx, i.fs, i.task, fd) {
+ return nil, linuxerr.ENOENT
}
data := &fdInfoData{
+ fs: i.fs,
task: i.task,
fd: fd,
}
@@ -316,6 +319,7 @@ func (i *fdInfoDirInode) DecRef(ctx context.Context) {
type fdInfoData struct {
kernfs.DynamicBytesFile
+ fs *filesystem
task *kernel.Task
fd int32
}
@@ -326,9 +330,9 @@ var _ dynamicInode = (*fdInfoData)(nil)
func (d *fdInfoData) Generate(ctx context.Context, buf *bytes.Buffer) error {
file, descriptorFlags := getTaskFD(d.task, d.fd)
if file == nil {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
- defer file.DecRef(ctx)
+ defer d.fs.SafeDecRefFD(ctx, file)
// TODO(b/121266871): Include pos, locks, and other data. For now we only
// have flags.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
@@ -339,5 +343,5 @@ func (d *fdInfoData) Generate(ctx context.Context, buf *bytes.Buffer) error {
// Valid implements kernfs.Inode.Valid.
func (d *fdInfoData) Valid(ctx context.Context) bool {
- return taskFDExists(ctx, d.task, d.fd)
+ return taskFDExists(ctx, d.fs, d.task, d.fd)
}
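The procfs changes above plumb the filesystem into fdSymlink and fdInfoData so that file references are dropped with SafeDecRefFD instead of a direct DecRef: Readlink, Getlink, and Valid can be reached while kernfs.Filesystem.mu is held, and SafeDecRefFD defers the DecRef for dentries on the same kernfs filesystem. A condensed sketch of the pattern; withTaskFD is hypothetical.

func withTaskFD(ctx context.Context, fs *filesystem, t *kernel.Task, fd int32, fn func(*vfs.FileDescription)) bool {
	file, _ := getTaskFD(t, fd)
	if file == nil {
		return false
	}
	// Deferred DecRef if file's dentry lives on this kernfs filesystem,
	// immediate DecRef otherwise.
	defer fs.SafeDecRefFD(ctx, file)
	fn(file)
	return true
}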
diff --git a/pkg/sentry/fsimpl/proc/task_files.go b/pkg/sentry/fsimpl/proc/task_files.go
index b294dfd6a..d3f9cf489 100644
--- a/pkg/sentry/fsimpl/proc/task_files.go
+++ b/pkg/sentry/fsimpl/proc/task_files.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
@@ -32,7 +33,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -40,7 +40,7 @@ import (
// Linux 3.18, the limit is five lines." - user_namespaces(7)
const maxIDMapLines = 5
-// mm gets the kernel task's MemoryManager. No additional reference is taken on
+// getMM gets the kernel task's MemoryManager. No additional reference is taken on
// mm here. This is safe because MemoryManager.destroy is required to leave the
// MemoryManager in a state where it's still usable as a DynamicBytesSource.
func getMM(task *kernel.Task) *mm.MemoryManager {
@@ -70,9 +70,9 @@ func getMMIncRef(task *kernel.Task) (*mm.MemoryManager, error) {
func checkTaskState(t *kernel.Task) error {
switch t.ExitState() {
case kernel.TaskExitZombie:
- return syserror.EACCES
+ return linuxerr.EACCES
case kernel.TaskExitDead:
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
return nil
}
@@ -109,7 +109,7 @@ var _ dynamicInode = (*auxvData)(nil)
// Generate implements vfs.DynamicBytesSource.Generate.
func (d *auxvData) Generate(ctx context.Context, buf *bytes.Buffer) error {
if d.task.ExitState() == kernel.TaskExitDead {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
m, err := getMMIncRef(d.task)
if err != nil {
@@ -159,7 +159,7 @@ var _ dynamicInode = (*cmdlineData)(nil)
// Generate implements vfs.DynamicBytesSource.Generate.
func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
if d.task.ExitState() == kernel.TaskExitDead {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
m, err := getMMIncRef(d.task)
if err != nil {
@@ -227,7 +227,7 @@ func (d *cmdlineData) Generate(ctx context.Context, buf *bytes.Buffer) error {
if int(arEnvv.Length()) > remaining {
end, ok := arEnvv.Start.AddLength(uint64(remaining))
if !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
arEnvv.End = end
}
@@ -325,7 +325,7 @@ func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset in
// the file ..." - user_namespaces(7)
srclen := src.NumBytes()
if srclen >= hostarch.PageSize || offset != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
b := make([]byte, srclen)
if _, err := src.CopyIn(ctx, b); err != nil {
@@ -345,7 +345,7 @@ func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset in
}
lines := bytes.SplitN(b, []byte("\n"), maxIDMapLines+1)
if len(lines) > maxIDMapLines {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
entries := make([]auth.IDMapEntry, len(lines))
@@ -353,7 +353,7 @@ func (d *idMapData) Write(ctx context.Context, src usermem.IOSequence, offset in
var e auth.IDMapEntry
_, err := fmt.Sscan(string(l), &e.FirstID, &e.FirstParentID, &e.Length)
if err != nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
entries[i] = e
}
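
For reference, each line accepted by this write path has the three-field form documented in user_namespaces(7): the first ID inside the namespace, the first ID in the parent namespace, and the length of the mapped range. A tiny standalone illustration of the same fmt.Sscan parse (the values are made up):

package main

import "fmt"

func main() {
	// "0 1000 1" maps ID 0 in the child namespace to ID 1000 in the parent,
	// for a range of length 1. Malformed lines are rejected with EINVAL above.
	var firstID, firstParentID, length uint32
	if _, err := fmt.Sscan("0 1000 1", &firstID, &firstParentID, &length); err != nil {
		fmt.Println("malformed mapping line:", err)
		return
	}
	fmt.Println(firstID, firstParentID, length) // 0 1000 1
}
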
@@ -408,7 +408,7 @@ func (f *memInode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.De
// Permission to read this file is governed by PTRACE_MODE_ATTACH_FSCREDS
// Since we don't implement setfsuid/setfsgid we can just use PTRACE_MODE_ATTACH.
if !kernel.ContextCanTrace(ctx, f.task, true) {
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
if err := checkTaskState(f.task); err != nil {
return nil, err
@@ -422,7 +422,7 @@ func (f *memInode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.De
// SetStat implements kernfs.Inode.SetStat.
func (*memInode) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
var _ vfs.FileDescriptionImpl = (*memFD)(nil)
@@ -461,10 +461,10 @@ func (fd *memFD) Seek(ctx context.Context, offset int64, whence int32) (int64, e
case linux.SEEK_CUR:
offset += fd.offset
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
fd.offset = offset
return offset, nil
@@ -485,12 +485,12 @@ func (fd *memFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64
n, readErr := m.CopyIn(ctx, hostarch.Addr(offset), buf, usermem.IOOpts{IgnorePermissions: true})
if n > 0 {
if _, err := dst.CopyOut(ctx, buf[:n]); err != nil {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
return int64(n), nil
}
if readErr != nil {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
return 0, nil
}
@@ -512,7 +512,7 @@ func (fd *memFD) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, e
// SetStat implements vfs.FileDescriptionImpl.SetStat.
func (fd *memFD) SetStat(context.Context, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Release implements vfs.FileDescriptionImpl.Release.
@@ -608,12 +608,10 @@ func (s *taskStatData) Generate(ctx context.Context, buf *bytes.Buffer) error {
fmt.Fprintf(buf, "%d ", linux.ClockTFromDuration(s.task.StartTime().Sub(s.task.Kernel().Timekeeper().BootTime())))
var vss, rss uint64
- s.task.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- }
- })
+ if mm := getMM(s.task); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ }
fmt.Fprintf(buf, "%d %d ", vss, rss/hostarch.PageSize)
// rsslim.
@@ -649,63 +647,159 @@ var _ dynamicInode = (*statmData)(nil)
// Generate implements vfs.DynamicBytesSource.Generate.
func (s *statmData) Generate(ctx context.Context, buf *bytes.Buffer) error {
var vss, rss uint64
- s.task.WithMuLocked(func(t *kernel.Task) {
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- }
- })
-
+ if mm := getMM(s.task); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ }
fmt.Fprintf(buf, "%d %d 0 0 0 0 0\n", vss/hostarch.PageSize, rss/hostarch.PageSize)
return nil
}
-// statusData implements vfs.DynamicBytesSource for /proc/[pid]/status.
+// statusInode implements kernfs.Inode for /proc/[pid]/status.
//
// +stateify savable
-type statusData struct {
- kernfs.DynamicBytesFile
+type statusInode struct {
+ kernfs.InodeAttrs
+ kernfs.InodeNoStatFS
+ kernfs.InodeNoopRefCount
+ kernfs.InodeNotDirectory
+ kernfs.InodeNotSymlink
task *kernel.Task
pidns *kernel.PIDNamespace
+ locks vfs.FileLocks
+}
+
+// statusFD implements vfs.FileDescriptionImpl and vfs.DynamicByteSource for
+// /proc/[pid]/status.
+//
+// +stateify savable
+type statusFD struct {
+ statusFDLowerBase
+ vfs.DynamicBytesFileDescriptionImpl
+ vfs.LockFD
+
+ vfsfd vfs.FileDescription
+
+ inode *statusInode
+ task *kernel.Task
+ pidns *kernel.PIDNamespace
+ userns *auth.UserNamespace // equivalent to struct file::f_cred::user_ns
}
-var _ dynamicInode = (*statusData)(nil)
+// statusFDLowerBase is a dumb hack to ensure that statusFD prefers
+// vfs.DynamicBytesFileDescriptionImpl methods to vfs.FileDescriptionDefaultImpl
+// methods.
+//
+// +stateify savable
+type statusFDLowerBase struct {
+ vfs.FileDescriptionDefaultImpl
+}
+
+func (fs *filesystem) newStatusInode(ctx context.Context, task *kernel.Task, pidns *kernel.PIDNamespace, ino uint64, perm linux.FileMode) kernfs.Inode {
+ // Note: credentials are overridden by taskOwnedInode.
+ inode := &statusInode{
+ task: task,
+ pidns: pidns,
+ }
+ inode.InodeAttrs.Init(ctx, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, ino, linux.ModeRegular|perm)
+ return &taskOwnedInode{Inode: inode, owner: task}
+}
+
+// Open implements kernfs.Inode.Open.
+func (s *statusInode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
+ fd := &statusFD{
+ inode: s,
+ task: s.task,
+ pidns: s.pidns,
+ userns: rp.Credentials().UserNamespace,
+ }
+ fd.LockFD.Init(&s.locks)
+ if err := fd.vfsfd.Init(fd, opts.Flags, rp.Mount(), d.VFSDentry(), &vfs.FileDescriptionOptions{}); err != nil {
+ return nil, err
+ }
+ fd.SetDataSource(fd)
+ return &fd.vfsfd, nil
+}
+
+// SetStat implements kernfs.Inode.SetStat.
+func (*statusInode) SetStat(ctx context.Context, vfsfs *vfs.Filesystem, creds *auth.Credentials, opts vfs.SetStatOptions) error {
+ return linuxerr.EPERM
+}
+
+// Release implements vfs.FileDescriptionImpl.Release.
+func (s *statusFD) Release(ctx context.Context) {
+}
+
+// Stat implements vfs.FileDescriptionImpl.Stat.
+func (s *statusFD) Stat(ctx context.Context, opts vfs.StatOptions) (linux.Statx, error) {
+ fs := s.vfsfd.VirtualDentry().Mount().Filesystem()
+ return s.inode.Stat(ctx, fs, opts)
+}
+
+// SetStat implements vfs.FileDescriptionImpl.SetStat.
+func (s *statusFD) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
+ return linuxerr.EPERM
+}
// Generate implements vfs.DynamicBytesSource.Generate.
-func (s *statusData) Generate(ctx context.Context, buf *bytes.Buffer) error {
+func (s *statusFD) Generate(ctx context.Context, buf *bytes.Buffer) error {
fmt.Fprintf(buf, "Name:\t%s\n", s.task.Name())
fmt.Fprintf(buf, "State:\t%s\n", s.task.StateStatus())
fmt.Fprintf(buf, "Tgid:\t%d\n", s.pidns.IDOfThreadGroup(s.task.ThreadGroup()))
fmt.Fprintf(buf, "Pid:\t%d\n", s.pidns.IDOfTask(s.task))
+
ppid := kernel.ThreadID(0)
if parent := s.task.Parent(); parent != nil {
ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup())
}
fmt.Fprintf(buf, "PPid:\t%d\n", ppid)
+
tpid := kernel.ThreadID(0)
if tracer := s.task.Tracer(); tracer != nil {
tpid = s.pidns.IDOfTask(tracer)
}
fmt.Fprintf(buf, "TracerPid:\t%d\n", tpid)
+
+ creds := s.task.Credentials()
+ ruid := creds.RealKUID.In(s.userns).OrOverflow()
+ euid := creds.EffectiveKUID.In(s.userns).OrOverflow()
+ suid := creds.SavedKUID.In(s.userns).OrOverflow()
+ rgid := creds.RealKGID.In(s.userns).OrOverflow()
+ egid := creds.EffectiveKGID.In(s.userns).OrOverflow()
+ sgid := creds.SavedKGID.In(s.userns).OrOverflow()
var fds int
var vss, rss, data uint64
s.task.WithMuLocked(func(t *kernel.Task) {
if fdTable := t.FDTable(); fdTable != nil {
fds = fdTable.CurrentMaxFDs()
}
- if mm := t.MemoryManager(); mm != nil {
- vss = mm.VirtualMemorySize()
- rss = mm.ResidentSetSize()
- data = mm.VirtualDataSize()
- }
})
+ if mm := getMM(s.task); mm != nil {
+ vss = mm.VirtualMemorySize()
+ rss = mm.ResidentSetSize()
+ data = mm.VirtualDataSize()
+ }
+ // Filesystem user/group IDs aren't implemented; effective UID/GID are used
+ // instead.
+ fmt.Fprintf(buf, "Uid:\t%d\t%d\t%d\t%d\n", ruid, euid, suid, euid)
+ fmt.Fprintf(buf, "Gid:\t%d\t%d\t%d\t%d\n", rgid, egid, sgid, egid)
fmt.Fprintf(buf, "FDSize:\t%d\n", fds)
+ buf.WriteString("Groups:\t ")
+ // There is a space between each pair of supplemental GIDs, as well as an
+ // unconditional trailing space that some applications actually depend on.
+ var sep string
+ for _, kgid := range creds.ExtraKGIDs {
+ fmt.Fprintf(buf, "%s%d", sep, kgid.In(s.userns).OrOverflow())
+ sep = " "
+ }
+ buf.WriteString(" \n")
+
fmt.Fprintf(buf, "VmSize:\t%d kB\n", vss>>10)
fmt.Fprintf(buf, "VmRSS:\t%d kB\n", rss>>10)
fmt.Fprintf(buf, "VmData:\t%d kB\n", data>>10)
+
fmt.Fprintf(buf, "Threads:\t%d\n", s.task.ThreadGroup().Count())
- creds := s.task.Credentials()
fmt.Fprintf(buf, "CapInh:\t%016x\n", creds.InheritableCaps)
fmt.Fprintf(buf, "CapPrm:\t%016x\n", creds.PermittedCaps)
fmt.Fprintf(buf, "CapEff:\t%016x\n", creds.EffectiveCaps)
@@ -762,7 +856,7 @@ var _ vfs.WritableDynamicBytesSource = (*oomScoreAdj)(nil)
// Generate implements vfs.DynamicBytesSource.Generate.
func (o *oomScoreAdj) Generate(ctx context.Context, buf *bytes.Buffer) error {
if o.task.ExitState() == kernel.TaskExitDead {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
fmt.Fprintf(buf, "%d\n", o.task.OOMScoreAdj())
return nil
@@ -784,7 +878,7 @@ func (o *oomScoreAdj) Write(ctx context.Context, src usermem.IOSequence, offset
}
if o.task.ExitState() == kernel.TaskExitDead {
- return 0, syserror.ESRCH
+ return 0, linuxerr.ESRCH
}
if err := o.task.SetOOMScoreAdj(v); err != nil {
return 0, err
@@ -802,13 +896,17 @@ type exeSymlink struct {
kernfs.InodeNoopRefCount
kernfs.InodeSymlink
+ fs *filesystem
task *kernel.Task
}
var _ kernfs.Inode = (*exeSymlink)(nil)
func (fs *filesystem) newExeSymlink(ctx context.Context, task *kernel.Task, ino uint64) kernfs.Inode {
- inode := &exeSymlink{task: task}
+ inode := &exeSymlink{
+ fs: fs,
+ task: task,
+ }
inode.Init(ctx, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, ino, linux.ModeSymlink|0777)
return inode
}
@@ -819,14 +917,14 @@ func (s *exeSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error)
if err != nil {
return "", err
}
- defer exec.DecRef(ctx)
+ defer s.fs.SafeDecRef(ctx, exec)
root := vfs.RootFromContext(ctx)
if !root.Ok() {
// It could have raced with process deletion.
- return "", syserror.ESRCH
+ return "", linuxerr.ESRCH
}
- defer root.DecRef(ctx)
+ defer s.fs.SafeDecRef(ctx, root)
vfsObj := exec.Mount().Filesystem().VirtualFilesystem()
name, _ := vfsObj.PathnameWithDeleted(ctx, root, exec)
@@ -836,31 +934,23 @@ func (s *exeSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error)
// Getlink implements kernfs.Inode.Getlink.
func (s *exeSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDentry, string, error) {
if !kernel.ContextCanTrace(ctx, s.task, false) {
- return vfs.VirtualDentry{}, "", syserror.EACCES
+ return vfs.VirtualDentry{}, "", linuxerr.EACCES
}
if err := checkTaskState(s.task); err != nil {
return vfs.VirtualDentry{}, "", err
}
- var err error
- var exec fsbridge.File
- s.task.WithMuLocked(func(t *kernel.Task) {
- mm := t.MemoryManager()
- if mm == nil {
- err = syserror.EACCES
- return
- }
+ mm := getMM(s.task)
+ if mm == nil {
+ return vfs.VirtualDentry{}, "", linuxerr.EACCES
+ }
- // The MemoryManager may be destroyed, in which case
- // MemoryManager.destroy will simply set the executable to nil
- // (with locks held).
- exec = mm.Executable()
- if exec == nil {
- err = syserror.ESRCH
- }
- })
- if err != nil {
- return vfs.VirtualDentry{}, "", err
+ // The MemoryManager may be destroyed, in which case
+ // MemoryManager.destroy will simply set the executable to nil
+ // (with locks held).
+ exec := mm.Executable()
+ if exec == nil {
+ return vfs.VirtualDentry{}, "", linuxerr.ESRCH
}
defer exec.DecRef(ctx)
@@ -878,13 +968,17 @@ type cwdSymlink struct {
kernfs.InodeNoopRefCount
kernfs.InodeSymlink
+ fs *filesystem
task *kernel.Task
}
var _ kernfs.Inode = (*cwdSymlink)(nil)
func (fs *filesystem) newCwdSymlink(ctx context.Context, task *kernel.Task, ino uint64) kernfs.Inode {
- inode := &cwdSymlink{task: task}
+ inode := &cwdSymlink{
+ fs: fs,
+ task: task,
+ }
inode.Init(ctx, task.Credentials(), linux.UNNAMED_MAJOR, fs.devMinor, ino, linux.ModeSymlink|0777)
return inode
}
@@ -895,14 +989,14 @@ func (s *cwdSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error)
if err != nil {
return "", err
}
- defer cwd.DecRef(ctx)
+ defer s.fs.SafeDecRef(ctx, cwd)
root := vfs.RootFromContext(ctx)
if !root.Ok() {
// It could have raced with process deletion.
- return "", syserror.ESRCH
+ return "", linuxerr.ESRCH
}
- defer root.DecRef(ctx)
+ defer s.fs.SafeDecRef(ctx, root)
vfsObj := cwd.Mount().Filesystem().VirtualFilesystem()
name, _ := vfsObj.PathnameWithDeleted(ctx, root, cwd)
@@ -912,7 +1006,7 @@ func (s *cwdSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error)
// Getlink implements kernfs.Inode.Getlink.
func (s *cwdSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDentry, string, error) {
if !kernel.ContextCanTrace(ctx, s.task, false) {
- return vfs.VirtualDentry{}, "", syserror.EACCES
+ return vfs.VirtualDentry{}, "", linuxerr.EACCES
}
if err := checkTaskState(s.task); err != nil {
return vfs.VirtualDentry{}, "", err
@@ -920,8 +1014,9 @@ func (s *cwdSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDent
cwd := s.task.FSContext().WorkingDirectoryVFS2()
if !cwd.Ok() {
// It could have raced with process deletion.
- return vfs.VirtualDentry{}, "", syserror.ESRCH
+ return vfs.VirtualDentry{}, "", linuxerr.ESRCH
}
+ // The reference is transferred to the caller.
return cwd, "", nil
}
@@ -931,6 +1026,7 @@ func (s *cwdSymlink) Getlink(ctx context.Context, _ *vfs.Mount) (vfs.VirtualDent
type mountInfoData struct {
kernfs.DynamicBytesFile
+ fs *filesystem
task *kernel.Task
}
@@ -951,7 +1047,7 @@ func (i *mountInfoData) Generate(ctx context.Context, buf *bytes.Buffer) error {
// Root has been destroyed. Don't try to read mounts.
return nil
}
- defer rootDir.DecRef(ctx)
+ defer i.fs.SafeDecRef(ctx, rootDir)
i.task.Kernel().VFS().GenerateProcMountInfo(ctx, rootDir, buf)
return nil
}
@@ -962,6 +1058,7 @@ func (i *mountInfoData) Generate(ctx context.Context, buf *bytes.Buffer) error {
type mountsData struct {
kernfs.DynamicBytesFile
+ fs *filesystem
task *kernel.Task
}
@@ -982,7 +1079,7 @@ func (i *mountsData) Generate(ctx context.Context, buf *bytes.Buffer) error {
// Root has been destroyed. Don't try to read mounts.
return nil
}
- defer rootDir.DecRef(ctx)
+ defer i.fs.SafeDecRef(ctx, rootDir)
i.task.Kernel().VFS().GenerateProcMounts(ctx, rootDir, buf)
return nil
}
@@ -1123,7 +1220,7 @@ func (d *taskCgroupData) Generate(ctx context.Context, buf *bytes.Buffer) error
// exit this file show a task is in no cgroups, which is incorrect. Instead,
// once a task has left its cgroups, we return an error.
if d.task.ExitState() >= kernel.TaskExitInitiated {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
d.task.GenerateProcTaskCgroup(buf)
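
The statusFDLowerBase type introduced above leans on Go's method-promotion rules: two methods embedded at the same depth are ambiguous, but pushing one of them a level deeper lets the shallower one win. A self-contained illustration of just that mechanism (the type names are illustrative, not the vfs types):

package main

import "fmt"

type Default struct{}

func (Default) Read() string { return "default implementation" }

type Dynamic struct{}

func (Dynamic) Read() string { return "dynamic-bytes implementation" }

// If Default and Dynamic were embedded side by side, fd.Read would be
// ambiguous and any call to it would fail to compile. Embedding Default
// through an extra "lower base" struct pushes it one level deeper, so
// Dynamic.Read is the one that gets promoted.
type lowerBase struct {
	Default
}

type fd struct {
	lowerBase
	Dynamic
}

func main() {
	fmt.Println(fd{}.Read()) // "dynamic-bytes implementation"
}
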
diff --git a/pkg/sentry/fsimpl/proc/task_net.go b/pkg/sentry/fsimpl/proc/task_net.go
index 177cb828f..ab47ea5a7 100644
--- a/pkg/sentry/fsimpl/proc/task_net.go
+++ b/pkg/sentry/fsimpl/proc/task_net.go
@@ -23,6 +23,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
@@ -33,7 +34,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/header"
)
@@ -679,7 +679,7 @@ func (d *netSnmpData) Generate(ctx context.Context, buf *bytes.Buffer) error {
continue
}
if err := d.stack.Statistics(stat, line.prefix); err != nil {
- if err == syserror.EOPNOTSUPP {
+ if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
log.Infof("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err)
} else {
log.Warningf("Failed to retrieve %s of /proc/net/snmp: %v", line.prefix, err)
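
Besides swapping the imported package, the check above changes shape: instead of comparing the returned error directly against a sentinel, the code asks linuxerr.Equals whether it matches the expected errno, with the expected value passed first, as in every converted comparison in this series. A minimal sketch of a helper written in that style (isNotSupported is a hypothetical name):

package sketch

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// isNotSupported reports whether err represents EOPNOTSUPP, using the
// comparison style these hunks migrate to.
func isNotSupported(err error) bool {
	return linuxerr.Equals(linuxerr.EOPNOTSUPP, err)
}
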
diff --git a/pkg/sentry/fsimpl/proc/tasks.go b/pkg/sentry/fsimpl/proc/tasks.go
index cf905fae4..26d44744b 100644
--- a/pkg/sentry/fsimpl/proc/tasks.go
+++ b/pkg/sentry/fsimpl/proc/tasks.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -116,12 +116,12 @@ func (i *tasksInode) Lookup(ctx context.Context, name string) (kernfs.Inode, err
case threadSelfName:
return i.newThreadSelfSymlink(ctx, root), nil
}
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
task := i.pidns.TaskWithID(kernel.ThreadID(tid))
if task == nil {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
return i.fs.newTaskInode(ctx, task, i.pidns, true, i.fakeCgroupControllers)
diff --git a/pkg/sentry/fsimpl/proc/tasks_files.go b/pkg/sentry/fsimpl/proc/tasks_files.go
index 045ed7a2d..4d3a2f7e6 100644
--- a/pkg/sentry/fsimpl/proc/tasks_files.go
+++ b/pkg/sentry/fsimpl/proc/tasks_files.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// +stateify savable
@@ -53,11 +53,11 @@ func (s *selfSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string, error
t := kernel.TaskFromContext(ctx)
if t == nil {
// Who is reading this link?
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
if tgid == 0 {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
return strconv.FormatUint(uint64(tgid), 10), nil
}
@@ -69,7 +69,7 @@ func (s *selfSymlink) Getlink(ctx context.Context, mnt *vfs.Mount) (vfs.VirtualD
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*selfSymlink) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// +stateify savable
@@ -94,12 +94,12 @@ func (s *threadSelfSymlink) Readlink(ctx context.Context, _ *vfs.Mount) (string,
t := kernel.TaskFromContext(ctx)
if t == nil {
// Who is reading this link?
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup())
tid := s.pidns.IDOfTask(t)
if tid == 0 || tgid == 0 {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
return fmt.Sprintf("%d/task/%d", tgid, tid), nil
}
@@ -111,7 +111,7 @@ func (s *threadSelfSymlink) Getlink(ctx context.Context, mnt *vfs.Mount) (vfs.Vi
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*threadSelfSymlink) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// dynamicBytesFileSetAttr implements a special file that allows inode
diff --git a/pkg/sentry/fsimpl/proc/tasks_sys.go b/pkg/sentry/fsimpl/proc/tasks_sys.go
index 2bc98a94f..99f64a9d8 100644
--- a/pkg/sentry/fsimpl/proc/tasks_sys.go
+++ b/pkg/sentry/fsimpl/proc/tasks_sys.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -209,7 +209,7 @@ func (d *tcpSackData) Generate(ctx context.Context, buf *bytes.Buffer) error {
func (d *tcpSackData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// No need to handle partial writes thus far.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -257,7 +257,7 @@ func (d *tcpRecoveryData) Generate(ctx context.Context, buf *bytes.Buffer) error
func (d *tcpRecoveryData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// No need to handle partial writes thus far.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -311,7 +311,7 @@ func (d *tcpMemData) Generate(ctx context.Context, buf *bytes.Buffer) error {
func (d *tcpMemData) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// No need to handle partial writes thus far.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -396,7 +396,7 @@ func (ipf *ipForwarding) Generate(ctx context.Context, buf *bytes.Buffer) error
func (ipf *ipForwarding) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// No need to handle partial writes thus far.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -449,7 +449,7 @@ func (pr *portRange) Generate(ctx context.Context, buf *bytes.Buffer) error {
func (pr *portRange) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// No need to handle partial writes thus far.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -467,7 +467,7 @@ func (pr *portRange) Write(ctx context.Context, src usermem.IOSequence, offset i
// Port numbers must be uint16s.
if ports[0] < 0 || ports[1] < 0 || ports[0] > math.MaxUint16 || ports[1] > math.MaxUint16 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if err := pr.stack.SetPortRange(uint16(ports[0]), uint16(ports[1])); err != nil {
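
The writable files in tasks_sys.go all follow the same write discipline seen in these hunks: non-zero offsets are rejected with EINVAL instead of supporting partial writes, empty writes succeed as no-ops, and only then is the value parsed, validated, and stored. A condensed, hypothetical example of that pattern (exampleSysctl, its 0/1 bounds, and the strconv-based parsing are made up for illustration; the real files use their own helpers):

package sketch

import (
	"strconv"
	"strings"
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/usermem"
)

// exampleSysctl is a hypothetical boolean sysctl backed by an int32.
type exampleSysctl struct {
	value int32
}

// Write follows the offset/empty-write/parse/validate/store pattern above.
func (s *exampleSysctl) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}
	buf := make([]byte, src.NumBytes())
	n, err := src.CopyIn(ctx, buf)
	if err != nil {
		return 0, err
	}
	v, err := strconv.ParseInt(strings.TrimSpace(string(buf[:n])), 10, 32)
	if err != nil || v < 0 || v > 1 {
		return 0, linuxerr.EINVAL
	}
	atomic.StoreInt32(&s.value, int32(v))
	return int64(n), nil
}
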
diff --git a/pkg/sentry/fsimpl/proc/tasks_test.go b/pkg/sentry/fsimpl/proc/tasks_test.go
index e534fbca8..14f806c3c 100644
--- a/pkg/sentry/fsimpl/proc/tasks_test.go
+++ b/pkg/sentry/fsimpl/proc/tasks_test.go
@@ -23,13 +23,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -227,7 +227,7 @@ func TestTasks(t *testing.T) {
defer fd.DecRef(s.Ctx)
buf := make([]byte, 1)
bufIOSeq := usermem.BytesIOSequence(buf)
- if _, err := fd.Read(s.Ctx, bufIOSeq, vfs.ReadOptions{}); err != syserror.EISDIR {
+ if _, err := fd.Read(s.Ctx, bufIOSeq, vfs.ReadOptions{}); !linuxerr.Equals(linuxerr.EISDIR, err) {
t.Errorf("wrong error reading directory: %v", err)
}
}
@@ -237,7 +237,7 @@ func TestTasks(t *testing.T) {
s.Creds,
s.PathOpAtRoot("/proc/9999"),
&vfs.OpenOptions{},
- ); err != syserror.ENOENT {
+ ); !linuxerr.Equals(linuxerr.ENOENT, err) {
t.Fatalf("wrong error from vfsfs.OpenAt(/proc/9999): %v", err)
}
}
diff --git a/pkg/sentry/fsimpl/proc/yama.go b/pkg/sentry/fsimpl/proc/yama.go
index e039ec45e..7240563d7 100644
--- a/pkg/sentry/fsimpl/proc/yama.go
+++ b/pkg/sentry/fsimpl/proc/yama.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -56,7 +56,7 @@ func (s *yamaPtraceScope) Generate(ctx context.Context, buf *bytes.Buffer) error
func (s *yamaPtraceScope) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
if offset != 0 {
// Ignore partial writes.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if src.NumBytes() == 0 {
return 0, nil
@@ -73,7 +73,7 @@ func (s *yamaPtraceScope) Write(ctx context.Context, src usermem.IOSequence, off
// We do not support YAMA levels > YAMA_SCOPE_RELATIONAL.
if v < linux.YAMA_SCOPE_DISABLED || v > linux.YAMA_SCOPE_RELATIONAL {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
atomic.StoreInt32(s.level, v)
diff --git a/pkg/sentry/fsimpl/signalfd/BUILD b/pkg/sentry/fsimpl/signalfd/BUILD
index adb610213..403c6f254 100644
--- a/pkg/sentry/fsimpl/signalfd/BUILD
+++ b/pkg/sentry/fsimpl/signalfd/BUILD
@@ -9,10 +9,10 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/kernel",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fsimpl/signalfd/signalfd.go b/pkg/sentry/fsimpl/signalfd/signalfd.go
index a7f5928b7..bdb03ef96 100644
--- a/pkg/sentry/fsimpl/signalfd/signalfd.go
+++ b/pkg/sentry/fsimpl/signalfd/signalfd.go
@@ -18,10 +18,10 @@ package signalfd
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -91,7 +91,7 @@ func (sfd *SignalFileDescription) Read(ctx context.Context, dst usermem.IOSequen
info, err := sfd.target.Sigtimedwait(sfd.Mask(), 0)
if err != nil {
// There must be no signal available.
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Copy out the signal info using the specified format.
diff --git a/pkg/sentry/fsimpl/sockfs/BUILD b/pkg/sentry/fsimpl/sockfs/BUILD
index 9453277b8..9defca936 100644
--- a/pkg/sentry/fsimpl/sockfs/BUILD
+++ b/pkg/sentry/fsimpl/sockfs/BUILD
@@ -9,10 +9,10 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/sentry/fsimpl/kernfs",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/fsimpl/sockfs/sockfs.go b/pkg/sentry/fsimpl/sockfs/sockfs.go
index 735756280..75934ecd0 100644
--- a/pkg/sentry/fsimpl/sockfs/sockfs.go
+++ b/pkg/sentry/fsimpl/sockfs/sockfs.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// filesystemType implements vfs.FilesystemType.
@@ -102,7 +102,7 @@ type inode struct {
// Open implements kernfs.Inode.Open.
func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
// StatFS implements kernfs.Inode.StatFS.
diff --git a/pkg/sentry/fsimpl/sys/BUILD b/pkg/sentry/fsimpl/sys/BUILD
index 09043b572..ab21f028e 100644
--- a/pkg/sentry/fsimpl/sys/BUILD
+++ b/pkg/sentry/fsimpl/sys/BUILD
@@ -26,6 +26,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/context",
"//pkg/coverage",
+ "//pkg/errors/linuxerr",
"//pkg/log",
"//pkg/refs",
"//pkg/refsvfs2",
@@ -35,7 +36,6 @@ go_library(
"//pkg/sentry/kernel/auth",
"//pkg/sentry/memmap",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fsimpl/sys/kcov.go b/pkg/sentry/fsimpl/sys/kcov.go
index b13f141a8..51f0bf3d8 100644
--- a/pkg/sentry/fsimpl/sys/kcov.go
+++ b/pkg/sentry/fsimpl/sys/kcov.go
@@ -17,13 +17,13 @@ package sys
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -85,11 +85,11 @@ func (fd *kcovFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallAr
case linux.KCOV_DISABLE:
if arg != 0 {
// This arg is unused; it should be 0.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
return 0, fd.kcov.DisableTrace(ctx)
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
diff --git a/pkg/sentry/fsimpl/sys/sys.go b/pkg/sentry/fsimpl/sys/sys.go
index 14eb10dcd..f322d2747 100644
--- a/pkg/sentry/fsimpl/sys/sys.go
+++ b/pkg/sentry/fsimpl/sys/sys.go
@@ -23,12 +23,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/coverage"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -74,7 +74,7 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
maxCachedDentries, err = strconv.ParseUint(str, 10, 64)
if err != nil {
ctx.Warningf("sys.FilesystemType.GetFilesystem: invalid dentry cache limit: dentry_cache_limit=%s", str)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
}
@@ -174,7 +174,7 @@ func (fs *filesystem) newDir(ctx context.Context, creds *auth.Credentials, mode
// SetStat implements kernfs.Inode.SetStat not allowing inode attributes to be changed.
func (*dir) SetStat(context.Context, *vfs.Filesystem, *auth.Credentials, vfs.SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Open implements kernfs.Inode.Open.
diff --git a/pkg/sentry/fsimpl/testutil/kernel.go b/pkg/sentry/fsimpl/testutil/kernel.go
index 97aa20cd1..473b41cff 100644
--- a/pkg/sentry/fsimpl/testutil/kernel.go
+++ b/pkg/sentry/fsimpl/testutil/kernel.go
@@ -80,12 +80,8 @@ func Boot() (*kernel.Kernel, error) {
}
// Create timekeeper.
- tk, err := kernel.NewTimekeeper(k, vdso.ParamPage.FileRange())
- if err != nil {
- return nil, fmt.Errorf("creating timekeeper: %v", err)
- }
+ tk := kernel.NewTimekeeper(k, vdso.ParamPage.FileRange())
tk.SetClocks(time.NewCalibratedClocks())
- k.SetTimekeeper(tk)
creds := auth.NewRootCredentials(auth.NewRootUserNamespace())
@@ -94,6 +90,7 @@ func Boot() (*kernel.Kernel, error) {
if err = k.Init(kernel.InitKernelArgs{
ApplicationCores: uint(runtime.GOMAXPROCS(-1)),
FeatureSet: cpuid.HostFeatureSet(),
+ Timekeeper: tk,
RootUserNamespace: creds.UserNamespace,
Vdso: vdso,
RootUTSNamespace: kernel.NewUTSNamespace("hostname", "domain", creds.UserNamespace),
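
The testutil change above reflects the kernel-initialization refactor this series builds on: kernel.NewTimekeeper no longer returns an error, and the timekeeper reaches the kernel through InitKernelArgs.Timekeeper rather than a separate k.SetTimekeeper call. A compressed sketch of the resulting order, with the unrelated InitKernelArgs fields elided (the loader and sentry/time import paths are assumptions based on how this helper uses vdso and NewCalibratedClocks):

package sketch

import (
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/sentry/loader"
	"gvisor.dev/gvisor/pkg/sentry/time"
)

// initTimekeeping isolates just the ordering that changed: the timekeeper is
// built unconditionally, given calibrated clocks, and passed to Kernel.Init.
func initTimekeeping(k *kernel.Kernel, vdso *loader.VDSO, args kernel.InitKernelArgs) error {
	tk := kernel.NewTimekeeper(k, vdso.ParamPage.FileRange())
	tk.SetClocks(time.NewCalibratedClocks())
	args.Timekeeper = tk
	return k.Init(args)
}
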
diff --git a/pkg/sentry/fsimpl/timerfd/BUILD b/pkg/sentry/fsimpl/timerfd/BUILD
index 7ce7dc429..2b83d7d9a 100644
--- a/pkg/sentry/fsimpl/timerfd/BUILD
+++ b/pkg/sentry/fsimpl/timerfd/BUILD
@@ -8,10 +8,10 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/kernel/time",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/fsimpl/timerfd/timerfd.go b/pkg/sentry/fsimpl/timerfd/timerfd.go
index cbb8b67c5..68b785791 100644
--- a/pkg/sentry/fsimpl/timerfd/timerfd.go
+++ b/pkg/sentry/fsimpl/timerfd/timerfd.go
@@ -19,10 +19,10 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -69,7 +69,7 @@ func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, clock ktime.Clock,
func (tfd *TimerFileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
const sizeofUint64 = 8
if dst.NumBytes() < sizeofUint64 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if val := atomic.SwapUint64(&tfd.val, 0); val != 0 {
var buf [sizeofUint64]byte
@@ -81,7 +81,7 @@ func (tfd *TimerFileDescription) Read(ctx context.Context, dst usermem.IOSequenc
}
return sizeofUint64, nil
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Clock returns the timer fd's Clock.
diff --git a/pkg/sentry/fsimpl/tmpfs/BUILD b/pkg/sentry/fsimpl/tmpfs/BUILD
index e21fddd7f..94486bb63 100644
--- a/pkg/sentry/fsimpl/tmpfs/BUILD
+++ b/pkg/sentry/fsimpl/tmpfs/BUILD
@@ -58,6 +58,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/amutex",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/log",
@@ -81,7 +82,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sentry/vfs/memxattr",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
@@ -94,6 +94,7 @@ go_test(
":tmpfs",
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/refs",
"//pkg/sentry/contexttest",
@@ -101,7 +102,6 @@ go_test(
"//pkg/sentry/fs/tmpfs",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
],
)
@@ -118,12 +118,12 @@ go_test(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs/lock",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go b/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
index 3cc63e732..2c29343c1 100644
--- a/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
+++ b/pkg/sentry/fsimpl/tmpfs/benchmark_test.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
@@ -30,7 +31,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Differences from stat_benchmark:
@@ -68,7 +68,7 @@ func fileOpOn(ctx context.Context, mntns *fs.MountNamespace, root, wd *fs.Dirent
rel = wd
} else {
// Need to extract the given FD.
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Lookup the node.
@@ -146,7 +146,7 @@ func BenchmarkVFS1TmpfsStat(b *testing.B) {
for i := 0; i < b.N; i++ {
err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
uattr, err := d.Inode.UnstableAttr(ctx)
if err != nil {
@@ -341,7 +341,7 @@ func BenchmarkVFS1TmpfsMountStat(b *testing.B) {
for i := 0; i < b.N; i++ {
err := fileOpOn(ctx, mntns, root, root, linux.AT_FDCWD, filePath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
uattr, err := d.Inode.UnstableAttr(ctx)
if err != nil {
diff --git a/pkg/sentry/fsimpl/tmpfs/directory.go b/pkg/sentry/fsimpl/tmpfs/directory.go
index e8d256495..c25494c0b 100644
--- a/pkg/sentry/fsimpl/tmpfs/directory.go
+++ b/pkg/sentry/fsimpl/tmpfs/directory.go
@@ -19,10 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// +stateify savable
@@ -196,10 +196,10 @@ func (fd *directoryFD) Seek(ctx context.Context, offset int64, whence int32) (in
case linux.SEEK_CUR:
offset += fd.off
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// If the offset isn't changing (e.g. due to lseek(0, SEEK_CUR)), don't
diff --git a/pkg/sentry/fsimpl/tmpfs/filesystem.go b/pkg/sentry/fsimpl/tmpfs/filesystem.go
index f0f4297ef..e067f136e 100644
--- a/pkg/sentry/fsimpl/tmpfs/filesystem.go
+++ b/pkg/sentry/fsimpl/tmpfs/filesystem.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Sync implements vfs.FilesystemImpl.Sync.
@@ -45,7 +45,7 @@ func (fs *filesystem) Sync(ctx context.Context) error {
func stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry) (*dentry, error) {
dir, ok := d.inode.impl.(*directory)
if !ok {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -70,11 +70,11 @@ afterSymlink:
return d.parent, nil
}
if len(name) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
child, ok := dir.childMap[name]
if !ok {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
return nil, err
@@ -112,7 +112,7 @@ func walkParentDirLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry)
}
dir, ok := d.inode.impl.(*directory)
if !ok {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return dir, nil
}
@@ -132,7 +132,7 @@ func resolveLocked(ctx context.Context, rp *vfs.ResolvingPath) (*dentry, error)
d = next
}
if rp.MustBeDir() && !d.inode.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -161,21 +161,21 @@ func (fs *filesystem) doCreateAt(ctx context.Context, rp *vfs.ResolvingPath, dir
}
name := rp.Component()
if name == "." || name == ".." {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if len(name) > linux.NAME_MAX {
- return syserror.ENAMETOOLONG
+ return linuxerr.ENAMETOOLONG
}
if _, ok := parentDir.childMap[name]; ok {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if !dir && rp.MustBeDir() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// tmpfs never calls VFS.InvalidateDentry(), so parentDir.dentry can only
// be dead if it was deleted.
if parentDir.dentry.vfsd.IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
mnt := rp.Mount()
if err := mnt.CheckBeginWrite(); err != nil {
@@ -220,7 +220,7 @@ func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, op
}
if opts.CheckSearchable {
if !d.inode.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.inode.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -246,21 +246,21 @@ func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa
func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
return fs.doCreateAt(ctx, rp, false /* dir */, func(parentDir *directory, name string) error {
if rp.Mount() != vd.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
d := vd.Dentry().Impl().(*dentry)
i := d.inode
if i.isDir() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := vfs.MayLink(auth.CredentialsFromContext(ctx), linux.FileMode(atomic.LoadUint32(&i.mode)), auth.KUID(atomic.LoadUint32(&i.uid)), auth.KGID(atomic.LoadUint32(&i.gid))); err != nil {
return err
}
if i.nlink == 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if i.nlink == maxLinks {
- return syserror.EMLINK
+ return linuxerr.EMLINK
}
i.incLinksLocked()
i.watches.Notify(ctx, "", linux.IN_ATTRIB, 0, vfs.InodeEvent, false /* unlinked */)
@@ -274,7 +274,7 @@ func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
return fs.doCreateAt(ctx, rp, true /* dir */, func(parentDir *directory, name string) error {
creds := rp.Credentials()
if parentDir.inode.nlink == maxLinks {
- return syserror.EMLINK
+ return linuxerr.EMLINK
}
parentDir.inode.incLinksLocked() // from child's ".."
childDir := fs.newDirectory(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode, parentDir)
@@ -300,7 +300,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
case linux.S_IFSOCK:
childInode = fs.newSocketFile(creds.EffectiveKUID, creds.EffectiveKGID, opts.Mode, opts.Endpoint, parentDir)
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
child := fs.newDentry(childInode)
parentDir.insertChildLocked(child, name)
@@ -312,7 +312,7 @@ func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts v
func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
if opts.Flags&linux.O_TMPFILE != 0 {
// Not yet supported.
- return nil, syserror.EOPNOTSUPP
+ return nil, linuxerr.EOPNOTSUPP
}
// Handle O_CREAT and !O_CREAT separately, since in the latter case we
@@ -344,10 +344,10 @@ func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vf
if rp.Done() {
// Reject attempts to open mount root directory with O_CREAT.
if rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
start.IncRef()
defer start.DecRef(ctx)
@@ -365,14 +365,14 @@ afterTrailingSymlink:
}
// Reject attempts to open directories with O_CREAT.
if rp.MustBeDir() {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
name := rp.Component()
if name == "." || name == ".." {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
if len(name) > linux.NAME_MAX {
- return nil, syserror.ENAMETOOLONG
+ return nil, linuxerr.ENAMETOOLONG
}
// Determine whether or not we need to create a file.
child, ok := parentDir.childMap[name]
@@ -401,7 +401,7 @@ afterTrailingSymlink:
return fd, nil
}
if mustCreate {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EEXIST
}
// Is the file mounted over?
if err := rp.CheckMount(ctx, &child.vfsd); err != nil {
@@ -418,7 +418,7 @@ afterTrailingSymlink:
goto afterTrailingSymlink
}
if rp.MustBeDir() && !child.inode.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
child.IncRef()
defer child.DecRef(ctx)
@@ -456,7 +456,7 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open
case *directory:
// Can't open directories writably.
if ats&vfs.MayWrite != 0 {
- return nil, syserror.EISDIR
+ return nil, linuxerr.EISDIR
}
var fd directoryFD
fd.LockFD.Init(&d.inode.locks)
@@ -466,13 +466,13 @@ func (d *dentry) open(ctx context.Context, rp *vfs.ResolvingPath, opts *vfs.Open
return &fd.vfsfd, nil
case *symlink:
// Can't open symlinks without O_PATH, which is handled at the VFS layer.
- return nil, syserror.ELOOP
+ return nil, linuxerr.ELOOP
case *namedPipe:
return impl.pipe.Open(ctx, rp.Mount(), &d.vfsd, opts.Flags, &d.inode.locks)
case *deviceFile:
return rp.VirtualFilesystem().OpenDeviceSpecialFile(ctx, rp.Mount(), &d.vfsd, impl.kind, impl.major, impl.minor, opts)
case *socketFile:
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
default:
panic(fmt.Sprintf("unknown inode type: %T", d.inode.impl))
}
@@ -488,7 +488,7 @@ func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (st
}
symlink, ok := d.inode.impl.(*symlink)
if !ok {
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
symlink.inode.touchAtime(rp.Mount())
return symlink.target, nil
@@ -506,19 +506,19 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
if opts.Flags&^linux.RENAME_NOREPLACE != 0 {
// TODO(b/145974740): Support other renameat2 flags.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
newName := rp.Component()
if newName == "." || newName == ".." {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
mnt := rp.Mount()
if mnt != oldParentVD.Mount() {
- return syserror.EXDEV
+ return linuxerr.EXDEV
}
if err := mnt.CheckBeginWrite(); err != nil {
return err
@@ -531,7 +531,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
renamed, ok := oldParentDir.childMap[oldName]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := oldParentDir.mayDelete(rp.Credentials(), renamed); err != nil {
return err
@@ -541,7 +541,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
// mounted filesystem.
if renamed.inode.isDir() {
if renamed == &newParentDir.dentry || genericIsAncestorDentry(renamed, &newParentDir.dentry) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if oldParentDir != newParentDir {
// Writability is needed to change renamed's "..".
@@ -551,7 +551,7 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
}
} else {
if opts.MustBeDir || rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
@@ -561,33 +561,33 @@ func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldPa
replaced, ok := newParentDir.childMap[newName]
if ok {
if opts.Flags&linux.RENAME_NOREPLACE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
replacedDir, ok := replaced.inode.impl.(*directory)
if ok {
if !renamed.inode.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if len(replacedDir.childMap) != 0 {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
} else {
if rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
if renamed.inode.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
} else {
if renamed.inode.isDir() && newParentDir.inode.nlink == maxLinks {
- return syserror.EMLINK
+ return linuxerr.EMLINK
}
}
// tmpfs never calls VFS.InvalidateDentry(), so newParentDir.dentry can
// only be dead if it was deleted.
if newParentDir.dentry.vfsd.IsDead() {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// Linux places this check before some of those above; we do it here for
@@ -646,24 +646,24 @@ func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error
}
name := rp.Component()
if name == "." {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if name == ".." {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
child, ok := parentDir.childMap[name]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := parentDir.mayDelete(rp.Credentials(), child); err != nil {
return err
}
childDir, ok := child.inode.impl.(*directory)
if !ok {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
if len(childDir.childMap) != 0 {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
mnt := rp.Mount()
if err := mnt.CheckBeginWrite(); err != nil {
@@ -753,20 +753,20 @@ func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error
}
name := rp.Component()
if name == "." || name == ".." {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
child, ok := parentDir.childMap[name]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if err := parentDir.mayDelete(rp.Credentials(), child); err != nil {
return err
}
if child.inode.isDir() {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
if rp.MustBeDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
mnt := rp.Mount()
if err := mnt.CheckBeginWrite(); err != nil {
@@ -806,11 +806,11 @@ func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath
switch impl := d.inode.impl.(type) {
case *socketFile:
if impl.ep == nil {
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
return impl.ep, nil
default:
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
}
diff --git a/pkg/sentry/fsimpl/tmpfs/pipe_test.go b/pkg/sentry/fsimpl/tmpfs/pipe_test.go
index 2f856ce36..99afd9817 100644
--- a/pkg/sentry/fsimpl/tmpfs/pipe_test.go
+++ b/pkg/sentry/fsimpl/tmpfs/pipe_test.go
@@ -20,11 +20,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -114,7 +114,7 @@ func TestNonblockingWriteError(t *testing.T) {
}
openOpts := vfs.OpenOptions{Flags: linux.O_WRONLY | linux.O_NONBLOCK}
_, err := vfsObj.OpenAt(ctx, creds, &pop, &openOpts)
- if err != syserror.ENXIO {
+ if !linuxerr.Equals(linuxerr.ENXIO, err) {
t.Fatalf("expected ENXIO, but got error: %v", err)
}
}
@@ -201,7 +201,7 @@ func checkEmpty(ctx context.Context, t *testing.T, fd *vfs.FileDescription) {
readData := make([]byte, 1)
dst := usermem.BytesIOSequence(readData)
bytesRead, err := fd.Read(ctx, dst, vfs.ReadOptions{})
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
t.Fatalf("expected ErrWouldBlock reading from empty pipe %q, but got: %v", fileName, err)
}
if bytesRead != 0 {
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file.go b/pkg/sentry/fsimpl/tmpfs/regular_file.go
index c45bddff6..0f2ac6144 100644
--- a/pkg/sentry/fsimpl/tmpfs/regular_file.go
+++ b/pkg/sentry/fsimpl/tmpfs/regular_file.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -33,7 +34,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -185,7 +185,7 @@ func (rf *regularFile) truncateLocked(newSize uint64) (bool, error) {
// Can we grow the file?
if rf.seals&linux.F_SEAL_GROW != 0 {
rf.dataMu.Unlock()
- return false, syserror.EPERM
+ return false, linuxerr.EPERM
}
// We only need to update the file size.
atomic.StoreUint64(&rf.size, newSize)
@@ -196,7 +196,7 @@ func (rf *regularFile) truncateLocked(newSize uint64) (bool, error) {
// We are shrinking the file. First check if this is allowed.
if rf.seals&linux.F_SEAL_SHRINK != 0 {
rf.dataMu.Unlock()
- return false, syserror.EPERM
+ return false, linuxerr.EPERM
}
// Update the file size.
@@ -233,7 +233,7 @@ func (rf *regularFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, a
// Reject writable mapping if F_SEAL_WRITE is set.
if rf.seals&linux.F_SEAL_WRITE != 0 && writable {
- return syserror.EPERM
+ return linuxerr.EPERM
}
rf.mappings.AddMapping(ms, ar, offset, writable)
@@ -366,7 +366,7 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
fsmetric.TmpfsReads.Increment()
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that flags are supported. RWF_DSYNC/RWF_SYNC can be ignored since
@@ -374,7 +374,7 @@ func (fd *regularFileFD) PRead(ctx context.Context, dst usermem.IOSequence, offs
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^(linux.RWF_HIPRI|linux.RWF_DSYNC|linux.RWF_SYNC) != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
if dst.NumBytes() == 0 {
@@ -407,7 +407,7 @@ func (fd *regularFileFD) PWrite(ctx context.Context, src usermem.IOSequence, off
// final offset should be ignored by PWrite.
func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (written, finalOff int64, err error) {
if offset < 0 {
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
// Check that flags are supported. RWF_DSYNC/RWF_SYNC can be ignored since
@@ -415,7 +415,7 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
//
// TODO(gvisor.dev/issue/2601): Support select preadv2 flags.
if opts.Flags&^(linux.RWF_HIPRI|linux.RWF_DSYNC|linux.RWF_SYNC) != 0 {
- return 0, offset, syserror.EOPNOTSUPP
+ return 0, offset, linuxerr.EOPNOTSUPP
}
srclen := src.NumBytes()
@@ -432,7 +432,7 @@ func (fd *regularFileFD) pwrite(ctx context.Context, src usermem.IOSequence, off
}
if end := offset + srclen; end < offset {
// Overflow.
- return 0, offset, syserror.EINVAL
+ return 0, offset, linuxerr.EINVAL
}
srclen, err = vfs.CheckLimit(ctx, offset, srclen)
@@ -476,10 +476,10 @@ func (fd *regularFileFD) Seek(ctx context.Context, offset int64, whence int32) (
case linux.SEEK_END:
offset += int64(atomic.LoadUint64(&fd.inode().impl.(*regularFile).size))
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
fd.off = offset
return offset, nil
@@ -594,7 +594,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
// Check if seals prevent either file growth or all writes.
switch {
case rw.file.seals&linux.F_SEAL_WRITE != 0: // Write sealed
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
case end > rw.file.size && rw.file.seals&linux.F_SEAL_GROW != 0: // Grow sealed
// When growth is sealed, Linux effectively allows writes which would
// normally grow the file to partially succeed up to the current EOF,
@@ -615,7 +615,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
}
if end <= rw.off {
// Truncation would result in no data being written.
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
}
@@ -684,7 +684,7 @@ exitLoop:
func GetSeals(fd *vfs.FileDescription) (uint32, error) {
f, ok := fd.Impl().(*regularFileFD)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
rf := f.inode().impl.(*regularFile)
rf.dataMu.RLock()
@@ -696,7 +696,7 @@ func GetSeals(fd *vfs.FileDescription) (uint32, error) {
func AddSeals(fd *vfs.FileDescription, val uint32) error {
f, ok := fd.Impl().(*regularFileFD)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rf := f.inode().impl.(*regularFile)
rf.mapsMu.Lock()
@@ -706,13 +706,13 @@ func AddSeals(fd *vfs.FileDescription, val uint32) error {
if rf.seals&linux.F_SEAL_SEAL != 0 {
// Seal applied which prevents addition of any new seals.
- return syserror.EPERM
+ return linuxerr.EPERM
}
// F_SEAL_WRITE can only be added if there are no active writable maps.
if rf.seals&linux.F_SEAL_WRITE == 0 && val&linux.F_SEAL_WRITE != 0 {
if rf.writableMappingPages > 0 {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
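
The seal checks touched in this file implement the memfd F_SEAL_* semantics: F_SEAL_GROW forbids extending the file, F_SEAL_SHRINK forbids shrinking it, F_SEAL_WRITE forbids writes and new writable mappings, and F_SEAL_SEAL freezes the seal set itself. A standalone sketch of just the resize check, mirroring truncateLocked above (the function name is illustrative):

package sketch

import (
	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkResizeSeals reports whether changing the file size from oldSize to
// newSize is permitted under the given seal mask, following the checks made
// by truncateLocked.
func checkResizeSeals(seals uint32, oldSize, newSize uint64) error {
	switch {
	case newSize > oldSize && seals&linux.F_SEAL_GROW != 0:
		// Growth is sealed.
		return linuxerr.EPERM
	case newSize < oldSize && seals&linux.F_SEAL_SHRINK != 0:
		// Shrinking is sealed.
		return linuxerr.EPERM
	}
	return nil
}
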
diff --git a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go b/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
index 4393cc13b..cb7711b39 100644
--- a/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
+++ b/pkg/sentry/fsimpl/tmpfs/regular_file_test.go
@@ -21,10 +21,10 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -146,7 +146,7 @@ func TestLocks(t *testing.T) {
if err := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, nil); err != nil {
t.Fatalf("fd.Impl().LockBSD failed: err = %v", err)
}
- if got, want := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.WriteLock, nil), syserror.ErrWouldBlock; got != want {
+ if got, want := fd.Impl().LockBSD(ctx, uid2, 0 /* ownerPID */, lock.WriteLock, nil), linuxerr.ErrWouldBlock; got != want {
t.Fatalf("fd.Impl().LockBSD failed: got = %v, want = %v", got, want)
}
if err := fd.Impl().UnlockBSD(ctx, uid1); err != nil {
@@ -165,7 +165,7 @@ func TestLocks(t *testing.T) {
if err := fd.Impl().LockPOSIX(ctx, uid1, 0 /* ownerPID */, lock.WriteLock, lock.LockRange{Start: 0, End: 1}, nil); err != nil {
t.Fatalf("fd.Impl().LockPOSIX failed: err = %v", err)
}
- if got, want := fd.Impl().LockPOSIX(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, lock.LockRange{Start: 0, End: 1}, nil), syserror.ErrWouldBlock; got != want {
+ if got, want := fd.Impl().LockPOSIX(ctx, uid2, 0 /* ownerPID */, lock.ReadLock, lock.LockRange{Start: 0, End: 1}, nil), linuxerr.ErrWouldBlock; got != want {
t.Fatalf("fd.Impl().LockPOSIX failed: got = %v, want = %v", got, want)
}
if err := fd.Impl().UnlockPOSIX(ctx, uid1, lock.LockRange{Start: 0, End: 1}); err != nil {
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index 6b4367c42..feafb06e4 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -36,6 +36,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/time"
@@ -43,7 +44,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sentry/vfs/memxattr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Name is the default filesystem name.
@@ -138,7 +138,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: invalid mode: %q", modeStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
rootMode = linux.FileMode(mode & 07777)
}
@@ -149,12 +149,12 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
uid, err := strconv.ParseUint(uidStr, 10, 32)
if err != nil {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: invalid uid: %q", uidStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
kuid := creds.UserNamespace.MapToKUID(auth.UID(uid))
if !kuid.Ok() {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: unmapped uid: %d", uid)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
rootKUID = kuid
}
@@ -165,18 +165,18 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
gid, err := strconv.ParseUint(gidStr, 10, 32)
if err != nil {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: invalid gid: %q", gidStr)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
kgid := creds.UserNamespace.MapToKGID(auth.GID(gid))
if !kgid.Ok() {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: unmapped gid: %d", gid)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
rootKGID = kgid
}
if len(mopts) != 0 {
ctx.Warningf("tmpfs.FilesystemType.GetFilesystem: unknown options: %v", mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
devMinor, err := vfsObj.GetAnonBlockDevMinor()
@@ -396,8 +396,8 @@ func (i *inode) init(impl interface{}, fs *filesystem, kuid auth.KUID, kgid auth
}
// Inherit the group and setgid bit as in fs/inode.c:inode_init_owner().
- if parentDir != nil && parentDir.inode.mode&linux.S_ISGID == linux.S_ISGID {
- kgid = auth.KGID(parentDir.inode.gid)
+ if parentDir != nil && atomic.LoadUint32(&parentDir.inode.mode)&linux.S_ISGID == linux.S_ISGID {
+ kgid = auth.KGID(atomic.LoadUint32(&parentDir.inode.gid))
if mode&linux.S_IFDIR == linux.S_IFDIR {
mode |= linux.S_ISGID
}
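
Beyond the error swap, this hunk also reads the parent's mode and gid with atomic loads before applying the setgid-inheritance rule. A hedged restatement of that rule in isolation (inheritGroup is a hypothetical helper; the real logic stays inside inode.init):

package tmpfssketch

import (
	"sync/atomic"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
)

// inheritGroup applies the fs/inode.c:inode_init_owner() rule: if the parent
// directory is setgid, the child inherits the parent's gid, and a child
// directory keeps S_ISGID. The parent's fields are loaded atomically because
// other tasks may update them concurrently.
func inheritGroup(parentMode, parentGID *uint32, kgid auth.KGID, mode linux.FileMode) (auth.KGID, linux.FileMode) {
	if parentMode != nil && atomic.LoadUint32(parentMode)&linux.S_ISGID == linux.S_ISGID {
		kgid = auth.KGID(atomic.LoadUint32(parentGID))
		if mode&linux.S_IFDIR == linux.S_IFDIR {
			mode |= linux.S_ISGID
		}
	}
	return kgid, mode
}
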
@@ -527,7 +527,7 @@ func (i *inode) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs.
return nil
}
if stat.Mask&^(linux.STATX_MODE|linux.STATX_UID|linux.STATX_GID|linux.STATX_ATIME|linux.STATX_MTIME|linux.STATX_CTIME|linux.STATX_SIZE) != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
mode := linux.FileMode(atomic.LoadUint32(&i.mode))
if err := vfs.CheckSetStat(ctx, creds, opts, mode, auth.KUID(atomic.LoadUint32(&i.uid)), auth.KGID(atomic.LoadUint32(&i.gid))); err != nil {
@@ -555,9 +555,9 @@ func (i *inode) setStat(ctx context.Context, creds *auth.Credentials, opts *vfs.
needsCtimeBump = true
}
case *directory:
- return syserror.EISDIR
+ return linuxerr.EISDIR
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
if mask&linux.STATX_UID != 0 {
@@ -730,7 +730,7 @@ func checkXattrName(name string) error {
if strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {
return nil
}
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
func (i *inode) listXattr(creds *auth.Credentials, size uint64) ([]string, error) {
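
The uid=/gid= handling above follows one pattern: parse the option with strconv, map it into the caller's user namespace, and fail the mount with EINVAL otherwise. A small sketch of that pattern, assuming the same auth and linuxerr packages (parseOwnerUID and its default argument are hypothetical):

package tmpfssketch

import (
	"strconv"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
)

// parseOwnerUID parses a uid= mount option and maps it into the caller's user
// namespace, mirroring GetFilesystem above; def is returned when the option
// is absent.
func parseOwnerUID(creds *auth.Credentials, mopts map[string]string, def auth.KUID) (auth.KUID, error) {
	uidStr, ok := mopts["uid"]
	if !ok {
		return def, nil
	}
	delete(mopts, "uid")
	uid, err := strconv.ParseUint(uidStr, 10, 32)
	if err != nil {
		return 0, linuxerr.EINVAL // not a number
	}
	kuid := creds.UserNamespace.MapToKUID(auth.UID(uid))
	if !kuid.Ok() {
		return 0, linuxerr.EINVAL // no mapping in this user namespace
	}
	return kuid, nil
}
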
diff --git a/pkg/sentry/fsimpl/verity/BUILD b/pkg/sentry/fsimpl/verity/BUILD
index d473a922d..c12abdf33 100644
--- a/pkg/sentry/fsimpl/verity/BUILD
+++ b/pkg/sentry/fsimpl/verity/BUILD
@@ -1,10 +1,24 @@
load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
licenses(["notice"])
+go_template_instance(
+ name = "dentry_list",
+ out = "dentry_list.go",
+ package = "verity",
+ prefix = "dentry",
+ template = "//pkg/ilist:generic_list",
+ types = {
+ "Element": "*dentry",
+ "Linker": "*dentry",
+ },
+)
+
go_library(
name = "verity",
srcs = [
+ "dentry_list.go",
"filesystem.go",
"save_restore.go",
"verity.go",
@@ -13,6 +27,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/marshal/primitive",
@@ -27,7 +42,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
@@ -41,6 +55,7 @@ go_test(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/sentry/arch",
"//pkg/sentry/fsimpl/testutil",
@@ -48,7 +63,6 @@ go_test(
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
- "//pkg/syserror",
"//pkg/usermem",
],
)
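
The new go_template_instance rule generates dentryList, an intrusive doubly-linked list over *dentry that backs the LRU cache added later in this change. The toy below is a hand-written stand-in with the same PushFront/Back/Remove surface, not the generated code:

package main

import "fmt"

// node stands in for *dentry; in the real code the generated dentryEntry is
// embedded in dentry and carries the list links.
type node struct {
	name       string
	next, prev *node
}

// list mirrors the PushFront/Back/Remove surface of the generated dentryList.
type list struct {
	head, tail *node
}

func (l *list) PushFront(n *node) {
	n.prev, n.next = nil, l.head
	if l.head != nil {
		l.head.prev = n
	} else {
		l.tail = n
	}
	l.head = n
}

func (l *list) Back() *node { return l.tail }

func (l *list) Remove(n *node) {
	if n.prev != nil {
		n.prev.next = n.next
	} else {
		l.head = n.next
	}
	if n.next != nil {
		n.next.prev = n.prev
	} else {
		l.tail = n.prev
	}
	n.next, n.prev = nil, nil
}

func main() {
	var lru list
	a, b := &node{name: "a"}, &node{name: "b"}
	lru.PushFront(a)
	lru.PushFront(b)             // "b" is now most recently used
	fmt.Println(lru.Back().name) // "a" is the eviction victim
}
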
diff --git a/pkg/sentry/fsimpl/verity/filesystem.go b/pkg/sentry/fsimpl/verity/filesystem.go
index 3582d14c9..52d47994d 100644
--- a/pkg/sentry/fsimpl/verity/filesystem.go
+++ b/pkg/sentry/fsimpl/verity/filesystem.go
@@ -25,13 +25,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/merkletree"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -66,38 +66,23 @@ func putDentrySlice(ds *[]*dentry) {
dentrySlicePool.Put(ds)
}
-// renameMuRUnlockAndCheckDrop calls fs.renameMu.RUnlock(), then calls
-// dentry.checkDropLocked on all dentries in *ds with fs.renameMu locked for
+// renameMuRUnlockAndCheckCaching calls fs.renameMu.RUnlock(), then calls
+// dentry.checkCachingLocked on all dentries in *ds with fs.renameMu locked for
// writing.
//
// ds is a pointer-to-pointer since defer evaluates its arguments immediately,
// but dentry slices are allocated lazily, and it's much easier to say "defer
-// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
-// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
-func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
+// fs.renameMuRUnlockAndCheckCaching(&ds)" than "defer func() {
+// fs.renameMuRUnlockAndCheckCaching(ds) }()" to work around this.
+// +checklocksrelease:fs.renameMu
+func (fs *filesystem) renameMuRUnlockAndCheckCaching(ctx context.Context, ds **[]*dentry) {
fs.renameMu.RUnlock()
if *ds == nil {
return
}
- if len(**ds) != 0 {
- fs.renameMu.Lock()
- for _, d := range **ds {
- d.checkDropLocked(ctx)
- }
- fs.renameMu.Unlock()
- }
- putDentrySlice(*ds)
-}
-
-func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
- if *ds == nil {
- fs.renameMu.Unlock()
- return
- }
for _, d := range **ds {
- d.checkDropLocked(ctx)
+ d.checkCachingLocked(ctx, false /* renameMuWriteLocked */)
}
- fs.renameMu.Unlock()
putDentrySlice(*ds)
}
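
The renamed helper keeps the pointer-to-pointer justification from its predecessor: defer evaluates its arguments immediately, while the dentry slice is allocated lazily. A runnable toy of that idiom, independent of verity:

package main

import "fmt"

func main() {
	var ds *[]string // allocated lazily, like the dentry slice above

	// defer evaluates its arguments now, but &ds lets the deferred function
	// observe whatever value ds holds when the function returns.
	defer func(pp **[]string) {
		if *pp == nil {
			fmt.Println("deferred: nothing was collected")
			return
		}
		fmt.Println("deferred sees:", **pp)
	}(&ds)

	s := []string{"dentry-a", "dentry-b"} // lazy allocation after the defer
	ds = &s
}
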
@@ -113,7 +98,7 @@ func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*de
// * !rp.Done().
func (fs *filesystem) stepLocked(ctx context.Context, rp *vfs.ResolvingPath, d *dentry, mayFollowSymlinks bool, ds **[]*dentry) (*dentry, error) {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
@@ -163,7 +148,7 @@ afterSymlink:
// verifyChildLocked verifies the hash of child against the already verified
// hash of the parent to ensure the child is expected. verifyChild triggers a
// sentry panic if unexpected modifications to the file system are detected. In
-// ErrorOnViolation mode it returns a syserror instead.
+// ErrorOnViolation mode it returns a linuxerr instead.
//
// Preconditions:
// * fs.renameMu must be locked.
@@ -195,7 +180,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi
// The Merkle tree file for the child should have been created and
// contains the expected xattrs. If the file or the xattr does not
// exist, it indicates unexpected modifications to the file system.
- if err == syserror.ENOENT || err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENODATA, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for %s: %v", merkleOffsetInParentXattr, childPath, err))
}
if err != nil {
@@ -218,7 +203,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi
// The parent Merkle tree file should have been created. If it's
// missing, it indicates an unexpected modification to the file system.
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to open parent Merkle file for %s: %v", childPath, err))
}
if err != nil {
@@ -238,7 +223,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi
// The Merkle tree file for the child should have been created and
// contains the expected xattrs. If the file or the xattr does not
// exist, it indicates unexpected modifications to the file system.
- if err == syserror.ENOENT || err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENODATA, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for %s: %v", merkleSizeXattr, childPath, err))
}
if err != nil {
@@ -261,7 +246,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi
Root: parent.lowerVD,
Start: parent.lowerVD,
}, &vfs.StatOptions{})
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to get parent stat for %s: %v", childPath, err))
}
if err != nil {
@@ -282,7 +267,7 @@ func (fs *filesystem) verifyChildLocked(ctx context.Context, parent *dentry, chi
Mode: uint32(parentStat.Mode),
UID: parentStat.UID,
GID: parentStat.GID,
- Children: parent.childrenNames,
+ Children: parent.childrenList,
HashAlgorithms: fs.alg.toLinuxHashAlg(),
ReadOffset: int64(offset),
ReadSize: int64(merkletree.DigestSize(fs.alg.toLinuxHashAlg())),
@@ -327,7 +312,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
}, &vfs.OpenOptions{
Flags: linux.O_RDONLY,
})
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return fs.alertIntegrityViolation(fmt.Sprintf("Failed to open merkle file for %s: %v", childPath, err))
}
if err != nil {
@@ -341,7 +326,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
Size: sizeOfStringInt32,
})
- if err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENODATA, err) {
return fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", merkleSizeXattr, childPath, err))
}
if err != nil {
@@ -359,7 +344,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
Size: sizeOfStringInt32,
})
- if err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENODATA, err) {
return fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", childrenOffsetXattr, childPath, err))
}
if err != nil {
@@ -375,7 +360,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
Size: sizeOfStringInt32,
})
- if err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENODATA, err) {
return fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s for merkle file of %s: %v", childrenSizeXattr, childPath, err))
}
if err != nil {
@@ -403,6 +388,9 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
var buf bytes.Buffer
d.hashMu.RLock()
+
+ d.generateChildrenList()
+
params := &merkletree.VerifyParams{
Out: &buf,
Tree: &fdReader,
@@ -411,7 +399,7 @@ func (fs *filesystem) verifyStatAndChildrenLocked(ctx context.Context, d *dentry
Mode: uint32(stat.Mode),
UID: stat.UID,
GID: stat.GID,
- Children: d.childrenNames,
+ Children: d.childrenList,
HashAlgorithms: fs.alg.toLinuxHashAlg(),
ReadOffset: 0,
// Set read size to 0 so only the metadata is verified.
@@ -465,7 +453,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
}
childVD, err := parent.getLowerAt(ctx, vfsObj, name)
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
// The file was previously accessed. If the
// file does not exist now, it indicates an
// unexpected modification to the file system.
@@ -480,7 +468,7 @@ func (fs *filesystem) getChildLocked(ctx context.Context, parent *dentry, name s
// The Merkle tree file was previously accessed. If it
// does not exist now, it indicates an unexpected
// modification to the file system.
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Expected Merkle file for target %s but none found", path))
}
if err != nil {
@@ -541,7 +529,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
if parent.verityEnabled() {
if _, ok := parent.childrenNames[name]; !ok {
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
}
@@ -551,7 +539,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
}
childVD, err := parent.getLowerAt(ctx, vfsObj, name)
- if parent.verityEnabled() && err == syserror.ENOENT {
+ if parent.verityEnabled() && linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("file %s expected but not found", parentPath+"/"+name))
}
if err != nil {
@@ -564,7 +552,7 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
childMerkleVD, err := parent.getLowerAt(ctx, vfsObj, merklePrefix+name)
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
if parent.verityEnabled() {
return nil, fs.alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", parentPath+"/"+name))
}
@@ -589,23 +577,6 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
}
}
- // Clear the Merkle tree file if they are to be generated at runtime.
- // TODO(b/182315468): Optimize the Merkle tree generate process to
- // allow only updating certain files/directories.
- if fs.allowRuntimeEnable {
- childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
- Root: childMerkleVD,
- Start: childMerkleVD,
- }, &vfs.OpenOptions{
- Flags: linux.O_RDWR | linux.O_TRUNC,
- Mode: 0644,
- })
- if err != nil {
- return nil, err
- }
- childMerkleFD.DecRef(ctx)
- }
-
// The dentry needs to be cleaned up if any error occurs. IncRef will be
// called if a verity child dentry is successfully created.
defer childMerkleVD.DecRef(ctx)
@@ -679,7 +650,7 @@ func (fs *filesystem) walkParentDirLocked(ctx context.Context, rp *vfs.Resolving
d = next
}
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -699,7 +670,7 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
d = next
}
if rp.MustBeDir() && !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return d, nil
}
@@ -708,11 +679,11 @@ func (fs *filesystem) resolveLocked(ctx context.Context, rp *vfs.ResolvingPath,
func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds *auth.Credentials, ats vfs.AccessTypes) error {
// Verity file system is read-only.
if ats&vfs.MayWrite != 0 {
- return syserror.EROFS
+ return linuxerr.EROFS
}
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return err
@@ -724,14 +695,14 @@ func (fs *filesystem) AccessAt(ctx context.Context, rp *vfs.ResolvingPath, creds
func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetDentryOptions) (*vfs.Dentry, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return nil, err
}
if opts.CheckSearchable {
if !d.isDir() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := d.checkPermissions(rp.Credentials(), vfs.MayExec); err != nil {
return nil, err
@@ -745,7 +716,7 @@ func (fs *filesystem) GetDentryAt(ctx context.Context, rp *vfs.ResolvingPath, op
func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPath) (*vfs.Dentry, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
start := rp.Start().Impl().(*dentry)
d, err := fs.walkParentDirLocked(ctx, rp, start, &ds)
if err != nil {
@@ -758,31 +729,31 @@ func (fs *filesystem) GetParentDentryAt(ctx context.Context, rp *vfs.ResolvingPa
// LinkAt implements vfs.FilesystemImpl.LinkAt.
func (fs *filesystem) LinkAt(ctx context.Context, rp *vfs.ResolvingPath, vd vfs.VirtualDentry) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// MkdirAt implements vfs.FilesystemImpl.MkdirAt.
func (fs *filesystem) MkdirAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MkdirOptions) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// MknodAt implements vfs.FilesystemImpl.MknodAt.
func (fs *filesystem) MknodAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.MknodOptions) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// OpenAt implements vfs.FilesystemImpl.OpenAt.
func (fs *filesystem) OpenAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
// Verity fs is read-only.
if opts.Flags&(linux.O_WRONLY|linux.O_CREAT) != 0 {
- return nil, syserror.EROFS
+ return nil, linuxerr.EROFS
}
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
start := rp.Start().Impl().(*dentry)
if rp.Done() {
@@ -826,7 +797,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
// Users should not open the Merkle tree files. Those are for verity fs
// use only.
if strings.Contains(d.name, merklePrefix) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
ats := vfs.AccessTypesForOpenFlags(opts)
if err := d.checkPermissions(rp.Credentials(), ats); err != nil {
@@ -835,7 +806,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
// Verity fs is read-only.
if ats&vfs.MayWrite != 0 {
- return nil, syserror.EROFS
+ return nil, linuxerr.EROFS
}
// Get the path to the target file. This is only used to provide path
@@ -845,16 +816,23 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
return nil, err
}
+ tmpOpts := *opts
+
+ // Open the lowerFD with O_PATH if a symlink is opened for verity.
+ if tmpOpts.Flags&linux.O_NOFOLLOW != 0 && d.isSymlink() {
+ tmpOpts.Flags |= linux.O_PATH
+ }
+
// Open the file in the underlying file system.
lowerFD, err := rp.VirtualFilesystem().OpenAt(ctx, d.fs.creds, &vfs.PathOperation{
Root: d.lowerVD,
Start: d.lowerVD,
- }, opts)
+ }, &tmpOpts)
// The file should exist, as we succeeded in finding its dentry. If it's
// missing, it indicates an unexpected modification to the file system.
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, d.fs.alertIntegrityViolation(fmt.Sprintf("File %s expected but not found", path))
}
return nil, err
@@ -877,7 +855,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
// dentry. If it's missing, it indicates an unexpected modification to
// the file system.
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, d.fs.alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", path))
}
return nil, err
@@ -887,7 +865,6 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
// be called if a verity FD is successfully created.
defer merkleReader.DecRef(ctx)
- lowerFlags := lowerFD.StatusFlags()
lowerFDOpts := lowerFD.Options()
var merkleWriter *vfs.FileDescription
var parentMerkleWriter *vfs.FileDescription
@@ -902,7 +879,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
Flags: linux.O_WRONLY | linux.O_APPEND,
})
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, d.fs.alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", path))
}
return nil, err
@@ -919,7 +896,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
Flags: linux.O_WRONLY | linux.O_APPEND,
})
if err != nil {
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
parentPath, _ := d.fs.vfsfs.VirtualFilesystem().PathnameWithDeleted(ctx, d.fs.rootDentry.lowerVD, d.parent.lowerVD)
return nil, d.fs.alertIntegrityViolation(fmt.Sprintf("Merkle file for %s expected but not found", parentPath))
}
@@ -940,7 +917,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
isDir: d.isDir(),
}
- if err := fd.vfsfd.Init(fd, lowerFlags, rp.Mount(), &d.vfsd, &lowerFDOpts); err != nil {
+ if err := fd.vfsfd.Init(fd, opts.Flags, rp.Mount(), &d.vfsd, &lowerFDOpts); err != nil {
return nil, err
}
lowerFD.IncRef()
@@ -958,7 +935,7 @@ func (d *dentry) openLocked(ctx context.Context, rp *vfs.ResolvingPath, opts *vf
func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (string, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return "", err
@@ -969,26 +946,26 @@ func (fs *filesystem) ReadlinkAt(ctx context.Context, rp *vfs.ResolvingPath) (st
// RenameAt implements vfs.FilesystemImpl.RenameAt.
func (fs *filesystem) RenameAt(ctx context.Context, rp *vfs.ResolvingPath, oldParentVD vfs.VirtualDentry, oldName string, opts vfs.RenameOptions) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// RmdirAt implements vfs.FilesystemImpl.RmdirAt.
func (fs *filesystem) RmdirAt(ctx context.Context, rp *vfs.ResolvingPath) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// SetStatAt implements vfs.FilesystemImpl.SetStatAt.
func (fs *filesystem) SetStatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetStatOptions) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// StatAt implements vfs.FilesystemImpl.StatAt.
func (fs *filesystem) StatAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.StatOptions) (linux.Statx, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return linux.Statx{}, err
@@ -1021,31 +998,31 @@ func (fs *filesystem) StatFSAt(ctx context.Context, rp *vfs.ResolvingPath) (linu
// SymlinkAt implements vfs.FilesystemImpl.SymlinkAt.
func (fs *filesystem) SymlinkAt(ctx context.Context, rp *vfs.ResolvingPath, target string) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// UnlinkAt implements vfs.FilesystemImpl.UnlinkAt.
func (fs *filesystem) UnlinkAt(ctx context.Context, rp *vfs.ResolvingPath) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// BoundEndpointAt implements vfs.FilesystemImpl.BoundEndpointAt.
func (fs *filesystem) BoundEndpointAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.BoundEndpointOptions) (transport.BoundEndpoint, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
if _, err := fs.resolveLocked(ctx, rp, &ds); err != nil {
return nil, err
}
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
// ListXattrAt implements vfs.FilesystemImpl.ListXattrAt.
func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, size uint64) ([]string, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return nil, err
@@ -1061,7 +1038,7 @@ func (fs *filesystem) ListXattrAt(ctx context.Context, rp *vfs.ResolvingPath, si
func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.GetXattrOptions) (string, error) {
var ds *[]*dentry
fs.renameMu.RLock()
- defer fs.renameMuRUnlockAndCheckDrop(ctx, &ds)
+ defer fs.renameMuRUnlockAndCheckCaching(ctx, &ds)
d, err := fs.resolveLocked(ctx, rp, &ds)
if err != nil {
return "", err
@@ -1076,13 +1053,13 @@ func (fs *filesystem) GetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opt
// SetXattrAt implements vfs.FilesystemImpl.SetXattrAt.
func (fs *filesystem) SetXattrAt(ctx context.Context, rp *vfs.ResolvingPath, opts vfs.SetXattrOptions) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// RemoveXattrAt implements vfs.FilesystemImpl.RemoveXattrAt.
func (fs *filesystem) RemoveXattrAt(ctx context.Context, rp *vfs.ResolvingPath, name string) error {
// Verity file system is read-only.
- return syserror.EROFS
+ return linuxerr.EROFS
}
// PrependPath implements vfs.FilesystemImpl.PrependPath.
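
Across this file, errno checks move from == syserror.X to linuxerr.Equals, while internal sentinels such as linuxerr.ErrWouldBlock are still compared directly (as in the tmpfs test earlier). A short sketch of the two styles, assuming only the linuxerr package (classify is a hypothetical helper):

package errsketch

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// classify shows the two comparison styles used after this change: errno
// values go through linuxerr.Equals, internal sentinels are compared with ==.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENODATA, err):
		return "missing file or xattr"
	case err == linuxerr.ErrWouldBlock:
		return "would block"
	default:
		return "other error"
	}
}
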
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
index 969003613..d2526263c 100644
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ b/pkg/sentry/fsimpl/verity/verity.go
@@ -23,10 +23,12 @@
// Lock order:
//
// filesystem.renameMu
-// dentry.dirMu
-// fileDescription.mu
-// filesystem.verityMu
-// dentry.hashMu
+// dentry.cachingMu
+// filesystem.cacheMu
+// dentry.dirMu
+// fileDescription.mu
+// filesystem.verityMu
+// dentry.hashMu
//
// Locking dentry.dirMu in multiple dentries requires that parent dentries are
// locked before child dentries, and that filesystem.renameMu is locked to
@@ -39,12 +41,14 @@ import (
"encoding/json"
"fmt"
"math"
+ "sort"
"strconv"
"strings"
"sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -58,7 +62,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -95,6 +98,9 @@ const (
// sizeOfStringInt32 is the size for a 32 bit integer stored as string in
// extended attributes. The maximum value of a 32 bit integer has 10 digits.
sizeOfStringInt32 = 10
+
+ // defaultMaxCachedDentries is the default limit of dentry cache.
+ defaultMaxCachedDentries = uint64(1000)
)
var (
@@ -105,9 +111,10 @@ var (
// Mount option names for verityfs.
const (
- moptLowerPath = "lower_path"
- moptRootHash = "root_hash"
- moptRootName = "root_name"
+ moptLowerPath = "lower_path"
+ moptRootHash = "root_hash"
+ moptRootName = "root_name"
+ moptDentryCacheLimit = "dentry_cache_limit"
)
// HashAlgorithm is a type specifying the algorithm used to hash the file
@@ -187,6 +194,17 @@ type filesystem struct {
// dentries.
renameMu sync.RWMutex `state:"nosave"`
+ // cachedDentries contains all dentries with 0 references. (Due to race
+ // conditions, it may also contain dentries with non-zero references.)
+ // cachedDentriesLen is the number of dentries in cachedDentries. These
+ // fields are protected by cacheMu.
+ cacheMu sync.Mutex `state:"nosave"`
+ cachedDentries dentryList
+ cachedDentriesLen uint64
+
+ // maxCachedDentries is the maximum size of filesystem.cachedDentries.
+ maxCachedDentries uint64
+
// verityMu synchronizes enabling verity files, protects files or
// directories from being enabled by different threads simultaneously.
// It also ensures that verity does not access files that are being
@@ -197,6 +215,10 @@ type filesystem struct {
// is for the whole file system to ensure that no more than one file is
// enabled the same time.
verityMu sync.RWMutex `state:"nosave"`
+
+ // released is nonzero once filesystem.Release has been called. It is accessed
+ // with atomic memory operations.
+ released int32
}
// InternalFilesystemOptions may be passed as
@@ -237,7 +259,7 @@ func (FilesystemType) Release(ctx context.Context) {}
// mode, it returns EIO; otherwise it panics.
func (fs *filesystem) alertIntegrityViolation(msg string) error {
if fs.action == ErrorOnViolation {
- return syserror.EIO
+ return linuxerr.EIO
}
panic(msg)
}
@@ -251,7 +273,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
hash, err := hex.DecodeString(encodedRootHash)
if err != nil {
ctx.Warningf("verity.FilesystemType.GetFilesystem: Failed to decode root hash: %v", err)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
rootHash = hash
}
@@ -265,23 +287,33 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
delete(mopts, moptRootName)
rootName = root
}
+ maxCachedDentries := defaultMaxCachedDentries
+ if str, ok := mopts[moptDentryCacheLimit]; ok {
+ delete(mopts, moptDentryCacheLimit)
+ maxCD, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ ctx.Warningf("verity.FilesystemType.GetFilesystem: invalid dentry cache limit: %s=%s", moptDentryCacheLimit, str)
+ return nil, nil, linuxerr.EINVAL
+ }
+ maxCachedDentries = maxCD
+ }
// Check for unparsed options.
if len(mopts) != 0 {
ctx.Warningf("verity.FilesystemType.GetFilesystem: unknown options: %v", mopts)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
// Handle internal options.
iopts, ok := opts.InternalData.(InternalFilesystemOptions)
if len(lowerPathname) == 0 && !ok {
ctx.Warningf("verity.FilesystemType.GetFilesystem: missing verity configs")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if len(lowerPathname) != 0 {
if ok {
ctx.Warningf("verity.FilesystemType.GetFilesystem: unexpected verity configs with specified lower path")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
iopts = InternalFilesystemOptions{
AllowRuntimeEnable: len(rootHash) == 0,
@@ -300,7 +332,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
lowerPath := fspath.Parse(lowerPathname)
if !lowerPath.Absolute {
ctx.Infof("verity.FilesystemType.GetFilesystem: lower_path %q must be absolute", lowerPathname)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
var err error
mountedLowerVD, err = vfsObj.GetDentryAt(ctx, creds, &vfs.PathOperation{
@@ -338,12 +370,16 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
action: iopts.Action,
opts: opts.Data,
allowRuntimeEnable: iopts.AllowRuntimeEnable,
+ maxCachedDentries: maxCachedDentries,
}
fs.vfsfs.Init(vfsObj, &fstype, fs)
// Construct the root dentry.
d := fs.newDentry()
- d.refs = 1
+ // Set the root's reference count to 2. One reference is returned to
+ // the caller, and the other is held by fs to prevent the root from
+ // being "cached" and subsequently evicted.
+ d.refs = 2
lowerVD := vfs.MakeVirtualDentry(lowerMount, lowerMount.Root())
lowerVD.IncRef()
d.lowerVD = lowerVD
@@ -358,7 +394,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// If runtime enable is allowed, the root merkle tree may be absent. We
// should create the tree file.
- if err == syserror.ENOENT && fs.allowRuntimeEnable {
+ if linuxerr.Equals(linuxerr.ENOENT, err) && fs.allowRuntimeEnable {
lowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
Root: lowerVD,
Start: lowerVD,
@@ -439,7 +475,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if !d.isDir() {
ctx.Warningf("verity root must be a directory")
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
if !fs.allowRuntimeEnable {
@@ -451,7 +487,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
Name: childrenOffsetXattr,
Size: sizeOfStringInt32,
})
- if err == syserror.ENOENT || err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENODATA, err) {
return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", childrenOffsetXattr, err))
}
if err != nil {
@@ -470,7 +506,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
Name: childrenSizeXattr,
Size: sizeOfStringInt32,
})
- if err == syserror.ENOENT || err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENOENT, err) || linuxerr.Equals(linuxerr.ENODATA, err) {
return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", childrenSizeXattr, err))
}
if err != nil {
@@ -487,7 +523,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
}, &vfs.OpenOptions{
Flags: linux.O_RDONLY,
})
- if err == syserror.ENOENT {
+ if linuxerr.Equals(linuxerr.ENOENT, err) {
return nil, nil, fs.alertIntegrityViolation(fmt.Sprintf("Failed to open root Merkle file: %v", err))
}
if err != nil {
@@ -508,6 +544,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
if err := fs.verifyStatAndChildrenLocked(ctx, d, stat); err != nil {
return nil, nil, err
}
+ d.generateChildrenList()
}
d.vfsd.Init(d)
@@ -517,7 +554,16 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Release implements vfs.FilesystemImpl.Release.
func (fs *filesystem) Release(ctx context.Context) {
+ atomic.StoreInt32(&fs.released, 1)
fs.lowerMount.DecRef(ctx)
+
+ fs.renameMu.Lock()
+ fs.evictAllCachedDentriesLocked(ctx)
+ fs.renameMu.Unlock()
+
+ // An extra reference was held by the filesystem on the root to prevent
+ // it from being cached/evicted.
+ fs.rootDentry.DecRef(ctx)
}
// MountOptions implements vfs.FilesystemImpl.MountOptions.
@@ -531,6 +577,11 @@ func (fs *filesystem) MountOptions() string {
type dentry struct {
vfsd vfs.Dentry
+ // refs is the reference count. Each dentry holds a reference on its
+ // parent, even if disowned. When refs reaches 0, the dentry may be
+ // added to the cache or destroyed. If refs == -1, the dentry has
+ // already been destroyed. refs is accessed using atomic memory
+ // operations.
refs int64
// fs is the owning filesystem. fs is immutable.
@@ -564,6 +615,11 @@ type dentry struct {
// populated by enableVerity. childrenNames is also protected by dirMu.
childrenNames map[string]struct{}
+ // childrenList is a complete sorted list of childrenNames. This list
+ // is generated when verity is enabled, or the first time the file is
+ // verified in non-runtime-enable mode.
+ childrenList []string
+
// lowerVD is the VirtualDentry in the underlying file system. It is
// never modified after initialized.
lowerVD vfs.VirtualDentry
@@ -580,13 +636,23 @@ type dentry struct {
// is protected by hashMu.
hashMu sync.RWMutex `state:"nosave"`
hash []byte
+
+ // cachingMu is used to synchronize concurrent dentry caching attempts on
+ // this dentry.
+ cachingMu sync.Mutex `state:"nosave"`
+
+ // If cached is true, dentryEntry links dentry into
+ // filesystem.cachedDentries. cached and dentryEntry are protected by
+ // cachingMu.
+ cached bool
+ dentryEntry
}
// newDentry creates a new dentry representing the given verity file. The
-// dentry initially has no references; it is the caller's responsibility to set
-// the dentry's reference count and/or call dentry.destroy() as appropriate.
-// The dentry is initially invalid in that it contains no underlying dentry;
-// the caller is responsible for setting them.
+// dentry initially has no references, but is not cached; it is the caller's
+// responsibility to set the dentry's reference count and/or call
+// dentry.destroy() as appropriate. The dentry is initially invalid in that it
+// contains no underlying dentry; the caller is responsible for setting them.
func (fs *filesystem) newDentry() *dentry {
d := &dentry{
fs: fs,
@@ -622,42 +688,23 @@ func (d *dentry) TryIncRef() bool {
// DecRef implements vfs.DentryImpl.DecRef.
func (d *dentry) DecRef(ctx context.Context) {
- r := atomic.AddInt64(&d.refs, -1)
- if d.LogRefs() {
- refsvfs2.LogDecRef(d, r)
- }
- if r == 0 {
- d.fs.renameMu.Lock()
- d.checkDropLocked(ctx)
- d.fs.renameMu.Unlock()
- } else if r < 0 {
- panic("verity.dentry.DecRef() called without holding a reference")
+ if d.decRefNoCaching() == 0 {
+ d.checkCachingLocked(ctx, false /* renameMuWriteLocked */)
}
}
-func (d *dentry) decRefLocked(ctx context.Context) {
+// decRefNoCaching decrements d's reference count without calling
+// d.checkCachingLocked, even if d's reference count reaches 0; callers are
+// responsible for ensuring that d.checkCachingLocked will be called later.
+func (d *dentry) decRefNoCaching() int64 {
r := atomic.AddInt64(&d.refs, -1)
if d.LogRefs() {
refsvfs2.LogDecRef(d, r)
}
- if r == 0 {
- d.checkDropLocked(ctx)
- } else if r < 0 {
- panic("verity.dentry.decRefLocked() called without holding a reference")
- }
-}
-
-// checkDropLocked should be called after d's reference count becomes 0 or it
-// becomes deleted.
-func (d *dentry) checkDropLocked(ctx context.Context) {
- // Dentries with a positive reference count must be retained. Dentries
- // with a negative reference count have already been destroyed.
- if atomic.LoadInt64(&d.refs) != 0 {
- return
+ if r < 0 {
+ panic("verity.dentry.decRefNoCaching() called without holding a reference")
}
- // Refs is still zero; destroy it.
- d.destroyLocked(ctx)
- return
+ return r
}
// destroyLocked destroys the dentry.
@@ -676,6 +723,12 @@ func (d *dentry) destroyLocked(ctx context.Context) {
panic("verity.dentry.destroyLocked() called with references on the dentry")
}
+ // Drop the reference held by d on its parent without recursively
+ // locking d.fs.renameMu.
+ if d.parent != nil && d.parent.decRefNoCaching() == 0 {
+ d.parent.checkCachingLocked(ctx, true /* renameMuWriteLocked */)
+ }
+
if d.lowerVD.Ok() {
d.lowerVD.DecRef(ctx)
}
@@ -688,7 +741,6 @@ func (d *dentry) destroyLocked(ctx context.Context) {
delete(d.parent.children, d.name)
}
d.parent.dirMu.Unlock()
- d.parent.decRefLocked(ctx)
}
refsvfs2.Unregister(d)
}
@@ -727,6 +779,140 @@ func (d *dentry) OnZeroWatches(context.Context) {
//TODO(b/159261227): Implement OnZeroWatches.
}
+// checkCachingLocked should be called after d's reference count becomes 0 or
+// it becomes disowned.
+//
+// For performance, checkCachingLocked can also be called after d's reference
+// count becomes non-zero, so that d can be removed from the LRU cache. This
+// may help in reducing the size of the cache and hence reduce evictions. Note
+// that this is not necessary for correctness.
+//
+// It may be called on a destroyed dentry. For example,
+// renameMu[R]UnlockAndCheckCaching may call checkCachingLocked multiple times
+// for the same dentry when the dentry is visited more than once in the same
+// operation. One of the calls may destroy the dentry, so subsequent calls will
+// do nothing.
+//
+// Preconditions: d.fs.renameMu must be locked for writing if
+// renameMuWriteLocked is true; it may be temporarily unlocked.
+func (d *dentry) checkCachingLocked(ctx context.Context, renameMuWriteLocked bool) {
+ d.cachingMu.Lock()
+ refs := atomic.LoadInt64(&d.refs)
+ if refs == -1 {
+ // Dentry has already been destroyed.
+ d.cachingMu.Unlock()
+ return
+ }
+ if refs > 0 {
+ // fs.cachedDentries is permitted to contain dentries with non-zero refs,
+ // which are skipped by fs.evictCachedDentryLocked() upon reaching the end
+ // of the LRU. But it is still beneficial to remove d from the cache as we
+ // are already holding d.cachingMu. Keeping a cleaner cache also reduces
+ // the number of evictions (which is expensive as it acquires fs.renameMu).
+ d.removeFromCacheLocked()
+ d.cachingMu.Unlock()
+ return
+ }
+
+ if atomic.LoadInt32(&d.fs.released) != 0 {
+ d.cachingMu.Unlock()
+ if !renameMuWriteLocked {
+ // Need to lock d.fs.renameMu to access d.parent. Lock it for writing as
+ // needed by d.destroyLocked() later.
+ d.fs.renameMu.Lock()
+ defer d.fs.renameMu.Unlock()
+ }
+ if d.parent != nil {
+ d.parent.dirMu.Lock()
+ delete(d.parent.children, d.name)
+ d.parent.dirMu.Unlock()
+ }
+ d.destroyLocked(ctx) // +checklocksforce: see above.
+ return
+ }
+
+ d.fs.cacheMu.Lock()
+ // If d is already cached, just move it to the front of the LRU.
+ if d.cached {
+ d.fs.cachedDentries.Remove(d)
+ d.fs.cachedDentries.PushFront(d)
+ d.fs.cacheMu.Unlock()
+ d.cachingMu.Unlock()
+ return
+ }
+ // Cache the dentry, then evict the least recently used cached dentry if
+ // the cache becomes over-full.
+ d.fs.cachedDentries.PushFront(d)
+ d.fs.cachedDentriesLen++
+ d.cached = true
+ shouldEvict := d.fs.cachedDentriesLen > d.fs.maxCachedDentries
+ d.fs.cacheMu.Unlock()
+ d.cachingMu.Unlock()
+
+ if shouldEvict {
+ if !renameMuWriteLocked {
+ // Need to lock d.fs.renameMu for writing as needed by
+ // d.evictCachedDentryLocked().
+ d.fs.renameMu.Lock()
+ defer d.fs.renameMu.Unlock()
+ }
+ d.fs.evictCachedDentryLocked(ctx) // +checklocksforce: see above.
+ }
+}
+
+// Preconditions: d.cachingMu must be locked.
+func (d *dentry) removeFromCacheLocked() {
+ if d.cached {
+ d.fs.cacheMu.Lock()
+ d.fs.cachedDentries.Remove(d)
+ d.fs.cachedDentriesLen--
+ d.fs.cacheMu.Unlock()
+ d.cached = false
+ }
+}
+
+// Precondition: fs.renameMu must be locked for writing; it may be temporarily
+// unlocked.
+// +checklocks:fs.renameMu
+func (fs *filesystem) evictAllCachedDentriesLocked(ctx context.Context) {
+ for fs.cachedDentriesLen != 0 {
+ fs.evictCachedDentryLocked(ctx)
+ }
+}
+
+// Preconditions:
+// * fs.renameMu must be locked for writing; it may be temporarily unlocked.
+// +checklocks:fs.renameMu
+func (fs *filesystem) evictCachedDentryLocked(ctx context.Context) {
+ fs.cacheMu.Lock()
+ victim := fs.cachedDentries.Back()
+ fs.cacheMu.Unlock()
+ if victim == nil {
+ // fs.cachedDentries may have become empty between when it was
+ // checked and when we locked fs.cacheMu.
+ return
+ }
+
+ victim.cachingMu.Lock()
+ victim.removeFromCacheLocked()
+ // victim.refs may have become non-zero from an earlier path resolution
+ // since it was inserted into fs.cachedDentries.
+ if atomic.LoadInt64(&victim.refs) != 0 {
+ victim.cachingMu.Unlock()
+ return
+ }
+ if victim.parent != nil {
+ victim.parent.dirMu.Lock()
+ // Note that victim can't be a mount point (in any mount
+ // namespace), since VFS holds references on mount points.
+ fs.vfsfs.VirtualFilesystem().InvalidateDentry(ctx, &victim.vfsd)
+ delete(victim.parent.children, victim.name)
+ victim.parent.dirMu.Unlock()
+ }
+ victim.cachingMu.Unlock()
+ victim.destroyLocked(ctx) // +checklocksforce: owned as precondition, victim.fs == fs.
+}
+
func (d *dentry) isSymlink() bool {
return atomic.LoadUint32(&d.mode)&linux.S_IFMT == linux.S_IFLNK
}
@@ -749,6 +935,17 @@ func (d *dentry) verityEnabled() bool {
return !d.fs.allowRuntimeEnable || len(d.hash) != 0
}
+// generateChildrenList generates a sorted childrenList from childrenNames, and
+// caches it in d for hashing.
+func (d *dentry) generateChildrenList() {
+ if len(d.childrenList) == 0 && len(d.childrenNames) != 0 {
+ for child := range d.childrenNames {
+ d.childrenList = append(d.childrenList, child)
+ }
+ sort.Strings(d.childrenList)
+ }
+}
+
// getLowerAt returns the dentry in the underlying file system, which is
// represented by filename relative to d.
func (d *dentry) getLowerAt(ctx context.Context, vfsObj *vfs.VirtualFilesystem, filename string) (vfs.VirtualDentry, error) {
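
generateChildrenList exists because Merkle hashing needs a deterministic child order, so the childrenNames set is flattened into a sorted slice once and cached on the dentry. The core step in isolation (sortedChildren is a hypothetical name):

package veritysketch

import "sort"

// sortedChildren flattens a child-name set into a stable, sorted slice so
// that Merkle hashing sees the same order on every run.
func sortedChildren(names map[string]struct{}) []string {
	out := make([]string, 0, len(names))
	for name := range names {
		out = append(out, name)
	}
	sort.Strings(out)
	return out
}
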
@@ -857,13 +1054,13 @@ func (fd *fileDescription) Stat(ctx context.Context, opts vfs.StatOptions) (linu
// SetStat implements vfs.FileDescriptionImpl.SetStat.
func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions) error {
// Verity files are read-only.
- return syserror.EPERM
+ return linuxerr.EPERM
}
// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
func (fd *fileDescription) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
if !fd.d.isDir() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
fd.mu.Lock()
defer fd.mu.Unlock()
@@ -921,14 +1118,14 @@ func (fd *fileDescription) Seek(ctx context.Context, offset int64, whence int32)
case linux.SEEK_END:
n = int64(fd.d.size)
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset > math.MaxInt64-n {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
offset += n
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
fd.off = offset
return offset, nil
@@ -962,10 +1159,12 @@ func (fd *fileDescription) generateMerkleLocked(ctx context.Context) ([]byte, ui
return nil, 0, err
}
+ fd.d.generateChildrenList()
+
params := &merkletree.GenerateParams{
TreeReader: &merkleReader,
TreeWriter: &merkleWriter,
- Children: fd.d.childrenNames,
+ Children: fd.d.childrenList,
HashAlgorithms: fd.d.fs.alg.toLinuxHashAlg(),
Name: fd.d.name,
Mode: uint32(stat.Mode),
@@ -1007,7 +1206,7 @@ func (fd *fileDescription) generateMerkleLocked(ctx context.Context) ([]byte, ui
default:
// TODO(b/167728857): Investigate whether and how we should
// enable other types of file.
- return nil, 0, syserror.EINVAL
+ return nil, 0, linuxerr.EINVAL
}
hash, err := merkletree.Generate(params)
return hash, uint64(params.Size), err
@@ -1056,7 +1255,7 @@ func (fd *fileDescription) recordChildrenLocked(ctx context.Context) error {
// and stores its hash in its parent directory's Merkle tree.
func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {
if !fd.d.fs.allowRuntimeEnable {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
fd.d.fs.verityMu.Lock()
@@ -1070,6 +1269,21 @@ func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {
return 0, fd.d.fs.alertIntegrityViolation("Unexpected verity fd: missing expected underlying fds")
}
+ // Populate children names here. We cannot rely on the children
+ // dentries to populate parent dentry's children names, because the
+ // parent dentry may be destroyed before users enable verity if its ref
+ // count drops to zero.
+ if fd.d.isDir() {
+ if err := fd.IterDirents(ctx, vfs.IterDirentsCallbackFunc(func(dirent vfs.Dirent) error {
+ if dirent.Name != "." && dirent.Name != ".." {
+ fd.d.childrenNames[dirent.Name] = struct{}{}
+ }
+ return nil
+ })); err != nil {
+ return 0, err
+ }
+ }
+
hash, dataSize, err := fd.generateMerkleLocked(ctx)
if err != nil {
return 0, err
@@ -1097,9 +1311,6 @@ func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {
}); err != nil {
return 0, err
}
-
- // Add the current child's name to parent's childrenNames.
- fd.d.parent.childrenNames[fd.d.name] = struct{}{}
}
// Record the size of the data being hashed for fd.
@@ -1125,7 +1336,7 @@ func (fd *fileDescription) enableVerity(ctx context.Context) (uintptr, error) {
func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest hostarch.Addr) (uintptr, error) {
t := kernel.TaskFromContext(ctx)
if t == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var metadata linux.DigestMetadata
@@ -1138,7 +1349,7 @@ func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest hosta
// enabled, in which case fd.d.hash should be set.
if len(fd.d.hash) == 0 {
if fd.d.fs.allowRuntimeEnable {
- return 0, syserror.ENODATA
+ return 0, linuxerr.ENODATA
}
return 0, fd.d.fs.alertIntegrityViolation("Ioctl measureVerity: no hash found")
}
@@ -1148,7 +1359,7 @@ func (fd *fileDescription) measureVerity(ctx context.Context, verityDigest hosta
return 0, err
}
if metadata.DigestSize < uint16(len(fd.d.hash)) {
- return 0, syserror.EOVERFLOW
+ return 0, linuxerr.EOVERFLOW
}
// Populate the output digest size, since DigestSize is both input and
@@ -1178,7 +1389,7 @@ func (fd *fileDescription) verityFlags(ctx context.Context, flags hostarch.Addr)
t := kernel.TaskFromContext(ctx)
if t == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
_, err := primitive.CopyInt32Out(t, flags, f)
return 0, err
@@ -1194,7 +1405,7 @@ func (fd *fileDescription) Ioctl(ctx context.Context, uio usermem.IO, args arch.
case linux.FS_IOC_GETFLAGS:
return fd.verityFlags(ctx, args[2].Pointer())
default:
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
}
}
@@ -1227,7 +1438,7 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
// The Merkle tree file for the child should have been created and
// contains the expected xattrs. If the xattr does not exist, it
// indicates unexpected modifications to the file system.
- if err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENODATA, err) {
return 0, fd.d.fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", merkleSizeXattr, err))
}
if err != nil {
@@ -1261,7 +1472,7 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
Mode: fd.d.mode,
UID: fd.d.uid,
GID: fd.d.gid,
- Children: fd.d.childrenNames,
+ Children: fd.d.childrenList,
HashAlgorithms: fd.d.fs.alg.toLinuxHashAlg(),
ReadOffset: offset,
ReadSize: dst.NumBytes(),
@@ -1277,12 +1488,12 @@ func (fd *fileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
// PWrite implements vfs.FileDescriptionImpl.PWrite.
func (fd *fileDescription) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EROFS
+ return 0, linuxerr.EROFS
}
// Write implements vfs.FileDescriptionImpl.Write.
func (fd *fileDescription) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.EROFS
+ return 0, linuxerr.EROFS
}
// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
@@ -1298,7 +1509,7 @@ func (fd *fileDescription) ConfigureMMap(ctx context.Context, opts *memmap.MMapO
// Check if mmap is allowed on the lower filesystem.
if !opts.SentryOwnedContent {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
return vfs.GenericConfigureMMap(&fd.vfsfd, fd, opts)
}
@@ -1349,7 +1560,7 @@ func (fd *fileDescription) Translate(ctx context.Context, required, optional mem
// The Merkle tree file for the child should have been created and
// contains the expected xattrs. If the xattr does not exist, it
// indicates unexpected modifications to the file system.
- if err == syserror.ENODATA {
+ if linuxerr.Equals(linuxerr.ENODATA, err) {
return nil, fd.d.fs.alertIntegrityViolation(fmt.Sprintf("Failed to get xattr %s: %v", merkleSizeXattr, err))
}
if err != nil {
@@ -1433,7 +1644,7 @@ func (r *mmapReadSeeker) ReadAt(p []byte, off int64) (int, error) {
// mapped region.
readOffset := off - int64(r.Offset)
if readOffset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
bs.DropFirst64(uint64(readOffset))
view := bs.TakeFirst64(uint64(len(p)))
diff --git a/pkg/sentry/fsimpl/verity/verity_test.go b/pkg/sentry/fsimpl/verity/verity_test.go
index 5c78a0019..af041bd50 100644
--- a/pkg/sentry/fsimpl/verity/verity_test.go
+++ b/pkg/sentry/fsimpl/verity/verity_test.go
@@ -24,6 +24,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/testutil"
@@ -31,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -476,7 +476,7 @@ func TestOpenNonexistentFile(t *testing.T) {
// Ensure open an unexpected file in the parent directory fails with
// ENOENT rather than verification failure.
- if _, err = openVerityAt(ctx, vfsObj, root, filename+"abc", linux.O_RDONLY, linux.ModeRegular); err != syserror.ENOENT {
+ if _, err = openVerityAt(ctx, vfsObj, root, filename+"abc", linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.ENOENT, err) {
t.Errorf("OpenAt unexpected error: %v", err)
}
}
@@ -767,7 +767,7 @@ func TestOpenDeletedFileFails(t *testing.T) {
}
// Ensure reopening the verity enabled file fails.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); err != syserror.EIO {
+ if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
t.Errorf("got OpenAt error: %v, expected EIO", err)
}
})
@@ -829,7 +829,7 @@ func TestOpenRenamedFileFails(t *testing.T) {
}
// Ensure reopening the verity enabled file fails.
- if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); err != syserror.EIO {
+ if _, err = openVerityAt(ctx, vfsObj, root, filename, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
t.Errorf("got OpenAt error: %v, expected EIO", err)
}
})
@@ -899,7 +899,7 @@ func TestUnmodifiedSymlinkFileReadSucceeds(t *testing.T) {
t.Fatalf("SymlinkAt: %v", err)
}
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_PATH|linux.O_NOFOLLOW, linux.ModeRegular)
+ fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
if err != nil {
t.Fatalf("openVerityAt symlink: %v", err)
@@ -1034,7 +1034,7 @@ func TestDeletedSymlinkFileReadFails(t *testing.T) {
t.Fatalf("SymlinkAt: %v", err)
}
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_PATH|linux.O_NOFOLLOW, linux.ModeRegular)
+ fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
if err != nil {
t.Fatalf("openVerityAt symlink: %v", err)
@@ -1063,14 +1063,14 @@ func TestDeletedSymlinkFileReadFails(t *testing.T) {
Root: root,
Start: root,
Path: fspath.Parse(symlink),
- }); err != syserror.EIO {
+ }); !linuxerr.Equals(linuxerr.EIO, err) {
t.Fatalf("ReadlinkAt succeeded with modified symlink: %v", err)
}
if tc.testWalk {
fileInSymlinkDirectory := symlink + "/verity-test-file"
// Ensure opening the verity enabled file in the symlink directory fails.
- if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); err != syserror.EIO {
+ if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
t.Errorf("Open succeeded with modified symlink: %v", err)
}
}
@@ -1136,7 +1136,7 @@ func TestModifiedSymlinkFileReadFails(t *testing.T) {
}
// Open symlink file to get the fd for ioctl in new step.
- fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_PATH|linux.O_NOFOLLOW, linux.ModeRegular)
+ fd, err := openVerityAt(ctx, vfsObj, root, symlink, linux.O_NOFOLLOW, linux.ModeRegular)
if err != nil {
t.Fatalf("OpenAt symlink: %v", err)
}
@@ -1195,14 +1195,14 @@ func TestModifiedSymlinkFileReadFails(t *testing.T) {
Root: root,
Start: root,
Path: fspath.Parse(symlink),
- }); err != syserror.EIO {
+ }); !linuxerr.Equals(linuxerr.EIO, err) {
t.Fatalf("ReadlinkAt succeeded with modified symlink: %v", err)
}
if tc.testWalk {
fileInSymlinkDirectory := symlink + "/verity-test-file"
// Ensure opening the verity enabled file in the symlink directory fails.
- if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); err != syserror.EIO {
+ if _, err := openVerityAt(ctx, vfsObj, root, fileInSymlinkDirectory, linux.O_RDONLY, linux.ModeRegular); !linuxerr.Equals(linuxerr.EIO, err) {
t.Errorf("Open succeeded with modified symlink: %v", err)
}
}
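The test changes above replace direct comparisons such as err != syserror.ENOENT with linuxerr.Equals, since linuxerr values are distinct error objects rather than raw errno values and Equals also matches an equivalent raw errno. A minimal stand-alone sketch of that comparison pattern (illustrative, not part of this change), assuming only the linuxerr package used above:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// classify maps an error from a verity open to a short label, using
// linuxerr.Equals rather than == so that either the linuxerr value or an
// equivalent errno matches.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case linuxerr.Equals(linuxerr.ENOENT, err):
		return "missing file"
	case linuxerr.Equals(linuxerr.EIO, err):
		return "verity verification failure"
	default:
		return fmt.Sprintf("unexpected error: %v", err)
	}
}

func main() {
	fmt.Println(classify(linuxerr.ENOENT)) // prints "missing file"
}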
diff --git a/pkg/sentry/hostfd/hostfd_linux.go b/pkg/sentry/hostfd/hostfd_linux.go
index 1cabc848f..0131da22d 100644
--- a/pkg/sentry/hostfd/hostfd_linux.go
+++ b/pkg/sentry/hostfd/hostfd_linux.go
@@ -12,7 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package hostfd
-// maxIov is the maximum permitted size of a struct iovec array.
-const maxIov = 1024 // UIO_MAXIOV
+// MaxReadWriteIov is the maximum permitted size of a struct iovec array in a
+// readv, writev, preadv, or pwritev host syscall.
+const MaxReadWriteIov = 1024 // UIO_MAXIOV
+
+// MaxSendRecvMsgIov is the maximum permitted size of a struct iovec array in a
+// sendmsg or recvmsg host syscall.
+const MaxSendRecvMsgIov = 1024 // UIO_MAXIOV
diff --git a/pkg/sentry/hostfd/hostfd_unsafe.go b/pkg/sentry/hostfd/hostfd_unsafe.go
index 03c6d2a16..a43311eb4 100644
--- a/pkg/sentry/hostfd/hostfd_unsafe.go
+++ b/pkg/sentry/hostfd/hostfd_unsafe.go
@@ -23,6 +23,11 @@ import (
"gvisor.dev/gvisor/pkg/safemem"
)
+const (
+ sizeofIovec = unsafe.Sizeof(unix.Iovec{})
+ sizeofMsghdr = unsafe.Sizeof(unix.Msghdr{})
+)
+
// Preadv2 reads up to dsts.NumBytes() bytes from host file descriptor fd into
// dsts. offset and flags are interpreted as for preadv2(2).
//
@@ -44,9 +49,9 @@ func Preadv2(fd int32, dsts safemem.BlockSeq, offset int64, flags uint32) (uint6
}
} else {
iovs := safemem.IovecsFromBlockSeq(dsts)
- if len(iovs) > maxIov {
- log.Debugf("hostfd.Preadv2: truncating from %d iovecs to %d", len(iovs), maxIov)
- iovs = iovs[:maxIov]
+ if len(iovs) > MaxReadWriteIov {
+ log.Debugf("hostfd.Preadv2: truncating from %d iovecs to %d", len(iovs), MaxReadWriteIov)
+ iovs = iovs[:MaxReadWriteIov]
}
n, _, e = unix.Syscall6(unix.SYS_PREADV2, uintptr(fd), uintptr((unsafe.Pointer)(&iovs[0])), uintptr(len(iovs)), uintptr(offset), 0 /* pos_h */, uintptr(flags))
}
@@ -80,9 +85,9 @@ func Pwritev2(fd int32, srcs safemem.BlockSeq, offset int64, flags uint32) (uint
}
} else {
iovs := safemem.IovecsFromBlockSeq(srcs)
- if len(iovs) > maxIov {
- log.Debugf("hostfd.Preadv2: truncating from %d iovecs to %d", len(iovs), maxIov)
- iovs = iovs[:maxIov]
+ if len(iovs) > MaxReadWriteIov {
+ log.Debugf("hostfd.Preadv2: truncating from %d iovecs to %d", len(iovs), MaxReadWriteIov)
+ iovs = iovs[:MaxReadWriteIov]
}
n, _, e = unix.Syscall6(unix.SYS_PWRITEV2, uintptr(fd), uintptr((unsafe.Pointer)(&iovs[0])), uintptr(len(iovs)), uintptr(offset), 0 /* pos_h */, uintptr(flags))
}
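Renaming the package-private maxIov to the exported MaxReadWriteIov and MaxSendRecvMsgIov lets callers outside hostfd respect UIO_MAXIOV as well. A hypothetical caller-side cap (the function name is illustrative, not from this change), assuming only the exported constants and the safemem helper already used above:

package example

import (
	"golang.org/x/sys/unix"

	"gvisor.dev/gvisor/pkg/safemem"
	"gvisor.dev/gvisor/pkg/sentry/hostfd"
)

// capForSendRecv converts a BlockSeq into iovecs and truncates the slice so
// a sendmsg/recvmsg host syscall never sees more than UIO_MAXIOV entries.
func capForSendRecv(bs safemem.BlockSeq) []unix.Iovec {
	iovs := safemem.IovecsFromBlockSeq(bs)
	if len(iovs) > hostfd.MaxSendRecvMsgIov {
		iovs = iovs[:hostfd.MaxSendRecvMsgIov]
	}
	return iovs
}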
diff --git a/pkg/sentry/inet/inet.go b/pkg/sentry/inet/inet.go
index 80dda1559..b121fc1b4 100644
--- a/pkg/sentry/inet/inet.go
+++ b/pkg/sentry/inet/inet.go
@@ -27,6 +27,9 @@ type Stack interface {
// integers.
Interfaces() map[int32]Interface
+ // RemoveInterface removes the specified network interface.
+ RemoveInterface(idx int32) error
+
// InterfaceAddrs returns all network interface addresses as a mapping from
// interface indexes to a slice of associated interface address properties.
InterfaceAddrs() map[int32][]InterfaceAddr
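Adding RemoveInterface to the Stack interface means every implementation must now provide it; TestStack's map-backed version follows below, while a stack that cannot remove interfaces would simply refuse. A hypothetical stub for such a stack (the choice of errno is illustrative, not taken from this change):

package example

import (
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/sentry/inet"
)

// readOnlyStack wraps an inet.Stack but rejects interface removal.
type readOnlyStack struct {
	inet.Stack
}

// RemoveInterface implements inet.Stack.
func (readOnlyStack) RemoveInterface(int32) error {
	return linuxerr.EACCES // illustrative errno; a real stack may differ
}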
diff --git a/pkg/sentry/inet/test_stack.go b/pkg/sentry/inet/test_stack.go
index 218d9dafc..621f47e1f 100644
--- a/pkg/sentry/inet/test_stack.go
+++ b/pkg/sentry/inet/test_stack.go
@@ -45,23 +45,29 @@ func NewTestStack() *TestStack {
}
}
-// Interfaces implements Stack.Interfaces.
+// Interfaces implements Stack.
func (s *TestStack) Interfaces() map[int32]Interface {
return s.InterfacesMap
}
-// InterfaceAddrs implements Stack.InterfaceAddrs.
+// RemoveInterface implements Stack.
+func (s *TestStack) RemoveInterface(idx int32) error {
+ delete(s.InterfacesMap, idx)
+ return nil
+}
+
+// InterfaceAddrs implements Stack.
func (s *TestStack) InterfaceAddrs() map[int32][]InterfaceAddr {
return s.InterfaceAddrsMap
}
-// AddInterfaceAddr implements Stack.AddInterfaceAddr.
+// AddInterfaceAddr implements Stack.
func (s *TestStack) AddInterfaceAddr(idx int32, addr InterfaceAddr) error {
s.InterfaceAddrsMap[idx] = append(s.InterfaceAddrsMap[idx], addr)
return nil
}
-// RemoveInterfaceAddr implements Stack.RemoveInterfaceAddr.
+// RemoveInterfaceAddr implements Stack.
func (s *TestStack) RemoveInterfaceAddr(idx int32, addr InterfaceAddr) error {
interfaceAddrs, ok := s.InterfaceAddrsMap[idx]
if !ok {
@@ -79,94 +85,94 @@ func (s *TestStack) RemoveInterfaceAddr(idx int32, addr InterfaceAddr) error {
return nil
}
-// SupportsIPv6 implements Stack.SupportsIPv6.
+// SupportsIPv6 implements Stack.
func (s *TestStack) SupportsIPv6() bool {
return s.SupportsIPv6Flag
}
-// TCPReceiveBufferSize implements Stack.TCPReceiveBufferSize.
+// TCPReceiveBufferSize implements Stack.
func (s *TestStack) TCPReceiveBufferSize() (TCPBufferSize, error) {
return s.TCPRecvBufSize, nil
}
-// SetTCPReceiveBufferSize implements Stack.SetTCPReceiveBufferSize.
+// SetTCPReceiveBufferSize implements Stack.
func (s *TestStack) SetTCPReceiveBufferSize(size TCPBufferSize) error {
s.TCPRecvBufSize = size
return nil
}
-// TCPSendBufferSize implements Stack.TCPSendBufferSize.
+// TCPSendBufferSize implements Stack.
func (s *TestStack) TCPSendBufferSize() (TCPBufferSize, error) {
return s.TCPSendBufSize, nil
}
-// SetTCPSendBufferSize implements Stack.SetTCPSendBufferSize.
+// SetTCPSendBufferSize implements Stack.
func (s *TestStack) SetTCPSendBufferSize(size TCPBufferSize) error {
s.TCPSendBufSize = size
return nil
}
-// TCPSACKEnabled implements Stack.TCPSACKEnabled.
+// TCPSACKEnabled implements Stack.
func (s *TestStack) TCPSACKEnabled() (bool, error) {
return s.TCPSACKFlag, nil
}
-// SetTCPSACKEnabled implements Stack.SetTCPSACKEnabled.
+// SetTCPSACKEnabled implements Stack.
func (s *TestStack) SetTCPSACKEnabled(enabled bool) error {
s.TCPSACKFlag = enabled
return nil
}
-// TCPRecovery implements Stack.TCPRecovery.
+// TCPRecovery implements Stack.
func (s *TestStack) TCPRecovery() (TCPLossRecovery, error) {
return s.Recovery, nil
}
-// SetTCPRecovery implements Stack.SetTCPRecovery.
+// SetTCPRecovery implements Stack.
func (s *TestStack) SetTCPRecovery(recovery TCPLossRecovery) error {
s.Recovery = recovery
return nil
}
-// Statistics implements inet.Stack.Statistics.
+// Statistics implements Stack.
func (s *TestStack) Statistics(stat interface{}, arg string) error {
return nil
}
-// RouteTable implements Stack.RouteTable.
+// RouteTable implements Stack.
func (s *TestStack) RouteTable() []Route {
return s.RouteList
}
-// Resume implements Stack.Resume.
+// Resume implements Stack.
func (s *TestStack) Resume() {}
-// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.
+// RegisteredEndpoints implements Stack.
func (s *TestStack) RegisteredEndpoints() []stack.TransportEndpoint {
return nil
}
-// CleanupEndpoints implements inet.Stack.CleanupEndpoints.
+// CleanupEndpoints implements Stack.
func (s *TestStack) CleanupEndpoints() []stack.TransportEndpoint {
return nil
}
-// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.
+// RestoreCleanupEndpoints implements Stack.
func (s *TestStack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}
-// SetForwarding implements inet.Stack.SetForwarding.
+// SetForwarding implements Stack.
func (s *TestStack) SetForwarding(protocol tcpip.NetworkProtocolNumber, enable bool) error {
s.IPForwarding = enable
return nil
}
-// PortRange implements inet.Stack.PortRange.
+// PortRange implements Stack.
func (*TestStack) PortRange() (uint16, uint16) {
// Use the default Linux values per net/ipv4/af_inet.c:inet_init_net().
return 32768, 28232
}
-// SetPortRange implements inet.Stack.SetPortRange.
+// SetPortRange implements Stack.
func (*TestStack) SetPortRange(start uint16, end uint16) error {
// No-op.
return nil
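With RemoveInterface wired into TestStack, tests can exercise the new interface method end to end. A hypothetical test (not part of this change), assuming NewTestStack initializes the exported InterfacesMap as shown above:

package inet_test

import (
	"testing"

	"gvisor.dev/gvisor/pkg/sentry/inet"
)

func TestRemoveInterface(t *testing.T) {
	s := inet.NewTestStack()
	s.InterfacesMap[1] = inet.Interface{Name: "eth0"}

	if err := s.RemoveInterface(1); err != nil {
		t.Fatalf("RemoveInterface(1): %v", err)
	}
	if _, ok := s.Interfaces()[1]; ok {
		t.Errorf("interface 1 still present after RemoveInterface")
	}
}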
diff --git a/pkg/sentry/kernel/BUILD b/pkg/sentry/kernel/BUILD
index 188c0ebff..c0f13bf52 100644
--- a/pkg/sentry/kernel/BUILD
+++ b/pkg/sentry/kernel/BUILD
@@ -218,13 +218,17 @@ go_library(
":uncaught_signal_go_proto",
"//pkg/abi",
"//pkg/abi/linux",
+ "//pkg/abi/linux/errno",
"//pkg/amutex",
+ "//pkg/bitmap",
"//pkg/bits",
"//pkg/bpf",
"//pkg/cleanup",
"//pkg/context",
"//pkg/coverage",
"//pkg/cpuid",
+ "//pkg/errors",
+ "//pkg/errors/linuxerr",
"//pkg/eventchannel",
"//pkg/fspath",
"//pkg/goid",
@@ -253,6 +257,7 @@ go_library(
"//pkg/sentry/kernel/auth",
"//pkg/sentry/kernel/epoll",
"//pkg/sentry/kernel/futex",
+ "//pkg/sentry/kernel/msgqueue",
"//pkg/sentry/kernel/sched",
"//pkg/sentry/kernel/semaphore",
"//pkg/sentry/kernel/shm",
@@ -263,6 +268,7 @@ go_library(
"//pkg/sentry/mm",
"//pkg/sentry/pgalloc",
"//pkg/sentry/platform",
+ "//pkg/sentry/seccheck",
"//pkg/sentry/socket/netlink/port",
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/time",
@@ -276,7 +282,6 @@ go_library(
"//pkg/state/wire",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/tcpip/stack",
"//pkg/usermem",
@@ -298,6 +303,7 @@ go_test(
deps = [
"//pkg/abi",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/arch",
"//pkg/sentry/contexttest",
@@ -309,6 +315,5 @@ go_test(
"//pkg/sentry/time",
"//pkg/sentry/usage",
"//pkg/sync",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/kernel/abstract_socket_namespace.go b/pkg/sentry/kernel/abstract_socket_namespace.go
index d100e58d7..5d86a04f3 100644
--- a/pkg/sentry/kernel/abstract_socket_namespace.go
+++ b/pkg/sentry/kernel/abstract_socket_namespace.go
@@ -27,7 +27,7 @@ import (
// +stateify savable
type abstractEndpoint struct {
ep transport.BoundEndpoint
- socket refsvfs2.RefCounter
+ socket refsvfs2.TryRefCounter
name string
ns *AbstractSocketNamespace
}
@@ -57,7 +57,7 @@ func NewAbstractSocketNamespace() *AbstractSocketNamespace {
// its backing socket.
type boundEndpoint struct {
transport.BoundEndpoint
- socket refsvfs2.RefCounter
+ socket refsvfs2.TryRefCounter
}
// Release implements transport.BoundEndpoint.Release.
@@ -89,7 +89,7 @@ func (a *AbstractSocketNamespace) BoundEndpoint(name string) transport.BoundEndp
//
// When the last reference managed by socket is dropped, ep may be removed from the
// namespace.
-func (a *AbstractSocketNamespace) Bind(ctx context.Context, name string, ep transport.BoundEndpoint, socket refsvfs2.RefCounter) error {
+func (a *AbstractSocketNamespace) Bind(ctx context.Context, name string, ep transport.BoundEndpoint, socket refsvfs2.TryRefCounter) error {
a.mu.Lock()
defer a.mu.Unlock()
@@ -109,7 +109,7 @@ func (a *AbstractSocketNamespace) Bind(ctx context.Context, name string, ep tran
// Remove removes the specified socket at name from the abstract socket
// namespace, if it has not yet been replaced.
-func (a *AbstractSocketNamespace) Remove(name string, socket refsvfs2.RefCounter) {
+func (a *AbstractSocketNamespace) Remove(name string, socket refsvfs2.TryRefCounter) {
a.mu.Lock()
defer a.mu.Unlock()
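The switch from refsvfs2.RefCounter to refsvfs2.TryRefCounter lets lookups skip sockets whose last reference is concurrently being dropped. A rough sketch of that pattern, with the interface shape assumed for illustration rather than quoted from refsvfs2:

package example

import (
	"gvisor.dev/gvisor/pkg/context"
)

// tryRefCounter mirrors the assumed shape of refsvfs2.TryRefCounter: a
// reference counter whose TryIncRef fails once destruction has begun.
type tryRefCounter interface {
	TryIncRef() bool
	DecRef(ctx context.Context)
}

// lookupLive returns the endpoint only if a new reference could be taken,
// so callers never observe a socket that is already being torn down.
func lookupLive(m map[string]tryRefCounter, name string) tryRefCounter {
	ep, ok := m[name]
	if !ok || !ep.TryIncRef() {
		return nil
	}
	return ep
}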
diff --git a/pkg/sentry/kernel/auth/BUILD b/pkg/sentry/kernel/auth/BUILD
index 12180351d..9aa03f506 100644
--- a/pkg/sentry/kernel/auth/BUILD
+++ b/pkg/sentry/kernel/auth/BUILD
@@ -63,8 +63,8 @@ go_library(
"//pkg/abi/linux",
"//pkg/bits",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/log",
"//pkg/sync",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/kernel/auth/credentials.go b/pkg/sentry/kernel/auth/credentials.go
index 3325fedcb..fc245c54b 100644
--- a/pkg/sentry/kernel/auth/credentials.go
+++ b/pkg/sentry/kernel/auth/credentials.go
@@ -16,7 +16,7 @@ package auth
import (
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// Credentials contains information required to authorize privileged operations
@@ -203,7 +203,7 @@ func (c *Credentials) UseUID(uid UID) (KUID, error) {
// uid must be mapped.
kuid := c.UserNamespace.MapToKUID(uid)
if !kuid.Ok() {
- return NoID, syserror.EINVAL
+ return NoID, linuxerr.EINVAL
}
// If c has CAP_SETUID, then it can use any UID in its user namespace.
if c.HasCapability(linux.CAP_SETUID) {
@@ -214,7 +214,7 @@ func (c *Credentials) UseUID(uid UID) (KUID, error) {
if kuid == c.RealKUID || kuid == c.EffectiveKUID || kuid == c.SavedKUID {
return kuid, nil
}
- return NoID, syserror.EPERM
+ return NoID, linuxerr.EPERM
}
// UseGID checks that c can use gid in its user namespace, then translates it
@@ -222,7 +222,7 @@ func (c *Credentials) UseUID(uid UID) (KUID, error) {
func (c *Credentials) UseGID(gid GID) (KGID, error) {
kgid := c.UserNamespace.MapToKGID(gid)
if !kgid.Ok() {
- return NoID, syserror.EINVAL
+ return NoID, linuxerr.EINVAL
}
if c.HasCapability(linux.CAP_SETGID) {
return kgid, nil
@@ -230,7 +230,7 @@ func (c *Credentials) UseGID(gid GID) (KGID, error) {
if kgid == c.RealKGID || kgid == c.EffectiveKGID || kgid == c.SavedKGID {
return kgid, nil
}
- return NoID, syserror.EPERM
+ return NoID, linuxerr.EPERM
}
// SetUID translates the provided uid to the root user namespace and updates c's
@@ -239,7 +239,7 @@ func (c *Credentials) UseGID(gid GID) (KGID, error) {
func (c *Credentials) SetUID(uid UID) error {
kuid := c.UserNamespace.MapToKUID(uid)
if !kuid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
c.RealKUID = kuid
c.EffectiveKUID = kuid
@@ -253,7 +253,7 @@ func (c *Credentials) SetUID(uid UID) error {
func (c *Credentials) SetGID(gid GID) error {
kgid := c.UserNamespace.MapToKGID(gid)
if !kgid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
c.RealKGID = kgid
c.EffectiveKGID = kgid
diff --git a/pkg/sentry/kernel/auth/id_map.go b/pkg/sentry/kernel/auth/id_map.go
index 28cbe159d..f06a374a0 100644
--- a/pkg/sentry/kernel/auth/id_map.go
+++ b/pkg/sentry/kernel/auth/id_map.go
@@ -17,7 +17,7 @@ package auth
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// MapFromKUID translates kuid, a UID in the root namespace, to a UID in ns.
@@ -106,11 +106,11 @@ func (ns *UserNamespace) SetUIDMap(ctx context.Context, entries []IDMapEntry) er
// than once to a uid_map file in a user namespace fails with the error
// EPERM. Similar rules apply for gid_map files." - user_namespaces(7)
if !ns.uidMapFromParent.IsEmpty() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "At least one line must be written to the file."
if len(entries) == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// """
// In order for a process to write to the /proc/[pid]/uid_map
@@ -121,12 +121,12 @@ func (ns *UserNamespace) SetUIDMap(ctx context.Context, entries []IDMapEntry) er
// in the user namespace of the process pid.
// """
if !c.HasCapabilityIn(linux.CAP_SETUID, ns) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "2. The writing process must either be in the user namespace of the process
// pid or be in the parent user namespace of the process pid."
if c.UserNamespace != ns && c.UserNamespace != ns.parent {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// """
// 3. (see trySetUIDMap)
@@ -145,14 +145,14 @@ func (ns *UserNamespace) SetUIDMap(ctx context.Context, entries []IDMapEntry) er
// parent user namespace to a user ID (group ID) in the user namespace.
// """
if len(entries) != 1 || ns.parent.MapToKUID(UID(entries[0].FirstParentID)) != c.EffectiveKUID || entries[0].Length != 1 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// """
// + The writing process must have the same effective user ID as the
// process that created the user namespace.
// """
if c.EffectiveKUID != ns.owner {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
// trySetUIDMap leaves data in maps if it fails.
@@ -170,11 +170,11 @@ func (ns *UserNamespace) trySetUIDMap(entries []IDMapEntry) error {
// checks for NoID.
lastID := e.FirstID + e.Length
if lastID <= e.FirstID {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
lastParentID := e.FirstParentID + e.Length
if lastParentID <= e.FirstParentID {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// "3. The mapped user IDs (group IDs) must in turn have a mapping in
// the parent user namespace."
@@ -182,14 +182,14 @@ func (ns *UserNamespace) trySetUIDMap(entries []IDMapEntry) error {
// mappings when it's created, so SetUIDMap would have returned EPERM
// without reaching this point if ns is root.
if !ns.parent.allIDsMapped(&ns.parent.uidMapToParent, e.FirstParentID, lastParentID) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// If either of these Adds fail, we have an overlapping range.
if !ns.uidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if !ns.uidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
return nil
@@ -202,24 +202,24 @@ func (ns *UserNamespace) SetGIDMap(ctx context.Context, entries []IDMapEntry) er
ns.mu.Lock()
defer ns.mu.Unlock()
if !ns.gidMapFromParent.IsEmpty() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if len(entries) == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if !c.HasCapabilityIn(linux.CAP_SETGID, ns) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if c.UserNamespace != ns && c.UserNamespace != ns.parent {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if !c.HasCapabilityIn(linux.CAP_SETGID, ns.parent) {
if len(entries) != 1 || ns.parent.MapToKGID(GID(entries[0].FirstParentID)) != c.EffectiveKGID || entries[0].Length != 1 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// It's correct for this to still be UID.
if c.EffectiveKUID != ns.owner {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "In the case of gid_map, use of the setgroups(2) system call must
// first be denied by writing "deny" to the /proc/[pid]/setgroups file
@@ -239,20 +239,20 @@ func (ns *UserNamespace) trySetGIDMap(entries []IDMapEntry) error {
for _, e := range entries {
lastID := e.FirstID + e.Length
if lastID <= e.FirstID {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
lastParentID := e.FirstParentID + e.Length
if lastParentID <= e.FirstParentID {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if !ns.parent.allIDsMapped(&ns.parent.gidMapToParent, e.FirstParentID, lastParentID) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if !ns.gidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if !ns.gidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
return nil
diff --git a/pkg/sentry/kernel/auth/user_namespace.go b/pkg/sentry/kernel/auth/user_namespace.go
index 9dd52c860..40a406f9d 100644
--- a/pkg/sentry/kernel/auth/user_namespace.go
+++ b/pkg/sentry/kernel/auth/user_namespace.go
@@ -17,8 +17,8 @@ package auth
import (
"math"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A UserNamespace represents a user namespace. See user_namespaces(7) for
@@ -105,7 +105,7 @@ func (c *Credentials) NewChildUserNamespace() (*UserNamespace, error) {
if c.UserNamespace.depth() >= maxUserNamespaceDepth {
// "... Calls to unshare(2) or clone(2) that would cause this limit to
// be exceeded fail with the error EUSERS." - user_namespaces(7)
- return nil, syserror.EUSERS
+ return nil, linuxerr.EUSERS
}
// "EPERM: CLONE_NEWUSER was specified in flags, but either the effective
// user ID or the effective group ID of the caller does not have a mapping
@@ -114,10 +114,10 @@ func (c *Credentials) NewChildUserNamespace() (*UserNamespace, error) {
// process are mapped to user IDs and group IDs in the user namespace of
// the calling process at the time of the call." - unshare(2)
if !c.EffectiveKUID.In(c.UserNamespace).Ok() {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
if !c.EffectiveKGID.In(c.UserNamespace).Ok() {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
return &UserNamespace{
parent: c.UserNamespace,
diff --git a/pkg/sentry/kernel/cgroup.go b/pkg/sentry/kernel/cgroup.go
index c93ef6ac1..a0e291f58 100644
--- a/pkg/sentry/kernel/cgroup.go
+++ b/pkg/sentry/kernel/cgroup.go
@@ -196,6 +196,7 @@ func (r *CgroupRegistry) FindHierarchy(ctypes []CgroupControllerType) *vfs.Files
// uniqueness of controllers enforced by Register, drop the
// dying hierarchy now. The eventual unregister by the FS
// teardown will become a no-op.
+ r.unregisterLocked(h.id)
return nil
}
return h.fs
diff --git a/pkg/sentry/kernel/eventfd/BUILD b/pkg/sentry/kernel/eventfd/BUILD
index 564c3d42e..f240a68aa 100644
--- a/pkg/sentry/kernel/eventfd/BUILD
+++ b/pkg/sentry/kernel/eventfd/BUILD
@@ -9,13 +9,13 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fdnotifier",
"//pkg/hostarch",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go
index 4466fbc9d..5ea44a2c2 100644
--- a/pkg/sentry/kernel/eventfd/eventfd.go
+++ b/pkg/sentry/kernel/eventfd/eventfd.go
@@ -22,13 +22,13 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -145,7 +145,7 @@ func (e *EventOperations) hostRead(ctx context.Context, dst usermem.IOSequence)
if _, err := unix.Read(e.hostfd, buf[:]); err != nil {
if err == unix.EWOULDBLOCK {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
return err
}
@@ -165,7 +165,7 @@ func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) erro
// We can't complete the read if the value is currently zero.
if e.val == 0 {
e.mu.Unlock()
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
// Update the value based on the mode the event is operating in.
@@ -198,7 +198,7 @@ func (e *EventOperations) hostWrite(val uint64) error {
hostarch.ByteOrder.PutUint64(buf[:], val)
_, err := unix.Write(e.hostfd, buf[:])
if err == unix.EWOULDBLOCK {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
return err
}
@@ -230,7 +230,7 @@ func (e *EventOperations) Signal(val uint64) error {
// uint64 minus 1.
if val > math.MaxUint64-1-e.val {
e.mu.Unlock()
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
e.val += val
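ErrWouldBlock is an internal sentinel rather than a user-visible errno; callers compare against it directly and retry after blocking on the waiter. A condensed sketch of that caller-side loop (the helper names are illustrative), assuming only linuxerr.ErrWouldBlock from the change above:

package example

import (
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// retryUntilReady runs op, blocking via wait whenever op reports that it
// would block, and returns the first other result.
func retryUntilReady(op func() error, wait func() error) error {
	for {
		err := op()
		if err != linuxerr.ErrWouldBlock {
			return err
		}
		if err := wait(); err != nil {
			return err // e.g. interrupted while waiting
		}
	}
}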
diff --git a/pkg/sentry/kernel/fasync/BUILD b/pkg/sentry/kernel/fasync/BUILD
index 6224a0cbd..6b2dd09da 100644
--- a/pkg/sentry/kernel/fasync/BUILD
+++ b/pkg/sentry/kernel/fasync/BUILD
@@ -8,12 +8,12 @@ go_library(
visibility = ["//:sandbox"],
deps = [
"//pkg/abi/linux",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/fs",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/waiter",
],
)
diff --git a/pkg/sentry/kernel/fasync/fasync.go b/pkg/sentry/kernel/fasync/fasync.go
index 5d584dc45..473987a79 100644
--- a/pkg/sentry/kernel/fasync/fasync.go
+++ b/pkg/sentry/kernel/fasync/fasync.go
@@ -17,12 +17,12 @@ package fasync
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -248,7 +248,7 @@ func (a *FileAsync) Signal() linux.Signal {
// to send SIGIO.
func (a *FileAsync) SetSignal(signal linux.Signal) error {
if signal != 0 && !signal.IsValid() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
a.mu.Lock()
defer a.mu.Unlock()
diff --git a/pkg/sentry/kernel/fd_table.go b/pkg/sentry/kernel/fd_table.go
index 62777faa8..eff556a0c 100644
--- a/pkg/sentry/kernel/fd_table.go
+++ b/pkg/sentry/kernel/fd_table.go
@@ -18,17 +18,17 @@ import (
"fmt"
"math"
"strings"
- "sync/atomic"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/bitmap"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// FDFlags define flags for an individual descriptor.
@@ -84,13 +84,8 @@ type FDTable struct {
// mu protects below.
mu sync.Mutex `state:"nosave"`
- // next is start position to find fd.
- next int32
-
- // used contains the number of non-nil entries. It must be accessed
- // atomically. It may be read atomically without holding mu (but not
- // written).
- used int32
+ // fdBitmap shows which fds are already in use.
+ fdBitmap bitmap.Bitmap `state:"nosave"`
// descriptorTable holds descriptors.
descriptorTable `state:".(map[int32]descriptor)"`
@@ -98,6 +93,8 @@ type FDTable struct {
func (f *FDTable) saveDescriptorTable() map[int32]descriptor {
m := make(map[int32]descriptor)
+ f.mu.Lock()
+ defer f.mu.Unlock()
f.forEach(context.Background(), func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {
m[fd] = descriptor{
file: file,
@@ -111,12 +108,16 @@ func (f *FDTable) saveDescriptorTable() map[int32]descriptor {
func (f *FDTable) loadDescriptorTable(m map[int32]descriptor) {
ctx := context.Background()
f.initNoLeakCheck() // Initialize table.
- f.used = 0
+ f.fdBitmap = bitmap.New(uint32(math.MaxUint16))
for fd, d := range m {
+ if fd < 0 {
+ panic(fmt.Sprintf("FD is not supposed to be negative. FD: %d", fd))
+ }
+
if file, fileVFS2 := f.setAll(ctx, fd, d.file, d.fileVFS2, d.flags); file != nil || fileVFS2 != nil {
panic("VFS1 or VFS2 files set")
}
-
+ f.fdBitmap.Add(uint32(fd))
// Note that we do _not_ need to acquire an extra table reference here. The
// table reference will already be accounted for in the file, so we drop the
// reference taken by set above.
@@ -156,7 +157,7 @@ func (f *FDTable) dropVFS2(ctx context.Context, file *vfs.FileDescription) {
// Release any POSIX lock possibly held by the FDTable.
if file.SupportsLocks() {
err := file.UnlockPOSIX(ctx, f, lock.LockRange{0, lock.LockEOF})
- if err != nil && err != syserror.ENOLCK {
+ if err != nil && !linuxerr.Equals(linuxerr.ENOLCK, err) {
panic(fmt.Sprintf("UnlockPOSIX failed: %v", err))
}
}
@@ -189,8 +190,10 @@ func (f *FDTable) DecRef(ctx context.Context) {
func (f *FDTable) forEach(ctx context.Context, fn func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags)) {
// retries tracks the number of failed TryIncRef attempts for the same FD.
retries := 0
- fd := int32(0)
- for {
+ fds := f.fdBitmap.ToSlice()
+ // Iterate through the fdBitmap.
+ for _, ufd := range fds {
+ fd := int32(ufd)
file, fileVFS2, flags, ok := f.getAll(fd)
if !ok {
break
@@ -218,7 +221,6 @@ func (f *FDTable) forEach(ctx context.Context, fn func(fd int32, file *fs.File,
fileVFS2.DecRef(ctx)
}
retries = 0
- fd++
}
}
@@ -226,6 +228,8 @@ func (f *FDTable) forEach(ctx context.Context, fn func(fd int32, file *fs.File,
func (f *FDTable) String() string {
var buf strings.Builder
ctx := context.Background()
+ f.mu.Lock()
+ defer f.mu.Unlock()
f.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {
switch {
case file != nil:
@@ -250,10 +254,10 @@ func (f *FDTable) String() string {
}
// NewFDs allocates new FDs guaranteed to be the lowest number available
-// greater than or equal to the fd parameter. All files will share the set
+// greater than or equal to the minFD parameter. All files will share the set
// flags. Success is guaranteed to be all or none.
-func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags FDFlags) (fds []int32, err error) {
- if fd < 0 {
+func (f *FDTable) NewFDs(ctx context.Context, minFD int32, files []*fs.File, flags FDFlags) (fds []int32, err error) {
+ if minFD < 0 {
// Don't accept negative FDs.
return nil, unix.EINVAL
}
@@ -267,31 +271,48 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags
if lim.Cur != limits.Infinity {
end = int32(lim.Cur)
}
- if fd >= end {
+ if minFD+int32(len(files)) > end {
return nil, unix.EMFILE
}
}
f.mu.Lock()
- // From f.next to find available fd.
- if fd < f.next {
- fd = f.next
+ // max is one greater than the largest fd currently set in fdBitmap.
+ max := int32(0)
+
+ if !f.fdBitmap.IsEmpty() {
+ max = int32(f.fdBitmap.Maximum())
+ max++
}
+ // Adjust max in case it is less than minFD.
+ if max < minFD {
+ max = minFD
+ }
// Install all entries.
- for i := fd; i < end && len(fds) < len(files); i++ {
- if d, _, _ := f.get(i); d == nil {
- // Set the descriptor.
- f.set(ctx, i, files[len(fds)], flags)
- fds = append(fds, i) // Record the file descriptor.
+ for len(fds) < len(files) {
+ // Try to reuse a free (zero) bit in fdBitmap.
+ // If no free bit is available at or after minFD, extend the table to max.
+ fd := f.fdBitmap.FirstZero(uint32(minFD))
+ if fd == math.MaxInt32 {
+ fd = uint32(max)
+ max++
+ }
+ if fd >= uint32(end) {
+ break
}
+ f.fdBitmap.Add(fd)
+ f.set(ctx, int32(fd), files[len(fds)], flags)
+ fds = append(fds, int32(fd))
+ minFD = int32(fd)
}
// Failure? Unwind existing FDs.
if len(fds) < len(files) {
for _, i := range fds {
f.set(ctx, i, nil, FDFlags{})
+ f.fdBitmap.Remove(uint32(i))
}
f.mu.Unlock()
@@ -305,20 +326,15 @@ func (f *FDTable) NewFDs(ctx context.Context, fd int32, files []*fs.File, flags
return nil, unix.EMFILE
}
- if fd == f.next {
- // Update next search start position.
- f.next = fds[len(fds)-1] + 1
- }
-
f.mu.Unlock()
return fds, nil
}
// NewFDsVFS2 allocates new FDs guaranteed to be the lowest number available
-// greater than or equal to the fd parameter. All files will share the set
+// greater than or equal to the minFD parameter. All files will share the set
// flags. Success is guaranteed to be all or none.
-func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDescription, flags FDFlags) (fds []int32, err error) {
- if fd < 0 {
+func (f *FDTable) NewFDsVFS2(ctx context.Context, minFD int32, files []*vfs.FileDescription, flags FDFlags) (fds []int32, err error) {
+ if minFD < 0 {
// Don't accept negative FDs.
return nil, unix.EINVAL
}
@@ -332,31 +348,47 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes
if lim.Cur != limits.Infinity {
end = int32(lim.Cur)
}
- if fd >= end {
+ if minFD >= end {
return nil, unix.EMFILE
}
}
f.mu.Lock()
- // From f.next to find available fd.
- if fd < f.next {
- fd = f.next
+ // max is one greater than the largest fd currently set in fdBitmap.
+ max := int32(0)
+
+ if !f.fdBitmap.IsEmpty() {
+ max = int32(f.fdBitmap.Maximum())
+ max++
}
- // Install all entries.
- for i := fd; i < end && len(fds) < len(files); i++ {
- if d, _, _ := f.getVFS2(i); d == nil {
- // Set the descriptor.
- f.setVFS2(ctx, i, files[len(fds)], flags)
- fds = append(fds, i) // Record the file descriptor.
- }
+ // Adjust max in case it is less than minFD.
+ if max < minFD {
+ max = minFD
}
+ for len(fds) < len(files) {
+ // Try to reuse a free (zero) bit in fdBitmap.
+ // If no free bit is available at or after minFD, extend the table to max.
+ fd := f.fdBitmap.FirstZero(uint32(minFD))
+ if fd == math.MaxInt32 {
+ fd = uint32(max)
+ max++
+ }
+ if fd >= uint32(end) {
+ break
+ }
+ f.fdBitmap.Add(fd)
+ f.setVFS2(ctx, int32(fd), files[len(fds)], flags)
+ fds = append(fds, int32(fd))
+ minFD = int32(fd)
+ }
// Failure? Unwind existing FDs.
if len(fds) < len(files) {
for _, i := range fds {
f.setVFS2(ctx, i, nil, FDFlags{})
+ f.fdBitmap.Remove(uint32(i))
}
f.mu.Unlock()
@@ -370,57 +402,19 @@ func (f *FDTable) NewFDsVFS2(ctx context.Context, fd int32, files []*vfs.FileDes
return nil, unix.EMFILE
}
- if fd == f.next {
- // Update next search start position.
- f.next = fds[len(fds)-1] + 1
- }
-
f.mu.Unlock()
return fds, nil
}
-// NewFDVFS2 allocates a file descriptor greater than or equal to minfd for
+// NewFDVFS2 allocates a file descriptor greater than or equal to minFD for
// the given file description. If it succeeds, it takes a reference on file.
-func (f *FDTable) NewFDVFS2(ctx context.Context, minfd int32, file *vfs.FileDescription, flags FDFlags) (int32, error) {
- if minfd < 0 {
- // Don't accept negative FDs.
- return -1, unix.EINVAL
- }
-
- // Default limit.
- end := int32(math.MaxInt32)
-
- // Ensure we don't get past the provided limit.
- if limitSet := limits.FromContext(ctx); limitSet != nil {
- lim := limitSet.Get(limits.NumberOfFiles)
- if lim.Cur != limits.Infinity {
- end = int32(lim.Cur)
- }
- if minfd >= end {
- return -1, unix.EMFILE
- }
- }
-
- f.mu.Lock()
- defer f.mu.Unlock()
-
- // From f.next to find available fd.
- fd := minfd
- if fd < f.next {
- fd = f.next
- }
- for fd < end {
- if d, _, _ := f.getVFS2(fd); d == nil {
- f.setVFS2(ctx, fd, file, flags)
- if fd == f.next {
- // Update next search start position.
- f.next = fd + 1
- }
- return fd, nil
- }
- fd++
+func (f *FDTable) NewFDVFS2(ctx context.Context, minFD int32, file *vfs.FileDescription, flags FDFlags) (int32, error) {
+ files := []*vfs.FileDescription{file}
+ fileSlice, err := f.NewFDsVFS2(ctx, minFD, files, flags)
+ if err != nil {
+ return -1, err
}
- return -1, unix.EMFILE
+ return fileSlice[0], nil
}
// NewFDAt sets the file reference for the given FD. If there is an active
@@ -469,6 +463,11 @@ func (f *FDTable) newFDAt(ctx context.Context, fd int32, file *fs.File, fileVFS2
defer f.mu.Unlock()
df, dfVFS2 := f.setAll(ctx, fd, file, fileVFS2, flags)
+ // Add fd to fdBitmap.
+ if file != nil || fileVFS2 != nil {
+ f.fdBitmap.Add(uint32(fd))
+ }
+
return df, dfVFS2, nil
}
@@ -573,7 +572,9 @@ func (f *FDTable) GetVFS2(fd int32) (*vfs.FileDescription, FDFlags) {
// Precondition: The caller must be running on the task goroutine, or Task.mu
// must be locked.
func (f *FDTable) GetFDs(ctx context.Context) []int32 {
- fds := make([]int32, 0, int(atomic.LoadInt32(&f.used)))
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ fds := make([]int32, 0, int(f.fdBitmap.GetNumOnes()))
f.forEach(ctx, func(fd int32, _ *fs.File, _ *vfs.FileDescription, _ FDFlags) {
fds = append(fds, fd)
})
@@ -583,13 +584,15 @@ func (f *FDTable) GetFDs(ctx context.Context) []int32 {
// Fork returns an independent FDTable.
func (f *FDTable) Fork(ctx context.Context) *FDTable {
clone := f.k.NewFDTable()
-
+ f.mu.Lock()
+ defer f.mu.Unlock()
f.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {
// The set function here will acquire an appropriate table
// reference for the clone. We don't need anything else.
if df, dfVFS2 := clone.setAll(ctx, fd, file, fileVFS2, flags); df != nil || dfVFS2 != nil {
panic("VFS1 or VFS2 files set")
}
+ clone.fdBitmap.Add(uint32(fd))
})
return clone
}
@@ -604,11 +607,6 @@ func (f *FDTable) Remove(ctx context.Context, fd int32) (*fs.File, *vfs.FileDesc
f.mu.Lock()
- // Update current available position.
- if fd < f.next {
- f.next = fd
- }
-
orig, orig2, _, _ := f.getAll(fd)
// Add reference for caller.
@@ -621,6 +619,7 @@ func (f *FDTable) Remove(ctx context.Context, fd int32) (*fs.File, *vfs.FileDesc
if orig != nil || orig2 != nil {
orig, orig2 = f.setAll(ctx, fd, nil, nil, FDFlags{}) // Zap entry.
+ f.fdBitmap.Remove(uint32(fd))
}
f.mu.Unlock()
@@ -644,16 +643,13 @@ func (f *FDTable) RemoveIf(ctx context.Context, cond func(*fs.File, *vfs.FileDes
f.forEach(ctx, func(fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) {
if cond(file, fileVFS2, flags) {
df, dfVFS2 := f.setAll(ctx, fd, nil, nil, FDFlags{}) // Clear from table.
+ f.fdBitmap.Remove(uint32(fd))
if df != nil {
files = append(files, df)
}
if dfVFS2 != nil {
filesVFS2 = append(filesVFS2, dfVFS2)
}
- // Update current available position.
- if fd < f.next {
- f.next = fd
- }
}
})
f.mu.Unlock()
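The table now derives "lowest available FD >= minFD" from the bitmap instead of the old next/used counters: FirstZero finds a reusable slot, and the max fallback extends the table when no free bit exists in range. A condensed, stand-alone sketch of that allocation step (not the table code itself), using only the bitmap methods exercised above:

package example

import (
	"math"

	"gvisor.dev/gvisor/pkg/bitmap"
)

// allocFD picks the lowest free descriptor >= minFD, subject to limit, and
// marks it used. It returns false when the limit would be exceeded.
func allocFD(b *bitmap.Bitmap, minFD, limit int32) (int32, bool) {
	fd := b.FirstZero(uint32(minFD))
	if fd == math.MaxInt32 {
		// No free bit at or after minFD: fall back to one past the
		// current maximum (or minFD itself if that is larger).
		fd = 0
		if !b.IsEmpty() {
			fd = b.Maximum() + 1
		}
		if fd < uint32(minFD) {
			fd = uint32(minFD)
		}
	}
	if fd >= uint32(limit) {
		return -1, false
	}
	b.Add(fd)
	return int32(fd), true
}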
diff --git a/pkg/sentry/kernel/fd_table_unsafe.go b/pkg/sentry/kernel/fd_table_unsafe.go
index f17f9c59c..2b3e6ef71 100644
--- a/pkg/sentry/kernel/fd_table_unsafe.go
+++ b/pkg/sentry/kernel/fd_table_unsafe.go
@@ -15,9 +15,11 @@
package kernel
import (
+ "math"
"sync/atomic"
"unsafe"
+ "gvisor.dev/gvisor/pkg/bitmap"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/vfs"
@@ -44,6 +46,7 @@ func (f *FDTable) initNoLeakCheck() {
func (f *FDTable) init() {
f.initNoLeakCheck()
f.InitRefs()
+ f.fdBitmap = bitmap.New(uint32(math.MaxUint16))
}
// get gets a file entry.
@@ -162,14 +165,6 @@ func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2
}
}
- // Adjust used.
- switch {
- case orig == nil && desc != nil:
- atomic.AddInt32(&f.used, 1)
- case orig != nil && desc == nil:
- atomic.AddInt32(&f.used, -1)
- }
-
if orig != nil {
switch {
case orig.file != nil:
diff --git a/pkg/sentry/kernel/futex/BUILD b/pkg/sentry/kernel/futex/BUILD
index 6c31e082c..c897e3a5f 100644
--- a/pkg/sentry/kernel/futex/BUILD
+++ b/pkg/sentry/kernel/futex/BUILD
@@ -37,11 +37,11 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/sentry/memmap",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
@@ -53,8 +53,8 @@ go_test(
library = ":futex",
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/kernel/futex/futex.go b/pkg/sentry/kernel/futex/futex.go
index 0427cf3f4..2c9ea65aa 100644
--- a/pkg/sentry/kernel/futex/futex.go
+++ b/pkg/sentry/kernel/futex/futex.go
@@ -20,10 +20,10 @@ package futex
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// KeyKind indicates the type of a Key.
@@ -122,7 +122,7 @@ func check(t Target, addr hostarch.Addr, val uint32) error {
return err
}
if cur != val {
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
return nil
}
@@ -165,7 +165,7 @@ func atomicOp(t Target, addr hostarch.Addr, opIn uint32) (bool, error) {
case linux.FUTEX_OP_XOR:
newVal = oldVal ^ opArg
default:
- return false, syserror.ENOSYS
+ return false, linuxerr.ENOSYS
}
prev, err := t.CompareAndSwapUint32(addr, oldVal, newVal)
if err != nil {
@@ -191,7 +191,7 @@ func atomicOp(t Target, addr hostarch.Addr, opIn uint32) (bool, error) {
case linux.FUTEX_OP_CMP_GE:
return oldVal >= cmpArg, nil
default:
- return false, syserror.ENOSYS
+ return false, linuxerr.ENOSYS
}
}
@@ -332,7 +332,7 @@ func getKey(t Target, addr hostarch.Addr, private bool) (Key, error) {
// Ensure the address is aligned.
// It must be a DWORD boundary.
if addr&0x3 != 0 {
- return Key{}, syserror.EINVAL
+ return Key{}, linuxerr.EINVAL
}
if private {
return Key{Kind: KindPrivate, Offset: uint64(addr)}, nil
@@ -397,8 +397,8 @@ func (m *Manager) Fork() *Manager {
}
// lockBucket returns a locked bucket for the given key.
-func (m *Manager) lockBucket(k *Key) *bucket {
- var b *bucket
+// +checklocksacquire:b.mu
+func (m *Manager) lockBucket(k *Key) (b *bucket) {
if k.Kind == KindSharedMappable {
b = m.sharedBucket
} else {
@@ -409,7 +409,9 @@ func (m *Manager) lockBucket(k *Key) *bucket {
}
// lockBuckets returns locked buckets for the given keys.
-func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) {
+// +checklocksacquire:b1.mu
+// +checklocksacquire:b2.mu
+func (m *Manager) lockBuckets(k1, k2 *Key) (b1 *bucket, b2 *bucket) {
// Buckets must be consistently ordered to avoid circular lock
// dependencies. We order buckets in m.privateBuckets by index (lowest
// index first), and all buckets in m.privateBuckets precede
@@ -419,8 +421,8 @@ func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) {
if k1.Kind != KindSharedMappable && k2.Kind != KindSharedMappable {
i1 := bucketIndexForAddr(k1.addr())
i2 := bucketIndexForAddr(k2.addr())
- b1 := &m.privateBuckets[i1]
- b2 := &m.privateBuckets[i2]
+ b1 = &m.privateBuckets[i1]
+ b2 = &m.privateBuckets[i2]
switch {
case i1 < i2:
b1.mu.Lock()
@@ -431,19 +433,30 @@ func (m *Manager) lockBuckets(k1, k2 *Key) (*bucket, *bucket) {
default:
b1.mu.Lock()
}
- return b1, b2
+ return b1, b2 // +checklocksforce
}
// At least one of b1 or b2 should be m.sharedBucket.
- b1 := m.sharedBucket
- b2 := m.sharedBucket
+ b1 = m.sharedBucket
+ b2 = m.sharedBucket
if k1.Kind != KindSharedMappable {
b1 = m.lockBucket(k1)
} else if k2.Kind != KindSharedMappable {
b2 = m.lockBucket(k2)
}
m.sharedBucket.mu.Lock()
- return b1, b2
+ return b1, b2 // +checklocksforce
+}
+
+// unlockBuckets unlocks two buckets.
+// +checklocksrelease:b1.mu
+// +checklocksrelease:b2.mu
+func (m *Manager) unlockBuckets(b1, b2 *bucket) {
+ b1.mu.Unlock()
+ if b1 != b2 {
+ b2.mu.Unlock()
+ }
+ return // +checklocksforce
}
// Wake wakes up to n waiters matching the bitmask on the given addr.
@@ -476,10 +489,7 @@ func (m *Manager) doRequeue(t Target, addr, naddr hostarch.Addr, private bool, c
defer k2.release(t)
b1, b2 := m.lockBuckets(&k1, &k2)
- defer b1.mu.Unlock()
- if b2 != b1 {
- defer b2.mu.Unlock()
- }
+ defer m.unlockBuckets(b1, b2)
if checkval {
if err := check(t, addr, val); err != nil {
@@ -526,10 +536,7 @@ func (m *Manager) WakeOp(t Target, addr1, addr2 hostarch.Addr, private bool, nwa
defer k2.release(t)
b1, b2 := m.lockBuckets(&k1, &k2)
- defer b1.mu.Unlock()
- if b2 != b1 {
- defer b2.mu.Unlock()
- }
+ defer m.unlockBuckets(b1, b2)
done := 0
cond, err := atomicOp(t, addr2, op)
@@ -670,7 +677,7 @@ func (m *Manager) lockPILocked(w *Waiter, t Target, addr hostarch.Addr, tid uint
return false, err
}
if (cur & linux.FUTEX_TID_MASK) == tid {
- return false, syserror.EDEADLK
+ return false, linuxerr.EDEADLK
}
if (cur & linux.FUTEX_TID_MASK) == 0 {
@@ -745,7 +752,7 @@ func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bu
}
if (cur & linux.FUTEX_TID_MASK) != tid {
- return syserror.EPERM
+ return linuxerr.EPERM
}
var next *Waiter // Who's the next owner?
@@ -773,7 +780,7 @@ func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bu
if prev != cur {
// Let user mode handle CAS races. This is different than lock, which
// retries when CAS fails.
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
return nil
}
@@ -790,7 +797,7 @@ func (m *Manager) unlockPILocked(t Target, addr hostarch.Addr, tid uint32, b *bu
return err
}
if prev != cur {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
b.wakeWaiterLocked(next)
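The checklocks annotations on lockBucket, lockBuckets, and the new unlockBuckets document the invariant the code already relied on: two buckets are always locked in a fixed order (lower private index first, shared bucket last), so concurrent requeue and wake-op calls cannot deadlock. A stand-alone illustration of that ordering rule, independent of the futex types:

package example

import "sync"

// lockPair acquires the mutexes at indexes i and j in a fixed order, taking
// each lock at most once. Any two callers that need the same pair agree on
// the order, so they cannot deadlock against each other.
func lockPair(mus []sync.Mutex, i, j int) {
	switch {
	case i < j:
		mus[i].Lock()
		mus[j].Lock()
	case j < i:
		mus[j].Lock()
		mus[i].Lock()
	default: // same bucket: lock it once
		mus[i].Lock()
	}
}

// unlockPair releases the pair, unlocking a shared mutex only once.
func unlockPair(mus []sync.Mutex, i, j int) {
	mus[i].Unlock()
	if i != j {
		mus[j].Unlock()
	}
}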
diff --git a/pkg/sentry/kernel/futex/futex_test.go b/pkg/sentry/kernel/futex/futex_test.go
index deba44e5c..04c136f87 100644
--- a/pkg/sentry/kernel/futex/futex_test.go
+++ b/pkg/sentry/kernel/futex/futex_test.go
@@ -21,8 +21,8 @@ import (
"testing"
"unsafe"
- "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sync"
)
@@ -488,7 +488,7 @@ func (t *testMutex) Lock() {
// Wait for it to be "not locked".
w := NewWaiter()
err := t.m.WaitPrepare(w, t.d, t.a, true, testMutexLocked, ^uint32(0))
- if err == unix.EAGAIN {
+ if linuxerr.Equals(linuxerr.EAGAIN, err) {
continue
}
if err != nil {
diff --git a/pkg/sentry/kernel/ipc/BUILD b/pkg/sentry/kernel/ipc/BUILD
new file mode 100644
index 000000000..e42a94e15
--- /dev/null
+++ b/pkg/sentry/kernel/ipc/BUILD
@@ -0,0 +1,20 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "ipc",
+ srcs = [
+ "object.go",
+ "registry.go",
+ ],
+ visibility = ["//pkg/sentry:internal"],
+ deps = [
+ "//pkg/abi/linux",
+ "//pkg/context",
+ "//pkg/errors/linuxerr",
+ "//pkg/log",
+ "//pkg/sentry/fs",
+ "//pkg/sentry/kernel/auth",
+ ],
+)
diff --git a/pkg/sentry/kernel/ipc/object.go b/pkg/sentry/kernel/ipc/object.go
new file mode 100644
index 000000000..facd157c7
--- /dev/null
+++ b/pkg/sentry/kernel/ipc/object.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ipc defines functionality and utilities common to sysvipc mechanisms.
+//
+// Lock ordering: [shm/semaphore/msgqueue].Registry.mu -> Mechanism
+package ipc
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/sentry/fs"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+)
+
+// Key is a user-provided identifier for IPC objects.
+type Key int32
+
+// ID is a kernel identifier for IPC objects.
+type ID int32
+
+// Object represents an abstract IPC object with fields common to all IPC
+// mechanisms.
+//
+// +stateify savable
+type Object struct {
+ // User namespace which owns the IPC namespace which owns the IPC object.
+ // Immutable.
+ UserNS *auth.UserNamespace
+
+ // ID is a kernel identifier for the IPC object. Immutable.
+ ID ID
+
+ // Key is a user-provided identifier for the IPC object. Immutable.
+ Key Key
+
+ // Creator is the user who created the IPC object. Immutable.
+ Creator fs.FileOwner
+
+ // Owner is the current owner of the IPC object.
+ Owner fs.FileOwner
+
+ // Perms is the access permissions of the IPC object.
+ Perms fs.FilePermissions
+}
+
+// Mechanism represents a SysV mechanism that holds an IPC object. It can also
+// be looked at as a container for an ipc.Object, which is by definition a fully
+// functional SysV object.
+type Mechanism interface {
+ // Lock behaves the same as Mutex.Lock on the mechanism.
+ Lock()
+
+ // Unlock behaves the same as Mutex.Unlock on the mechanism.
+ Unlock()
+
+ // Object returns a pointer to the mechanism's ipc.Object. Mechanism.Lock
+ // and Mechanism.Unlock should be held while the object is accessed.
+ Object() *Object
+
+ // Destroy destroys the mechanism.
+ Destroy()
+}
+
+// NewObject returns a new, initialized ipc.Object. The newly returned object
+// doesn't have a valid ID. When the object is registered, the registry assigns
+// it a new unique ID.
+func NewObject(un *auth.UserNamespace, key Key, creator, owner fs.FileOwner, perms fs.FilePermissions) *Object {
+ return &Object{
+ UserNS: un,
+ Key: key,
+ Creator: creator,
+ Owner: owner,
+ Perms: perms,
+ }
+}
+
+// CheckOwnership verifies whether an IPC object may be accessed using creds as
+// an owner. See ipc/util.c:ipcctl_obtain_check() in Linux.
+func (o *Object) CheckOwnership(creds *auth.Credentials) bool {
+ if o.Owner.UID == creds.EffectiveKUID || o.Creator.UID == creds.EffectiveKUID {
+ return true
+ }
+
+ // Tasks with CAP_SYS_ADMIN may bypass ownership checks. Strangely, Linux
+ // doesn't use CAP_IPC_OWNER for this despite CAP_IPC_OWNER being documented
+ // for use to "override IPC ownership checks".
+ return creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, o.UserNS)
+}
+
+// CheckPermissions verifies whether an IPC object is accessible using creds for
+// access described by req. See ipc/util.c:ipcperms() in Linux.
+func (o *Object) CheckPermissions(creds *auth.Credentials, req fs.PermMask) bool {
+ p := o.Perms.Other
+ if o.Owner.UID == creds.EffectiveKUID {
+ p = o.Perms.User
+ } else if creds.InGroup(o.Owner.GID) {
+ p = o.Perms.Group
+ }
+
+ if p.SupersetOf(req) {
+ return true
+ }
+ return creds.HasCapabilityIn(linux.CAP_IPC_OWNER, o.UserNS)
+}
+
+// Set modifies attributes for an IPC object. See *ctl(IPC_SET).
+//
+// Precondition: Mechanism.mu must be held.
+func (o *Object) Set(ctx context.Context, perm *linux.IPCPerm) error {
+ creds := auth.CredentialsFromContext(ctx)
+ uid := creds.UserNamespace.MapToKUID(auth.UID(perm.UID))
+ gid := creds.UserNamespace.MapToKGID(auth.GID(perm.GID))
+ if !uid.Ok() || !gid.Ok() {
+ // The man pages don't specify an errno for invalid uid/gid, but EINVAL
+ // is generally used for invalid arguments.
+ return linuxerr.EINVAL
+ }
+
+ if !o.CheckOwnership(creds) {
+ // "The argument cmd has the value IPC_SET or IPC_RMID, but the
+ // effective user ID of the calling process is not the creator (as
+ // found in msg_perm.cuid) or the owner (as found in msg_perm.uid)
+ // of the message queue, and the caller is not privileged (Linux:
+ // does not have the CAP_SYS_ADMIN capability)."
+ return linuxerr.EPERM
+ }
+
+ // User may only modify the lower 9 bits of the mode. All the other bits are
+ // always 0 for the underlying inode.
+ mode := linux.FileMode(perm.Mode & 0x1ff)
+
+ o.Perms = fs.FilePermsFromMode(mode)
+ o.Owner.UID = uid
+ o.Owner.GID = gid
+
+ return nil
+}
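CheckOwnership and CheckPermissions are meant to be called by the individual SysV mechanisms (shm, semaphores, message queues) before acting on an object. A hypothetical consumer of the new helpers (the function name and context plumbing are assumptions for illustration):

package example

import (
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/sentry/fs"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	"gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
)

// checkRead returns EACCES unless the calling task may read the object,
// leaning on the ipc/util.c:ipcperms() semantics the helper implements.
//
// Precondition: the mechanism owning obj must be locked.
func checkRead(ctx context.Context, obj *ipc.Object) error {
	creds := auth.CredentialsFromContext(ctx)
	if !obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
		return linuxerr.EACCES
	}
	return nil
}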
diff --git a/pkg/sentry/kernel/ipc/registry.go b/pkg/sentry/kernel/ipc/registry.go
new file mode 100644
index 000000000..91de19070
--- /dev/null
+++ b/pkg/sentry/kernel/ipc/registry.go
@@ -0,0 +1,196 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/log"
+ "gvisor.dev/gvisor/pkg/sentry/fs"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+)
+
+// Registry is similar to Object, but for registries. It represents an abstract
+// SysV IPC registry with fields common to all SysV registries. Registry is not
+// thread-safe, and should be protected using a mutex.
+//
+// +stateify savable
+type Registry struct {
+ // UserNS owning the IPC namespace this registry belongs to. Immutable.
+ UserNS *auth.UserNamespace
+
+ // objects is a map of IDs to IPC mechanisms.
+ objects map[ID]Mechanism
+
+ // keysToIDs maps a lookup key to an ID.
+ keysToIDs map[Key]ID
+
+ // lastIDUsed is used to find the next available ID for object creation.
+ lastIDUsed ID
+}
+
+// NewRegistry returns a new, initialized ipc.Registry.
+func NewRegistry(userNS *auth.UserNamespace) *Registry {
+ return &Registry{
+ UserNS: userNS,
+ objects: make(map[ID]Mechanism),
+ keysToIDs: make(map[Key]ID),
+ }
+}
+
+// Find uses key to search for and return a SysV mechanism. Find returns an
+// error if an object is found but shouldn't be, or if the user doesn't have
+// permission to use the object. If no object is found, Find checks the create
+// flag, and returns an error only if it's false.
+func (r *Registry) Find(ctx context.Context, key Key, mode linux.FileMode, create, exclusive bool) (Mechanism, error) {
+ if id, ok := r.keysToIDs[key]; ok {
+ mech := r.objects[id]
+ mech.Lock()
+ defer mech.Unlock()
+
+ obj := mech.Object()
+ creds := auth.CredentialsFromContext(ctx)
+ if !obj.CheckPermissions(creds, fs.PermsFromMode(mode)) {
+ // The [calling process / user] does not have permission to access
+ // the set, and does not have the CAP_IPC_OWNER capability in the
+ // user namespace that governs its IPC namespace.
+ return nil, linuxerr.EACCES
+ }
+
+ if create && exclusive {
+ // IPC_CREAT and IPC_EXCL were specified, but an object already
+ // exists for key.
+ return nil, linuxerr.EEXIST
+ }
+ return mech, nil
+ }
+
+ if !create {
+ // No object exists for key and msgflg did not specify IPC_CREAT.
+ return nil, linuxerr.ENOENT
+ }
+
+ return nil, nil
+}
+
+// Register adds the given object into Registry.objects, and assigns it a new
+// ID. It returns an error if all IDs are exhausted.
+func (r *Registry) Register(m Mechanism) error {
+ id, err := r.newID()
+ if err != nil {
+ return err
+ }
+
+ obj := m.Object()
+ obj.ID = id
+
+ r.objects[id] = m
+ r.keysToIDs[obj.Key] = id
+
+ return nil
+}
+
+// newID finds the first unused ID in the registry, and returns an error if
+// none is found.
+func (r *Registry) newID() (ID, error) {
+ // Find the next available ID.
+ for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ {
+ // Handle wrap around.
+ if id < 0 {
+ id = 0
+ continue
+ }
+ if r.objects[id] == nil {
+ r.lastIDUsed = id
+ return id, nil
+ }
+ }
+
+ log.Warningf("ids exhausted, they may be leaking")
+
+ // The man pages for shmget(2) mention that ENOSPC should be used if "All
+ // possible shared memory IDs have been taken (SHMMNI)". Other SysV
+ // mechanisms don't have a specific errno for running out of IDs, but they
+ // return ENOSPC if the max number of objects is exceeded, so we assume that
+ // it's the same case.
+ return 0, linuxerr.ENOSPC
+}
+
+// Remove removes the mechanism with the given id from the registry, and calls
+// mechanism.Destroy to perform mechanism-specific removal.
+func (r *Registry) Remove(id ID, creds *auth.Credentials) error {
+ mech := r.objects[id]
+ if mech == nil {
+ return linuxerr.EINVAL
+ }
+
+ mech.Lock()
+ defer mech.Unlock()
+
+ obj := mech.Object()
+
+ // The effective user ID of the calling process must match the creator or
+ // owner of the [mechanism], or the caller must be privileged.
+ if !obj.CheckOwnership(creds) {
+ return linuxerr.EPERM
+ }
+
+ delete(r.objects, obj.ID)
+ delete(r.keysToIDs, obj.Key)
+ mech.Destroy()
+
+ return nil
+}
+
+// ForAllObjects executes a given function for all registered objects.
+func (r *Registry) ForAllObjects(f func(o Mechanism)) {
+ for _, o := range r.objects {
+ f(o)
+ }
+}
+
+// FindByID returns the mechanism with the given ID, or nil if none exists.
+func (r *Registry) FindByID(id ID) Mechanism {
+ return r.objects[id]
+}
+
+// DissociateKey removes the association between a mechanism and its key
+// (deletes it from r.keysToIDs), preventing it from being discovered by any new
+// process, but not necessarily destroying it. If the given key doesn't exist,
+// nothing is changed.
+func (r *Registry) DissociateKey(key Key) {
+ delete(r.keysToIDs, key)
+}
+
+// DissociateID removes the association between a mechanism and its ID (deletes
+// it from r.objects). An ID can't be removed unless the associated key is
+// removed already; this prevents users from acquiring a nil Mechanism.
+//
+// Precondition: must be preceded by a call to r.DissociateKey.
+func (r *Registry) DissociateID(id ID) {
+ delete(r.objects, id)
+}
+
+// ObjectCount returns the number of registered objects.
+func (r *Registry) ObjectCount() int {
+ return len(r.objects)
+}
+
+// LastIDUsed returns the last used ID.
+func (r *Registry) LastIDUsed() ID {
+ return r.lastIDUsed
+}
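A mechanism plugs into the registry by satisfying the Mechanism interface and calling Register, which assigns the ID. A toy implementation, simplified to just the interface (real mechanisms such as msgqueue carry their own state and locking discipline):

package example

import (
	"sync"

	"gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
)

// toyMechanism is the smallest possible ipc.Mechanism: an Object guarded by
// a mutex, with nothing extra to tear down.
type toyMechanism struct {
	mu  sync.Mutex
	obj *ipc.Object
}

func (t *toyMechanism) Lock()               { t.mu.Lock() }
func (t *toyMechanism) Unlock()             { t.mu.Unlock() }
func (t *toyMechanism) Object() *ipc.Object { return t.obj }
func (t *toyMechanism) Destroy()            {} // nothing extra to release

// register adds obj to reg and reports the ID the registry assigned to it.
func register(reg *ipc.Registry, obj *ipc.Object) (ipc.ID, error) {
	m := &toyMechanism{obj: obj}
	if err := reg.Register(m); err != nil {
		return 0, err
	}
	return m.obj.ID, nil
}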
diff --git a/pkg/sentry/kernel/ipc_namespace.go b/pkg/sentry/kernel/ipc_namespace.go
index 9545bb5ef..0b101b1bb 100644
--- a/pkg/sentry/kernel/ipc_namespace.go
+++ b/pkg/sentry/kernel/ipc_namespace.go
@@ -17,6 +17,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/msgqueue"
"gvisor.dev/gvisor/pkg/sentry/kernel/semaphore"
"gvisor.dev/gvisor/pkg/sentry/kernel/shm"
)
@@ -30,6 +31,7 @@ type IPCNamespace struct {
// User namespace which owns this IPC namespace. Immutable.
userNS *auth.UserNamespace
+ queues *msgqueue.Registry
semaphores *semaphore.Registry
shms *shm.Registry
}
@@ -38,6 +40,7 @@ type IPCNamespace struct {
func NewIPCNamespace(userNS *auth.UserNamespace) *IPCNamespace {
ns := &IPCNamespace{
userNS: userNS,
+ queues: msgqueue.NewRegistry(userNS),
semaphores: semaphore.NewRegistry(userNS),
shms: shm.NewRegistry(userNS),
}
@@ -45,6 +48,11 @@ func NewIPCNamespace(userNS *auth.UserNamespace) *IPCNamespace {
return ns
}
+// MsgqueueRegistry returns the message queue registry for this namespace.
+func (i *IPCNamespace) MsgqueueRegistry() *msgqueue.Registry {
+ return i.queues
+}
+
// SemaphoreRegistry returns the semaphore set registry for this namespace.
func (i *IPCNamespace) SemaphoreRegistry() *semaphore.Registry {
return i.semaphores
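The new accessor mirrors SemaphoreRegistry and ShmRegistry: syscall code reaches the message queue registry through the task's IPC namespace rather than holding a reference directly. A minimal sketch, assuming the existing kernel.Task.IPCNamespace accessor:

package example

import (
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/sentry/kernel/msgqueue"
)

// queuesForTask returns the message queue registry visible to t.
func queuesForTask(t *kernel.Task) *msgqueue.Registry {
	return t.IPCNamespace().MsgqueueRegistry()
}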
diff --git a/pkg/sentry/kernel/kcov.go b/pkg/sentry/kernel/kcov.go
index 4b943106b..e8a71bec1 100644
--- a/pkg/sentry/kernel/kcov.go
+++ b/pkg/sentry/kernel/kcov.go
@@ -22,13 +22,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/coverage"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// kcovAreaSizeMax is the maximum number of uint64 entries allowed in the kcov
@@ -125,19 +125,19 @@ func (kcov *Kcov) InitTrace(size uint64) error {
defer kcov.mu.Unlock()
if kcov.mode != linux.KCOV_MODE_DISABLED {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// To simplify all the logic around mapping, we require that the length of the
// shared region is a multiple of the system page size.
if (8*size)&(hostarch.PageSize-1) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// We need space for at least two uint64s to hold current position and a
// single PC.
if size < 2 || size > kcovAreaSizeMax {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
kcov.size = size
@@ -157,7 +157,7 @@ func (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {
// KCOV_ENABLE must be preceded by KCOV_INIT_TRACE and an mmap call.
if kcov.mode != linux.KCOV_MODE_INIT || kcov.mappable == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
switch traceKind {
@@ -165,13 +165,13 @@ func (kcov *Kcov) EnableTrace(ctx context.Context, traceKind uint8) error {
kcov.mode = linux.KCOV_MODE_TRACE_PC
case linux.KCOV_TRACE_CMP:
// We do not support KCOV_MODE_TRACE_CMP.
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if kcov.owningTask != nil && kcov.owningTask != t {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
kcov.owningTask = t
@@ -195,7 +195,7 @@ func (kcov *Kcov) DisableTrace(ctx context.Context) error {
}
if t != kcov.owningTask {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
kcov.mode = linux.KCOV_MODE_INIT
kcov.owningTask = nil
@@ -237,7 +237,7 @@ func (kcov *Kcov) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) erro
defer kcov.mu.Unlock()
if kcov.mode != linux.KCOV_MODE_INIT {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if kcov.mappable == nil {
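Aside from the errno migration, the size check above is worth spelling out: because hostarch.PageSize is a power of two, masking with PageSize-1 is a cheap modulo, so the condition rejects any buffer whose byte length (8*size, one uint64 per entry) is not a whole number of pages. An equivalent sketch:

package example

import (
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/hostarch"
)

// checkKcovSize mirrors the validation in InitTrace: 8*size bytes must be a
// whole number of pages, and size must fit the documented bounds.
func checkKcovSize(size, sizeMax uint64) error {
	if (8*size)%hostarch.PageSize != 0 {
		return linuxerr.EINVAL
	}
	if size < 2 || size > sizeMax {
		return linuxerr.EINVAL
	}
	return nil
}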
diff --git a/pkg/sentry/kernel/kernel.go b/pkg/sentry/kernel/kernel.go
index d537e608a..df5160b67 100644
--- a/pkg/sentry/kernel/kernel.go
+++ b/pkg/sentry/kernel/kernel.go
@@ -143,12 +143,6 @@ type Kernel struct {
// to CreateProcess, and is protected by extMu.
globalInit *ThreadGroup
- // realtimeClock is a ktime.Clock based on timekeeper's Realtime.
- realtimeClock *timekeeperClock
-
- // monotonicClock is a ktime.Clock based on timekeeper's Monotonic.
- monotonicClock *timekeeperClock
-
// syslog is the kernel log.
syslog syslog
@@ -306,6 +300,9 @@ type InitKernelArgs struct {
// FeatureSet is the emulated CPU feature set.
FeatureSet *cpuid.FeatureSet
+ // Timekeeper manages time for all tasks in the system.
+ Timekeeper *Timekeeper
+
// RootUserNamespace is the root user namespace.
RootUserNamespace *auth.UserNamespace
@@ -345,24 +342,18 @@ type InitKernelArgs struct {
PIDNamespace *PIDNamespace
}
-// SetTimekeeper sets Kernel.timekeeper. SetTimekeeper must be called before
-// Init.
-func (k *Kernel) SetTimekeeper(tk *Timekeeper) {
- k.timekeeper = tk
-}
-
// Init initialize the Kernel with no tasks.
//
// Callers must manually set Kernel.Platform and call Kernel.SetMemoryFile
-// and Kernel.SetTimekeeper before calling Init.
+// before calling Init.
func (k *Kernel) Init(args InitKernelArgs) error {
if args.FeatureSet == nil {
return fmt.Errorf("args.FeatureSet is nil")
}
- if k.timekeeper == nil {
- return fmt.Errorf("timekeeper is nil")
+ if args.Timekeeper == nil {
+ return fmt.Errorf("args.Timekeeper is nil")
}
- if k.timekeeper.clocks == nil {
+ if args.Timekeeper.clocks == nil {
return fmt.Errorf("must call Timekeeper.SetClocks() before Kernel.Init()")
}
if args.RootUserNamespace == nil {
@@ -373,6 +364,7 @@ func (k *Kernel) Init(args InitKernelArgs) error {
}
k.featureSet = args.FeatureSet
+ k.timekeeper = args.Timekeeper
k.tasks = newTaskSet(args.PIDNamespace)
k.rootUserNamespace = args.RootUserNamespace
k.rootUTSNamespace = args.RootUTSNamespace
@@ -397,8 +389,6 @@ func (k *Kernel) Init(args InitKernelArgs) error {
}
k.extraAuxv = args.ExtraAuxv
k.vdso = args.Vdso
- k.realtimeClock = &timekeeperClock{tk: k.timekeeper, c: sentrytime.Realtime}
- k.monotonicClock = &timekeeperClock{tk: k.timekeeper, c: sentrytime.Monotonic}
k.futexes = futex.NewManager()
k.netlinkPorts = port.New()
k.ptraceExceptions = make(map[*Task]*Task)
@@ -531,6 +521,8 @@ func (k *Kernel) SaveTo(ctx context.Context, w wire.Writer) error {
}
log.Infof("CPUID save took [%s].", time.Since(cpuidStart))
+ // Save the timekeeper's state.
+
// Save the kernel state.
kernelStart := time.Now()
stats, err := state.Save(ctx, w, k)
@@ -675,7 +667,7 @@ func (k *Kernel) invalidateUnsavableMappings(ctx context.Context) error {
}
// LoadFrom returns a new Kernel loaded from args.
-func (k *Kernel) LoadFrom(ctx context.Context, r wire.Reader, net inet.Stack, clocks sentrytime.Clocks, vfsOpts *vfs.CompleteRestoreOptions) error {
+func (k *Kernel) LoadFrom(ctx context.Context, r wire.Reader, timeReady chan struct{}, net inet.Stack, clocks sentrytime.Clocks, vfsOpts *vfs.CompleteRestoreOptions) error {
loadStart := time.Now()
initAppCores := k.applicationCores
@@ -722,6 +714,11 @@ func (k *Kernel) LoadFrom(ctx context.Context, r wire.Reader, net inet.Stack, cl
log.Infof("Overall load took [%s]", time.Since(loadStart))
k.Timekeeper().SetClocks(clocks)
+
+ if timeReady != nil {
+ close(timeReady)
+ }
+
if net != nil {
net.Resume()
}
@@ -1103,7 +1100,7 @@ func (k *Kernel) Start() error {
}
k.started = true
- k.cpuClockTicker = ktime.NewTimer(k.monotonicClock, newKernelCPUClockTicker(k))
+ k.cpuClockTicker = ktime.NewTimer(k.timekeeper.monotonicClock, newKernelCPUClockTicker(k))
k.cpuClockTicker.Swap(ktime.Setting{
Enabled: true,
Period: linux.ClockTick,
@@ -1258,7 +1255,7 @@ func (k *Kernel) incRunningTasks() {
// These cause very different value of cpuClock. But again, since
// nothing was running while the ticker was disabled, those differences
// don't matter.
- setting, exp := k.cpuClockTickerSetting.At(k.monotonicClock.Now())
+ setting, exp := k.cpuClockTickerSetting.At(k.timekeeper.monotonicClock.Now())
if exp > 0 {
atomic.AddUint64(&k.cpuClock, exp)
}
@@ -1302,11 +1299,11 @@ func (k *Kernel) WaitExited() {
}
// Kill requests that all tasks in k immediately exit as if group exiting with
-// status es. Kill does not wait for tasks to exit.
-func (k *Kernel) Kill(es ExitStatus) {
+// status ws. Kill does not wait for tasks to exit.
+func (k *Kernel) Kill(ws linux.WaitStatus) {
k.extMu.Lock()
defer k.extMu.Unlock()
- k.tasks.Kill(es)
+ k.tasks.Kill(ws)
}
// Pause requests that all tasks in k temporarily stop executing, and blocks
@@ -1468,12 +1465,12 @@ func (k *Kernel) ApplicationCores() uint {
// RealtimeClock returns the application CLOCK_REALTIME clock.
func (k *Kernel) RealtimeClock() ktime.Clock {
- return k.realtimeClock
+ return k.timekeeper.realtimeClock
}
// MonotonicClock returns the application CLOCK_MONOTONIC clock.
func (k *Kernel) MonotonicClock() ktime.Clock {
- return k.monotonicClock
+ return k.timekeeper.monotonicClock
}
// CPUClockNow returns the current value of k.cpuClock.
@@ -1553,32 +1550,6 @@ func (k *Kernel) SetSaveError(err error) {
}
}
-var _ tcpip.Clock = (*Kernel)(nil)
-
-// Now implements tcpip.Clock.NowNanoseconds.
-func (k *Kernel) Now() time.Time {
- nsec, err := k.timekeeper.GetTime(sentrytime.Realtime)
- if err != nil {
- panic("timekeeper.GetTime(sentrytime.Realtime): " + err.Error())
- }
- return time.Unix(0, nsec)
-}
-
-// NowMonotonic implements tcpip.Clock.NowMonotonic.
-func (k *Kernel) NowMonotonic() tcpip.MonotonicTime {
- nsec, err := k.timekeeper.GetTime(sentrytime.Monotonic)
- if err != nil {
- panic("timekeeper.GetTime(sentrytime.Monotonic): " + err.Error())
- }
- var mt tcpip.MonotonicTime
- return mt.Add(time.Duration(nsec) * time.Nanosecond)
-}
-
-// AfterFunc implements tcpip.Clock.AfterFunc.
-func (k *Kernel) AfterFunc(d time.Duration, f func()) tcpip.Timer {
- return ktime.TcpipAfterFunc(k.realtimeClock, d, f)
-}
-
// SetMemoryFile sets Kernel.mf. SetMemoryFile must be called before Init or
// LoadFrom.
func (k *Kernel) SetMemoryFile(mf *pgalloc.MemoryFile) {
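With SetTimekeeper removed, loaders now pass the timekeeper through InitKernelArgs, and the realtime/monotonic clocks live on the Timekeeper itself. A minimal sketch of the updated construction order (other required InitKernelArgs fields and error handling elided):

package example

import "gvisor.dev/gvisor/pkg/sentry/kernel"

// initKernel shows the new wiring: Timekeeper is an Init argument, and
// SetClocks must still have been called on it beforehand, as Init enforces.
func initKernel(k *kernel.Kernel, tk *kernel.Timekeeper, args kernel.InitKernelArgs) error {
	args.Timekeeper = tk
	return k.Init(args)
}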
diff --git a/pkg/sentry/kernel/kernel_opts.go b/pkg/sentry/kernel/kernel_opts.go
index 2e66ec587..5ffafb0d1 100644
--- a/pkg/sentry/kernel/kernel_opts.go
+++ b/pkg/sentry/kernel/kernel_opts.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package kernel
// SpecialOpts contains non-standard options for the kernel.
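The paired //go:build and // +build lines are the transitional build-constraint syntax: Go 1.17's gofmt adds the new //go:build form while keeping the legacy comment for older toolchains, and the go1.1 tag here is satisfied by any modern toolchain, so the file's behavior is unchanged. The same dual form appears in the arch-specific files below, for example:

//go:build amd64
// +build amd64

package kernel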
diff --git a/pkg/sentry/kernel/msgqueue/BUILD b/pkg/sentry/kernel/msgqueue/BUILD
new file mode 100644
index 000000000..5ec11e1f6
--- /dev/null
+++ b/pkg/sentry/kernel/msgqueue/BUILD
@@ -0,0 +1,36 @@
+load("//tools:defs.bzl", "go_library")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
+
+package(licenses = ["notice"])
+
+go_template_instance(
+ name = "message_list",
+ out = "message_list.go",
+ package = "msgqueue",
+ prefix = "msg",
+ template = "//pkg/ilist:generic_list",
+ types = {
+ "Element": "*Message",
+ "Linker": "*Message",
+ },
+)
+
+go_library(
+ name = "msgqueue",
+ srcs = [
+ "message_list.go",
+ "msgqueue.go",
+ ],
+ visibility = ["//pkg/sentry:internal"],
+ deps = [
+ "//pkg/abi/linux",
+ "//pkg/context",
+ "//pkg/errors/linuxerr",
+ "//pkg/sentry/fs",
+ "//pkg/sentry/kernel/auth",
+ "//pkg/sentry/kernel/ipc",
+ "//pkg/sentry/kernel/time",
+ "//pkg/sync",
+ "//pkg/waiter",
+ ],
+)
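The go_template_instance rule generates message_list.go, an intrusive doubly linked list specialized for *Message; the msgEntry embedded in Message provides the link fields. The queue below relies only on the generated Front, PushBack, Remove, Empty, and element Next operations, roughly as in this sketch (API assumed from the usage in msgqueue.go; not part of the change):

package msgqueue

// drainAll is an illustrative helper that walks and empties a msgList using
// the generated list API.
func drainAll(l *msgList) []*Message {
	var out []*Message
	for msg := l.Front(); msg != nil; msg = msg.Next() {
		out = append(out, msg)
	}
	for !l.Empty() {
		l.Remove(l.Front())
	}
	return out
}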
diff --git a/pkg/sentry/kernel/msgqueue/msgqueue.go b/pkg/sentry/kernel/msgqueue/msgqueue.go
new file mode 100644
index 000000000..c7c5e41fb
--- /dev/null
+++ b/pkg/sentry/kernel/msgqueue/msgqueue.go
@@ -0,0 +1,618 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package msgqueue implements System V message queues.
+package msgqueue
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/sentry/fs"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
+ ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
+ "gvisor.dev/gvisor/pkg/sync"
+ "gvisor.dev/gvisor/pkg/waiter"
+)
+
+const (
+ // System-wide limit for maximum number of queues.
+ maxQueues = linux.MSGMNI
+
+ // Maximum size of a queue in bytes.
+ maxQueueBytes = linux.MSGMNB
+
+ // Maximum size of a message in bytes.
+ maxMessageBytes = linux.MSGMAX
+)
+
+// Registry contains a set of message queues that can be referenced using keys
+// or IDs.
+//
+// +stateify savable
+type Registry struct {
+ // mu protects all the fields below.
+ mu sync.Mutex `state:"nosave"`
+
+ // reg defines basic fields and operations needed for all SysV registries.
+ reg *ipc.Registry
+}
+
+// NewRegistry returns a new Registry ready to be used.
+func NewRegistry(userNS *auth.UserNamespace) *Registry {
+ return &Registry{
+ reg: ipc.NewRegistry(userNS),
+ }
+}
+
+// Queue represents a SysV message queue, described by sysvipc(7).
+//
+// +stateify savable
+type Queue struct {
+ // registry is the registry owning this queue. Immutable.
+ registry *Registry
+
+ // mu protects all the fields below.
+ mu sync.Mutex `state:"nosave"`
+
+ // dead is set to true when a queue is removed from the registry and should
+ // not be used. Operations on the queue should check dead, and return
+ // EIDRM if set to true.
+ dead bool
+
+ // obj defines basic fields that should be included in all SysV IPC objects.
+ obj *ipc.Object
+
+ // senders holds a queue of blocked message senders. Senders are notified
+ // when enough space is available in the queue to insert their message.
+ senders waiter.Queue
+
+ // receivers holds a queue of blocked receivers. Receivers are notified
+ // when a new message is inserted into the queue and can be received.
+ receivers waiter.Queue
+
+ // messages is a list of sent messages.
+ messages msgList
+
+ // sendTime is the last time a msgsnd was performed.
+ sendTime ktime.Time
+
+ // receiveTime is the last time a msgrcv was performed.
+ receiveTime ktime.Time
+
+ // changeTime is the last time the queue was modified using msgctl.
+ changeTime ktime.Time
+
+ // byteCount is the current number of message bytes in the queue.
+ byteCount uint64
+
+ // messageCount is the current number of messages in the queue.
+ messageCount uint64
+
+ // maxBytes is the maximum allowed number of bytes in the queue, and is also
+ // used as a limit for the number of total possible messages.
+ maxBytes uint64
+
+ // sendPID is the PID of the process that performed the last msgsnd.
+ sendPID int32
+
+ // receivePID is the PID of the process that performed the last msgrcv.
+ receivePID int32
+}
+
+// Message represents a message exchanged through a Queue via msgsnd(2) and
+// msgrcv(2).
+//
+// +stateify savable
+type Message struct {
+ msgEntry
+
+ // Type is an integer representing the type of the sent message.
+ Type int64
+
+ // Text is an untyped block of memory.
+ Text []byte
+
+ // Size is the size of Text.
+ Size uint64
+}
+
+func (m *Message) makeCopy() *Message {
+ new := &Message{
+ Type: m.Type,
+ Size: m.Size,
+ }
+ new.Text = make([]byte, len(m.Text))
+ copy(new.Text, m.Text)
+ return new
+}
+
+// Blocker is used for blocking Queue.Send and Queue.Receive calls. It serves
+// as an abstracted version of kernel.Task; kernel.Task is not used directly
+// in order to prevent circular dependencies.
+type Blocker interface {
+ Block(C <-chan struct{}) error
+}
+
+// FindOrCreate creates a new message queue or returns an existing one. See
+// msgget(2).
+func (r *Registry) FindOrCreate(ctx context.Context, key ipc.Key, mode linux.FileMode, private, create, exclusive bool) (*Queue, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if !private {
+ queue, err := r.reg.Find(ctx, key, mode, create, exclusive)
+ if err != nil {
+ return nil, err
+ }
+
+ if queue != nil {
+ return queue.(*Queue), nil
+ }
+ }
+
+ // Check system-wide limits.
+ if r.reg.ObjectCount() >= maxQueues {
+ return nil, linuxerr.ENOSPC
+ }
+
+ return r.newQueueLocked(ctx, key, fs.FileOwnerFromContext(ctx), fs.FilePermsFromMode(mode))
+}
+
+// newQueueLocked creates a new queue using the given fields. An error is
+// returned if there are no more available identifiers.
+//
+// Precondition: r.mu must be held.
+func (r *Registry) newQueueLocked(ctx context.Context, key ipc.Key, creator fs.FileOwner, perms fs.FilePermissions) (*Queue, error) {
+ q := &Queue{
+ registry: r,
+ obj: ipc.NewObject(r.reg.UserNS, key, creator, creator, perms),
+ sendTime: ktime.ZeroTime,
+ receiveTime: ktime.ZeroTime,
+ changeTime: ktime.NowFromContext(ctx),
+ maxBytes: maxQueueBytes,
+ }
+
+ err := r.reg.Register(q)
+ if err != nil {
+ return nil, err
+ }
+ return q, nil
+}
+
+// Remove removes the queue with the specified ID. All waiters (readers and
+// writers) will be awakened and fail. Remove will return an error if the ID
+// is invalid, or the user doesn't have privileges.
+func (r *Registry) Remove(id ipc.ID, creds *auth.Credentials) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ return r.reg.Remove(id, creds)
+}
+
+// FindByID returns the queue with the specified ID, or an error if no queue
+// with that ID exists.
+func (r *Registry) FindByID(id ipc.ID) (*Queue, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ mech := r.reg.FindByID(id)
+ if mech == nil {
+ return nil, linuxerr.EINVAL
+ }
+ return mech.(*Queue), nil
+}
+
+// IPCInfo reports global parameters for message queues. See msgctl(IPC_INFO).
+func (r *Registry) IPCInfo(ctx context.Context) *linux.MsgInfo {
+ return &linux.MsgInfo{
+ MsgPool: linux.MSGPOOL,
+ MsgMap: linux.MSGMAP,
+ MsgMax: linux.MSGMAX,
+ MsgMnb: linux.MSGMNB,
+ MsgMni: linux.MSGMNI,
+ MsgSsz: linux.MSGSSZ,
+ MsgTql: linux.MSGTQL,
+ MsgSeg: linux.MSGSEG,
+ }
+}
+
+// MsgInfo reports global parameters for message queues. See msgctl(MSG_INFO).
+func (r *Registry) MsgInfo(ctx context.Context) *linux.MsgInfo {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ var messages, bytes uint64
+ r.reg.ForAllObjects(
+ func(o ipc.Mechanism) {
+ q := o.(*Queue)
+ q.mu.Lock()
+ messages += q.messageCount
+ bytes += q.byteCount
+ q.mu.Unlock()
+ },
+ )
+
+ return &linux.MsgInfo{
+ MsgPool: int32(r.reg.ObjectCount()),
+ MsgMap: int32(messages),
+ MsgTql: int32(bytes),
+ MsgMax: linux.MSGMAX,
+ MsgMnb: linux.MSGMNB,
+ MsgMni: linux.MSGMNI,
+ MsgSsz: linux.MSGSSZ,
+ MsgSeg: linux.MSGSEG,
+ }
+}
+
+// Send appends a message to the message queue, and returns an error if sending
+// fails. See msgsnd(2).
+func (q *Queue) Send(ctx context.Context, m Message, b Blocker, wait bool, pid int32) error {
+ // Try to perform a non-blocking send using Queue.push. If EWOULDBLOCK
+ // is returned, start the blocking procedure. Otherwise, return normally.
+ creds := auth.CredentialsFromContext(ctx)
+
+ // Fast path: first attempt a non-blocking push.
+ if err := q.push(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {
+ return err
+ }
+
+ if !wait {
+ return linuxerr.EAGAIN
+ }
+
+ // Slow path: at this point, the queue was found to be full, and we were
+ // asked to block.
+
+ e, ch := waiter.NewChannelEntry(nil)
+ q.senders.EventRegister(&e, waiter.EventOut)
+ defer q.senders.EventUnregister(&e)
+
+ // Note: we need to check again before blocking the first time since space
+ // may have become available.
+ for {
+ if err := q.push(ctx, m, creds, pid); err != linuxerr.EWOULDBLOCK {
+ return err
+ }
+ if err := b.Block(ch); err != nil {
+ return err
+ }
+ }
+}
+
+// push appends a message to the queue's message list and notifies waiting
+// receivers that a message has been inserted. It returns an error if adding
+// the message would cause the queue to exceed its maximum capacity, which can
+// be used as a signal to block the task. Other errors should be returned as is.
+func (q *Queue) push(ctx context.Context, m Message, creds *auth.Credentials, pid int32) error {
+ if m.Type <= 0 {
+ return linuxerr.EINVAL
+ }
+
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ if !q.obj.CheckPermissions(creds, fs.PermMask{Write: true}) {
+ // The calling process does not have write permission on the message
+ // queue, and does not have the CAP_IPC_OWNER capability in the user
+ // namespace that governs its IPC namespace.
+ return linuxerr.EACCES
+ }
+
+ // Queue was removed while the process was waiting.
+ if q.dead {
+ return linuxerr.EIDRM
+ }
+
+ // Check if sufficient space is available (the queue isn't full). From
+ // the man pages:
+ //
+ // "A message queue is considered to be full if either of the following
+ // conditions is true:
+ //
+ // • Adding a new message to the queue would cause the total number
+ // of bytes in the queue to exceed the queue's maximum size (the
+ // msg_qbytes field).
+ //
+ // • Adding another message to the queue would cause the total
+ // number of messages in the queue to exceed the queue's maximum
+ // size (the msg_qbytes field). This check is necessary to
+ // prevent an unlimited number of zero-length messages being
+ // placed on the queue. Although such messages contain no data,
+ // they nevertheless consume (locked) kernel memory."
+ //
+ // The msg_qbytes field in our implementation is q.maxBytes.
+ if m.Size+q.byteCount > q.maxBytes || q.messageCount+1 > q.maxBytes {
+ return linuxerr.EWOULDBLOCK
+ }
+
+ // Copy the message into the queue.
+ q.messages.PushBack(&m)
+
+ q.byteCount += m.Size
+ q.messageCount++
+ q.sendPID = pid
+ q.sendTime = ktime.NowFromContext(ctx)
+
+ // Notify receivers about the new message.
+ q.receivers.Notify(waiter.EventIn)
+
+ return nil
+}
+
+// Receive removes a message from the queue and returns it. See msgrcv(2).
+func (q *Queue) Receive(ctx context.Context, b Blocker, mType int64, maxSize int64, wait, truncate, except bool, pid int32) (*Message, error) {
+ if maxSize < 0 || maxSize > maxMessageBytes {
+ return nil, linuxerr.EINVAL
+ }
+ max := uint64(maxSize)
+ creds := auth.CredentialsFromContext(ctx)
+
+ // Fast path: first attempt a non-blocking pop.
+ if msg, err := q.pop(ctx, creds, mType, max, truncate, except, pid); err != linuxerr.EWOULDBLOCK {
+ return msg, err
+ }
+
+ if !wait {
+ return nil, linuxerr.ENOMSG
+ }
+
+ // Slow path: at this point, the queue was found to be empty, and we were
+ // asked to block.
+
+ e, ch := waiter.NewChannelEntry(nil)
+ q.receivers.EventRegister(&e, waiter.EventIn)
+ defer q.receivers.EventUnregister(&e)
+
+ // Note: we need to check again before blocking the first time since a
+ // message may have become available.
+ for {
+ if msg, err := q.pop(ctx, creds, mType, max, truncate, except, pid); err != linuxerr.EWOULDBLOCK {
+ return msg, err
+ }
+ if err := b.Block(ch); err != nil {
+ return nil, err
+ }
+ }
+}
+
+// pop pops the first message from the queue that matches the given type. It
+// returns an error for all the cases specified in msgrcv(2). If the queue is
+// empty or no message of the specified type is available, an EWOULDBLOCK error
+// is returned, which can then be used as a signal to block the process or fail.
+func (q *Queue) pop(ctx context.Context, creds *auth.Credentials, mType int64, maxSize uint64, truncate, except bool, pid int32) (*Message, error) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ if !q.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
+ // The calling process does not have read permission on the message
+ // queue, and does not have the CAP_IPC_OWNER capability in the user
+ // namespace that governs its IPC namespace.
+ return nil, linuxerr.EACCES
+ }
+
+ // Queue was removed while the process was waiting.
+ if q.dead {
+ return nil, linuxerr.EIDRM
+ }
+
+ if q.messages.Empty() {
+ return nil, linuxerr.EWOULDBLOCK
+ }
+
+ // Get a message from the queue.
+ var msg *Message
+ switch {
+ case mType == 0:
+ msg = q.messages.Front()
+ case mType > 0:
+ msg = q.msgOfType(mType, except)
+ case mType < 0:
+ msg = q.msgOfTypeLessThan(-1 * mType)
+ }
+
+ // If no message exists, return a blocking signal.
+ if msg == nil {
+ return nil, linuxerr.EWOULDBLOCK
+ }
+
+ // Check message's size is acceptable.
+ if maxSize < msg.Size {
+ if !truncate {
+ return nil, linuxerr.E2BIG
+ }
+ msg.Size = maxSize
+ msg.Text = msg.Text[:maxSize+1]
+ }
+
+ q.messages.Remove(msg)
+
+ q.byteCount -= msg.Size
+ q.messageCount--
+ q.receivePID = pid
+ q.receiveTime = ktime.NowFromContext(ctx)
+
+ // Notify senders about available space.
+ q.senders.Notify(waiter.EventOut)
+
+ return msg, nil
+}
+
+// Copy copies a message from the queue without deleting it. If no message
+// exists, an error is returned. See msgrcv(MSG_COPY).
+func (q *Queue) Copy(mType int64) (*Message, error) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ if mType < 0 || q.messages.Empty() {
+ return nil, linuxerr.ENOMSG
+ }
+
+ msg := q.msgAtIndex(mType)
+ if msg == nil {
+ return nil, linuxerr.ENOMSG
+ }
+ return msg.makeCopy(), nil
+}
+
+// msgOfType returns the first message with the specified type, nil if no
+// message is found. If except is true, the first message of a type not equal
+// to mType will be returned.
+//
+// Precondition: caller must hold q.mu.
+func (q *Queue) msgOfType(mType int64, except bool) *Message {
+ if except {
+ for msg := q.messages.Front(); msg != nil; msg = msg.Next() {
+ if msg.Type != mType {
+ return msg
+ }
+ }
+ return nil
+ }
+
+ for msg := q.messages.Front(); msg != nil; msg = msg.Next() {
+ if msg.Type == mType {
+ return msg
+ }
+ }
+ return nil
+}
+
+// msgOfTypeLessThan returns the first message with the lowest type less than
+// or equal to mType, or nil if no such message exists.
+//
+// Precondition: caller must hold q.mu.
+func (q *Queue) msgOfTypeLessThan(mType int64) (m *Message) {
+ min := mType
+ for msg := q.messages.Front(); msg != nil; msg = msg.Next() {
+ if msg.Type <= mType && msg.Type < min {
+ m = msg
+ min = msg.Type
+ }
+ }
+ return m
+}
+
+// msgAtIndex returns a pointer to the message at the given index, or nil if
+// none exists.
+//
+// Precondition: caller must hold q.mu.
+func (q *Queue) msgAtIndex(mType int64) *Message {
+ msg := q.messages.Front()
+ for ; mType != 0 && msg != nil; mType-- {
+ msg = msg.Next()
+ }
+ return msg
+}
+
+// Set modifies some values of the queue. See msgctl(IPC_SET).
+func (q *Queue) Set(ctx context.Context, ds *linux.MsqidDS) error {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ creds := auth.CredentialsFromContext(ctx)
+ if ds.MsgQbytes > maxQueueBytes && !creds.HasCapabilityIn(linux.CAP_SYS_RESOURCE, q.obj.UserNS) {
+ // "An attempt (IPC_SET) was made to increase msg_qbytes beyond the
+ // system parameter MSGMNB, but the caller is not privileged (Linux:
+ // does not have the CAP_SYS_RESOURCE capability)."
+ return linuxerr.EPERM
+ }
+
+ if err := q.obj.Set(ctx, &ds.MsgPerm); err != nil {
+ return err
+ }
+
+ q.maxBytes = ds.MsgQbytes
+ q.changeTime = ktime.NowFromContext(ctx)
+ return nil
+}
+
+// Stat returns a MsqidDS object filled with information about the queue. See
+// msgctl(IPC_STAT) and msgctl(MSG_STAT).
+func (q *Queue) Stat(ctx context.Context) (*linux.MsqidDS, error) {
+ return q.stat(ctx, fs.PermMask{Read: true})
+}
+
+// StatAny is similar to Queue.Stat, but doesn't require read permission. See
+// msgctl(MSG_STAT_ANY).
+func (q *Queue) StatAny(ctx context.Context) (*linux.MsqidDS, error) {
+ return q.stat(ctx, fs.PermMask{})
+}
+
+// stat returns a MsqidDS object filled with information about the queue. An
+// error is returned if the user doesn't have the specified permissions.
+func (q *Queue) stat(ctx context.Context, mask fs.PermMask) (*linux.MsqidDS, error) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ creds := auth.CredentialsFromContext(ctx)
+ if !q.obj.CheckPermissions(creds, mask) {
+ // "The caller must have read permission on the message queue."
+ return nil, linuxerr.EACCES
+ }
+
+ return &linux.MsqidDS{
+ MsgPerm: linux.IPCPerm{
+ Key: uint32(q.obj.Key),
+ UID: uint32(creds.UserNamespace.MapFromKUID(q.obj.Owner.UID)),
+ GID: uint32(creds.UserNamespace.MapFromKGID(q.obj.Owner.GID)),
+ CUID: uint32(creds.UserNamespace.MapFromKUID(q.obj.Creator.UID)),
+ CGID: uint32(creds.UserNamespace.MapFromKGID(q.obj.Creator.GID)),
+ Mode: uint16(q.obj.Perms.LinuxMode()),
+ Seq: 0, // IPC sequences not supported.
+ },
+ MsgStime: q.sendTime.TimeT(),
+ MsgRtime: q.receiveTime.TimeT(),
+ MsgCtime: q.changeTime.TimeT(),
+ MsgCbytes: q.byteCount,
+ MsgQnum: q.messageCount,
+ MsgQbytes: q.maxBytes,
+ MsgLspid: q.sendPID,
+ MsgLrpid: q.receivePID,
+ }, nil
+}
+
+// Lock implements ipc.Mechanism.Lock.
+func (q *Queue) Lock() {
+ q.mu.Lock()
+}
+
+// Unlock implements ipc.Mechanism.Unlock.
+//
+// +checklocksignore
+func (q *Queue) Unlock() {
+ q.mu.Unlock()
+}
+
+// Object implements ipc.Mechanism.Object.
+func (q *Queue) Object() *ipc.Object {
+ return q.obj
+}
+
+// Destroy implements ipc.Mechanism.Destroy.
+func (q *Queue) Destroy() {
+ q.dead = true
+
+ // Notify waiters. Senders and receivers will try to run, and return an
+ // error (EIDRM). Waiters should remove themselves from the queue after
+ // waking up.
+ q.senders.Notify(waiter.EventOut)
+ q.receivers.Notify(waiter.EventIn)
+}
+
+// ID returns queue's ID.
+func (q *Queue) ID() ipc.ID {
+ return q.obj.ID
+}
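Putting the pieces together, a msgsnd-style caller obtains the registry from the IPC namespace, finds the queue by ID, and passes the task both as the context and as the Blocker. A hedged sketch of the send path (the helper and pid derivation are illustrative; the real syscall plumbing lives in the syscalls package):

package example

import (
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
	"gvisor.dev/gvisor/pkg/sentry/kernel/msgqueue"
)

// sendExample sketches how a syscall implementation might use Queue.Send.
func sendExample(t *kernel.Task, id ipc.ID, mType int64, data []byte, wait bool) error {
	registry := t.IPCNamespace().MsgqueueRegistry()
	queue, err := registry.FindByID(id)
	if err != nil {
		return err
	}
	msg := msgqueue.Message{
		Type: mType,
		Text: data,
		Size: uint64(len(data)),
	}
	// The task satisfies Blocker via its Block method; Send blocks on the
	// queue's senders waiter.Queue until space frees up (or fails if !wait).
	return queue.Send(t, msg, t, wait, int32(t.ThreadGroup().ID()))
}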
diff --git a/pkg/sentry/kernel/pipe/BUILD b/pkg/sentry/kernel/pipe/BUILD
index 34c617b08..5b2bac783 100644
--- a/pkg/sentry/kernel/pipe/BUILD
+++ b/pkg/sentry/kernel/pipe/BUILD
@@ -21,6 +21,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/amutex",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/marshal/primitive",
"//pkg/safemem",
@@ -30,7 +31,6 @@ go_library(
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -47,9 +47,9 @@ go_test(
library = ":pipe",
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/contexttest",
"//pkg/sentry/fs",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/kernel/pipe/node.go b/pkg/sentry/kernel/pipe/node.go
index 6497dc4ba..615591507 100644
--- a/pkg/sentry/kernel/pipe/node.go
+++ b/pkg/sentry/kernel/pipe/node.go
@@ -17,10 +17,10 @@ package pipe
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// inodeOperations implements fs.InodeOperations for pipes.
@@ -94,7 +94,7 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
if i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() {
if !waitFor(&i.mu, &i.wWakeup, ctx) {
r.DecRef(ctx)
- return nil, syserror.ErrInterrupted
+ return nil, linuxerr.ErrInterrupted
}
}
@@ -112,12 +112,12 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
// read side isn't open yet.
if flags.NonBlocking {
w.DecRef(ctx)
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
if !waitFor(&i.mu, &i.rWakeup, ctx) {
w.DecRef(ctx)
- return nil, syserror.ErrInterrupted
+ return nil, linuxerr.ErrInterrupted
}
}
return w, nil
@@ -130,10 +130,10 @@ func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.Fi
return rw, nil
default:
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
}
func (*inodeOperations) Allocate(_ context.Context, _ *fs.Inode, _, _ int64) error {
- return syserror.EPIPE
+ return linuxerr.EPIPE
}
diff --git a/pkg/sentry/kernel/pipe/node_test.go b/pkg/sentry/kernel/pipe/node_test.go
index d6fb0fdb8..31bd7910a 100644
--- a/pkg/sentry/kernel/pipe/node_test.go
+++ b/pkg/sentry/kernel/pipe/node_test.go
@@ -19,9 +19,9 @@ import (
"time"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/fs"
- "gvisor.dev/gvisor/pkg/syserror"
)
type sleeper struct {
@@ -239,7 +239,7 @@ func TestBlockedOpenIsCancellable(t *testing.T) {
// If the cancel on the sleeper didn't work, the open for read would never
// return.
res := <-done
- if res.error != syserror.ErrInterrupted {
+ if res.error != linuxerr.ErrInterrupted {
t.Fatalf("Cancellation didn't cause GetFile to return fs.ErrInterrupted, got %v.",
res.error)
}
@@ -258,7 +258,7 @@ func TestNonblockingWriteOpenFileNoReaders(t *testing.T) {
ctx := newSleeperContext(t)
f := NewInodeOperations(ctx, perms, newNamedPipe(t))
- if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); err != syserror.ENXIO {
+ if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); !linuxerr.Equals(linuxerr.ENXIO, err) {
t.Fatalf("Nonblocking open for write failed unexpected error %v.", err)
}
}
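Note the two comparison styles in the updated test: ErrInterrupted is still compared by identity because it is returned as the same sentinel value, while the ENXIO check switches to linuxerr.Equals, which matches on the errno value and so also tolerates wrapped or legacy error types (the exact wrapping behavior is assumed here). In sketch form:

package example

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// isInterrupted compares against a sentinel returned unwrapped, so identity
// comparison is sufficient (as in node_test.go above).
func isInterrupted(err error) bool {
	return err == linuxerr.ErrInterrupted
}

// isNoDevice compares by errno value, tolerating different concrete error
// types that carry the same errno.
func isNoDevice(err error) bool {
	return linuxerr.Equals(linuxerr.ENXIO, err)
}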
diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go
index 06769931a..86beee6fe 100644
--- a/pkg/sentry/kernel/pipe/pipe.go
+++ b/pkg/sentry/kernel/pipe/pipe.go
@@ -22,11 +22,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -200,7 +200,7 @@ func (p *Pipe) peekLocked(count int64, f func(safemem.BlockSeq) (uint64, error))
if !p.HasWriters() {
return 0, io.EOF
}
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
count = p.size
}
@@ -249,7 +249,7 @@ func (p *Pipe) writeLocked(count int64, f func(safemem.BlockSeq) (uint64, error)
avail := p.max - p.size
if avail == 0 {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
short := false
if count > avail {
@@ -257,7 +257,7 @@ func (p *Pipe) writeLocked(count int64, f func(safemem.BlockSeq) (uint64, error)
// (PIPE_BUF) be atomic, but requires no atomicity for writes
// larger than this.
if count <= atomicIOBytes {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
count = avail
short = true
@@ -306,7 +306,7 @@ func (p *Pipe) writeLocked(count int64, f func(safemem.BlockSeq) (uint64, error)
// If we shortened the write, adjust the returned error appropriately.
if short {
- return done, syserror.ErrWouldBlock
+ return done, linuxerr.ErrWouldBlock
}
return done, nil
@@ -428,18 +428,18 @@ func (p *Pipe) FifoSize(context.Context, *fs.File) (int64, error) {
// SetFifoSize implements fs.FifoSizer.SetFifoSize.
func (p *Pipe) SetFifoSize(size int64) (int64, error) {
if size < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if size < MinimumPipeSize {
size = MinimumPipeSize // Per spec.
}
if size > MaximumPipeSize {
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
p.mu.Lock()
defer p.mu.Unlock()
if size < p.size {
- return 0, syserror.EBUSY
+ return 0, linuxerr.EBUSY
}
p.max = size
return size, nil
diff --git a/pkg/sentry/kernel/pipe/pipe_test.go b/pkg/sentry/kernel/pipe/pipe_test.go
index 867f4a76b..aa3ab305d 100644
--- a/pkg/sentry/kernel/pipe/pipe_test.go
+++ b/pkg/sentry/kernel/pipe/pipe_test.go
@@ -18,8 +18,8 @@ import (
"bytes"
"testing"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -51,8 +51,8 @@ func TestPipeReadBlock(t *testing.T) {
defer w.DecRef(ctx)
n, err := r.Readv(ctx, usermem.BytesIOSequence(make([]byte, 1)))
- if n != 0 || err != syserror.ErrWouldBlock {
- t.Fatalf("Readv: got (%d, %v), wanted (0, %v)", n, err, syserror.ErrWouldBlock)
+ if n != 0 || err != linuxerr.ErrWouldBlock {
+ t.Fatalf("Readv: got (%d, %v), wanted (0, %v)", n, err, linuxerr.ErrWouldBlock)
}
}
@@ -67,7 +67,7 @@ func TestPipeWriteBlock(t *testing.T) {
msg := make([]byte, capacity+1)
n, err := w.Writev(ctx, usermem.BytesIOSequence(msg))
- if wantN, wantErr := int64(capacity), syserror.ErrWouldBlock; n != wantN || err != wantErr {
+ if wantN, wantErr := int64(capacity), linuxerr.ErrWouldBlock; n != wantN || err != wantErr {
t.Fatalf("Writev: got (%d, %v), wanted (%d, %v)", n, err, wantN, wantErr)
}
}
@@ -102,7 +102,7 @@ func TestPipeWriteUntilEnd(t *testing.T) {
for {
n, err := r.Readv(ctx, dst)
dst = dst.DropFirst64(n)
- if err == syserror.ErrWouldBlock {
+ if err == linuxerr.ErrWouldBlock {
select {
case <-ch:
continue
@@ -129,7 +129,7 @@ func TestPipeWriteUntilEnd(t *testing.T) {
for src.NumBytes() != 0 {
n, err := w.Writev(ctx, src)
src = src.DropFirst64(n)
- if err == syserror.ErrWouldBlock {
+ if err == linuxerr.ErrWouldBlock {
<-ch
continue
}
diff --git a/pkg/sentry/kernel/pipe/pipe_unsafe.go b/pkg/sentry/kernel/pipe/pipe_unsafe.go
index dd60cba24..077c5d596 100644
--- a/pkg/sentry/kernel/pipe/pipe_unsafe.go
+++ b/pkg/sentry/kernel/pipe/pipe_unsafe.go
@@ -23,6 +23,8 @@ import (
// concurrent calls cannot deadlock.
//
// Preconditions: x != y.
+// +checklocksacquire:x.mu
+// +checklocksacquire:y.mu
func lockTwoPipes(x, y *Pipe) {
// Lock the two pipes in order of increasing address.
if uintptr(unsafe.Pointer(x)) < uintptr(unsafe.Pointer(y)) {
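The +checklocksacquire and +checklocks annotations added in this and the next file feed gVisor's checklocks static analyzer: the former declares that the function returns with the named mutexes held, the latter that the caller must already hold them. A hypothetical pair of functions showing the pattern (not from the change itself):

package example

import "gvisor.dev/gvisor/pkg/sync"

type guarded struct {
	mu sync.Mutex
	n  int
}

// +checklocksacquire:g.mu
func (g *guarded) lock() {
	g.mu.Lock()
}

// +checklocks:g.mu
func (g *guarded) addLocked() {
	g.n++ // the analyzer verifies callers hold g.mu here.
}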
diff --git a/pkg/sentry/kernel/pipe/pipe_util.go b/pkg/sentry/kernel/pipe/pipe_util.go
index 24e467e93..c883a9014 100644
--- a/pkg/sentry/kernel/pipe/pipe_util.go
+++ b/pkg/sentry/kernel/pipe/pipe_util.go
@@ -22,6 +22,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/amutex"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -86,7 +87,7 @@ func (p *Pipe) Write(ctx context.Context, src usermem.IOSequence) (int64, error)
if n > 0 {
p.Notify(waiter.ReadableEvents)
}
- if err == unix.EPIPE {
+ if linuxerr.Equals(linuxerr.EPIPE, err) {
// If we are returning EPIPE send SIGPIPE to the task.
if sendSig := linux.SignalNoInfoFuncFromContext(ctx); sendSig != nil {
sendSig(linux.SIGPIPE)
@@ -135,7 +136,7 @@ func (p *Pipe) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArgume
v = math.MaxInt32 // Silently truncate.
}
// Copy result to userspace.
- iocc := primitive.IOCopyContext{
+ iocc := usermem.IOCopyContext{
IO: io,
Ctx: ctx,
Opts: usermem.IOOpts{
@@ -156,6 +157,7 @@ func (p *Pipe) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArgume
//
// mu must be held by the caller. waitFor returns with mu held, but it will
// drop mu before blocking for any reader/writers.
+// +checklocks:mu
func waitFor(mu *sync.Mutex, wakeupChan *chan struct{}, sleeper amutex.Sleeper) bool {
// Ideally this function would simply use a condition variable. However, the
 // wait needs to be interruptible via 'sleeper', so we must synchronize via a
diff --git a/pkg/sentry/kernel/pipe/vfs.go b/pkg/sentry/kernel/pipe/vfs.go
index 95b948edb..a6f1989f5 100644
--- a/pkg/sentry/kernel/pipe/vfs.go
+++ b/pkg/sentry/kernel/pipe/vfs.go
@@ -17,12 +17,12 @@ package pipe
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -79,7 +79,7 @@ func (vp *VFSPipe) ReaderWriterPair(ctx context.Context, mnt *vfs.Mount, vfsd *v
// Allocate implements vfs.FileDescriptionImpl.Allocate.
func (*VFSPipe) Allocate(context.Context, uint64, uint64, uint64) error {
- return syserror.ESPIPE
+ return linuxerr.ESPIPE
}
// Open opens the pipe represented by vp.
@@ -90,7 +90,7 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s
readable := vfs.MayReadFileWithOpenFlags(statusFlags)
writable := vfs.MayWriteFileWithOpenFlags(statusFlags)
if !readable && !writable {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
fd, err := vp.newFD(mnt, vfsd, statusFlags, locks)
@@ -120,7 +120,7 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s
// writer, we have to wait for a writer to open the other end.
if vp.pipe.isNamed && statusFlags&linux.O_NONBLOCK == 0 && !vp.pipe.HasWriters() && !waitFor(&vp.mu, &vp.wWakeup, ctx) {
fd.DecRef(ctx)
- return nil, syserror.EINTR
+ return nil, linuxerr.EINTR
}
case writable:
@@ -131,12 +131,12 @@ func (vp *VFSPipe) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, s
// side isn't open yet.
if statusFlags&linux.O_NONBLOCK != 0 {
fd.DecRef(ctx)
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
// Wait for a reader to open the other end.
if !waitFor(&vp.mu, &vp.rWakeup, ctx) {
fd.DecRef(ctx)
- return nil, syserror.EINTR
+ return nil, linuxerr.EINTR
}
}
@@ -224,7 +224,7 @@ func (fd *VFSPipeFD) Readiness(mask waiter.EventMask) waiter.EventMask {
// Allocate implements vfs.FileDescriptionImpl.Allocate.
func (fd *VFSPipeFD) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.ESPIPE
+ return linuxerr.ESPIPE
}
// EventRegister implements waiter.Waitable.EventRegister.
@@ -415,7 +415,7 @@ func Tee(ctx context.Context, dst, src *VFSPipeFD, count int64) (int64, error) {
// Preconditions: count > 0.
func spliceOrTee(ctx context.Context, dst, src *VFSPipeFD, count int64, removeFromSrc bool) (int64, error) {
if dst.pipe == src.pipe {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
lockTwoPipes(dst.pipe, src.pipe)
diff --git a/pkg/sentry/kernel/posixtimer.go b/pkg/sentry/kernel/posixtimer.go
index d801a3d83..319754a42 100644
--- a/pkg/sentry/kernel/posixtimer.go
+++ b/pkg/sentry/kernel/posixtimer.go
@@ -18,8 +18,8 @@ import (
"math"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
// IntervalTimer represents a POSIX interval timer as described by
@@ -175,7 +175,7 @@ func (t *Task) IntervalTimerCreate(c ktime.Clock, sigev *linux.Sigevent) (linux.
break
}
if t.tg.nextTimerID == end {
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
@@ -214,16 +214,16 @@ func (t *Task) IntervalTimerCreate(c ktime.Clock, sigev *linux.Sigevent) (linux.
target, ok := t.tg.pidns.tasks[ThreadID(sigev.Tid)]
t.tg.pidns.owner.mu.RUnlock()
if !ok || target.tg != t.tg {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
it.target = target
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if sigev.Notify != linux.SIGEV_NONE {
it.signo = linux.Signal(sigev.Signo)
if !it.signo.IsValid() {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
it.timer = ktime.NewTimer(c, it)
@@ -238,7 +238,7 @@ func (t *Task) IntervalTimerDelete(id linux.TimerID) error {
defer t.tg.timerMu.Unlock()
it := t.tg.timers[id]
if it == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
delete(t.tg.timers, id)
it.DestroyTimer()
@@ -251,7 +251,7 @@ func (t *Task) IntervalTimerSettime(id linux.TimerID, its linux.Itimerspec, abs
defer t.tg.timerMu.Unlock()
it := t.tg.timers[id]
if it == nil {
- return linux.Itimerspec{}, syserror.EINVAL
+ return linux.Itimerspec{}, linuxerr.EINVAL
}
newS, err := ktime.SettingFromItimerspec(its, abs, it.timer.Clock())
@@ -269,7 +269,7 @@ func (t *Task) IntervalTimerGettime(id linux.TimerID) (linux.Itimerspec, error)
defer t.tg.timerMu.Unlock()
it := t.tg.timers[id]
if it == nil {
- return linux.Itimerspec{}, syserror.EINVAL
+ return linux.Itimerspec{}, linuxerr.EINVAL
}
tm, s := it.timer.Get()
@@ -285,7 +285,7 @@ func (t *Task) IntervalTimerGetoverrun(id linux.TimerID) (int32, error) {
defer t.tg.timerMu.Unlock()
it := t.tg.timers[id]
if it == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// By timer_create(2) invariant, either it.target == nil (in which case
// it.overrunLast is immutably 0) or t.tg == it.target.tg; and the fact
diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go
index a6287fd6a..717c9a6b3 100644
--- a/pkg/sentry/kernel/ptrace.go
+++ b/pkg/sentry/kernel/ptrace.go
@@ -19,10 +19,10 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -294,7 +294,7 @@ func (t *Task) isYAMADescendantOfLocked(ancestor *Task) bool {
// Precondition: the TaskSet mutex must be locked (for reading or writing).
func (t *Task) hasYAMAExceptionForLocked(tracer *Task) bool {
- allowed, ok := t.k.ptraceExceptions[t]
+ allowed, ok := t.k.ptraceExceptions[t.tg.leader]
if !ok {
return false
}
@@ -464,7 +464,7 @@ func (t *Task) ptraceUnfreezeLocked() {
// stop.
func (t *Task) ptraceUnstop(mode ptraceSyscallMode, singlestep bool, sig linux.Signal) error {
if sig != 0 && !sig.IsValid() {
- return syserror.EIO
+ return linuxerr.EIO
}
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
@@ -481,7 +481,7 @@ func (t *Task) ptraceTraceme() error {
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
if t.hasTracer() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if t.parent == nil {
// In Linux, only init can not have a parent, and init is assumed never
@@ -497,7 +497,7 @@ func (t *Task) ptraceTraceme() error {
return nil
}
if !t.parent.canTraceLocked(t, true) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if t.parent.exitState != TaskExitNone {
// Fail silently, as if we were successfully attached but then
@@ -513,25 +513,25 @@ func (t *Task) ptraceTraceme() error {
// ptrace(PTRACE_SEIZE, target, 0, opts) if seize is true. t is the caller.
func (t *Task) ptraceAttach(target *Task, seize bool, opts uintptr) error {
if t.tg == target.tg {
- return syserror.EPERM
+ return linuxerr.EPERM
}
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
if !t.canTraceLocked(target, true) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if target.hasTracer() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Attaching to zombies and dead tasks is not permitted; the exit
// notification logic relies on this. Linux allows attaching to PF_EXITING
// tasks, though.
if target.exitState >= TaskExitZombie {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if seize {
if err := target.ptraceSetOptionsLocked(opts); err != nil {
- return syserror.EIO
+ return linuxerr.EIO
}
}
target.ptraceTracer.Store(t)
@@ -568,7 +568,7 @@ func (t *Task) ptraceAttach(target *Task, seize bool, opts uintptr) error {
// ptrace stop.
func (t *Task) ptraceDetach(target *Task, sig linux.Signal) error {
if sig != 0 && !sig.IsValid() {
- return syserror.EIO
+ return linuxerr.EIO
}
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
@@ -651,6 +651,7 @@ func (t *Task) forgetTracerLocked() {
// Preconditions:
// * The signal mutex must be locked.
// * The caller must be running on the task goroutine.
+// +checklocks:t.tg.signalHandlers.mu
func (t *Task) ptraceSignalLocked(info *linux.SignalInfo) bool {
if linux.Signal(info.Signo) == linux.SIGKILL {
return false
@@ -766,14 +767,14 @@ const (
// ptraceClone is called at the end of a clone or fork syscall to check if t
// should enter PTRACE_EVENT_CLONE, PTRACE_EVENT_FORK, or PTRACE_EVENT_VFORK
// stop. child is the new task.
-func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions) bool {
+func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, args *linux.CloneArgs) bool {
if !t.hasTracer() {
return false
}
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
event := false
- if !opts.Untraced {
+ if args.Flags&linux.CLONE_UNTRACED == 0 {
switch kind {
case ptraceCloneKindClone:
if t.ptraceOpts.TraceClone {
@@ -808,7 +809,7 @@ func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions
// clone(2)'s documentation of CLONE_UNTRACED and CLONE_PTRACE is
// confusingly wrong; see kernel/fork.c:_do_fork() => copy_process() =>
// include/linux/ptrace.h:ptrace_init_task().
- if event || opts.InheritTracer {
+ if event || args.Flags&linux.CLONE_PTRACE != 0 {
tracer := t.Tracer()
if tracer != nil {
child.ptraceTracer.Store(tracer)
@@ -910,7 +911,7 @@ func (t *Task) ptraceExit() {
return
}
t.tg.signalHandlers.mu.Lock()
- status := t.exitStatus.Status()
+ status := t.exitStatus
t.tg.signalHandlers.mu.Unlock()
t.Debugf("Entering PTRACE_EVENT_EXIT stop")
t.ptraceEventLocked(linux.PTRACE_EVENT_EXIT, uint64(status))
@@ -938,7 +939,7 @@ func (t *Task) ptraceKill(target *Task) error {
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
if target.Tracer() != t {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
target.tg.signalHandlers.mu.Lock()
defer target.tg.signalHandlers.mu.Unlock()
@@ -962,10 +963,10 @@ func (t *Task) ptraceInterrupt(target *Task) error {
t.tg.pidns.owner.mu.Lock()
defer t.tg.pidns.owner.mu.Unlock()
if target.Tracer() != t {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
if !target.ptraceSeized {
- return syserror.EIO
+ return linuxerr.EIO
}
target.tg.signalHandlers.mu.Lock()
defer target.tg.signalHandlers.mu.Unlock()
@@ -994,7 +995,7 @@ func (t *Task) ptraceSetOptionsLocked(opts uintptr) error {
linux.PTRACE_O_TRACEVFORK |
linux.PTRACE_O_TRACEVFORKDONE)
if opts&^valid != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
t.ptraceOpts = ptraceOptions{
ExitKill: opts&linux.PTRACE_O_EXITKILL != 0,
@@ -1020,7 +1021,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
// specified by pid.
target := t.tg.pidns.TaskWithID(pid)
if target == nil {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
// PTRACE_ATTACH and PTRACE_SEIZE do not require that target is not already
@@ -1028,7 +1029,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
if req == linux.PTRACE_ATTACH || req == linux.PTRACE_SEIZE {
seize := req == linux.PTRACE_SEIZE
if seize && addr != 0 {
- return syserror.EIO
+ return linuxerr.EIO
}
return t.ptraceAttach(target, seize, uintptr(data))
}
@@ -1045,7 +1046,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
t.tg.pidns.owner.mu.RLock()
if target.Tracer() != t {
t.tg.pidns.owner.mu.RUnlock()
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
if !target.ptraceFreeze() {
t.tg.pidns.owner.mu.RUnlock()
@@ -1053,7 +1054,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
// PTRACE_TRACEME, PTRACE_INTERRUPT, and PTRACE_KILL) require the
// tracee to be in a ptrace-stop, otherwise they fail with ESRCH." -
// ptrace(2)
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
t.tg.pidns.owner.mu.RUnlock()
// Even if the target has a ptrace-stop active, the tracee's task goroutine
@@ -1118,13 +1119,13 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
if !target.ptraceSeized {
- return syserror.EIO
+ return linuxerr.EIO
}
if target.ptraceSiginfo == nil {
- return syserror.EIO
+ return linuxerr.EIO
}
if target.ptraceSiginfo.Code>>8 != linux.PTRACE_EVENT_STOP {
- return syserror.EIO
+ return linuxerr.EIO
}
target.tg.signalHandlers.mu.Lock()
defer target.tg.signalHandlers.mu.Unlock()
@@ -1221,7 +1222,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
if target.ptraceSiginfo == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
_, err := target.ptraceSiginfo.CopyOut(t, data)
return err
@@ -1234,14 +1235,14 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
if target.ptraceSiginfo == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
target.ptraceSiginfo = &info
return nil
case linux.PTRACE_GETSIGMASK:
if addr != linux.SignalSetSize {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mask := target.SignalMask()
_, err := mask.CopyOut(t, data)
@@ -1249,7 +1250,7 @@ func (t *Task) Ptrace(req int64, pid ThreadID, addr, data hostarch.Addr) error {
case linux.PTRACE_SETSIGMASK:
if addr != linux.SignalSetSize {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
var mask linux.SignalSet
if _, err := mask.CopyIn(t, data); err != nil {
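ptraceClone now reads the raw clone flags from linux.CloneArgs instead of the removed CloneOptions wrapper, so the old Untraced and InheritTracer booleans become direct flag tests. An illustrative sketch of the equivalent checks:

package example

import "gvisor.dev/gvisor/pkg/abi/linux"

// cloneTraceFlags mirrors the flag tests ptraceClone performs above.
func cloneTraceFlags(args *linux.CloneArgs) (untraced, inheritTracer bool) {
	untraced = args.Flags&linux.CLONE_UNTRACED != 0    // was opts.Untraced
	inheritTracer = args.Flags&linux.CLONE_PTRACE != 0 // was opts.InheritTracer
	return untraced, inheritTracer
}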
diff --git a/pkg/sentry/kernel/ptrace_amd64.go b/pkg/sentry/kernel/ptrace_amd64.go
index 5ae05b5c3..564add01b 100644
--- a/pkg/sentry/kernel/ptrace_amd64.go
+++ b/pkg/sentry/kernel/ptrace_amd64.go
@@ -12,14 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -87,6 +88,6 @@ func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) err
return err
default:
- return syserror.EIO
+ return linuxerr.EIO
}
}
diff --git a/pkg/sentry/kernel/ptrace_arm64.go b/pkg/sentry/kernel/ptrace_arm64.go
index 46dd84cbc..7c2b94339 100644
--- a/pkg/sentry/kernel/ptrace_arm64.go
+++ b/pkg/sentry/kernel/ptrace_arm64.go
@@ -12,16 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kernel
import (
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ptraceArch implements arch-specific ptrace commands.
func (t *Task) ptraceArch(target *Task, req int64, addr, data hostarch.Addr) error {
- return syserror.EIO
+ return linuxerr.EIO
}
diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go
index 4bc5bca44..de352f4f2 100644
--- a/pkg/sentry/kernel/rseq.go
+++ b/pkg/sentry/kernel/rseq.go
@@ -18,9 +18,9 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/hostcpu"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -59,23 +59,23 @@ func (t *Task) RSeqAvailable() bool {
func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error {
if t.rseqAddr != 0 {
if t.rseqAddr != addr {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if t.rseqSignature != signature {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// rseq must be aligned and correctly sized.
if addr&(linux.AlignOfRSeq-1) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length != linux.SizeOfRSeq {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if _, ok := t.MemoryManager().CheckIORange(addr, linux.SizeOfRSeq); !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
t.rseqAddr = addr
@@ -92,7 +92,7 @@ func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error {
t.Debugf("Failed to copy CPU to %#x for rseq: %v", t.rseqAddr, err)
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
t.SendSignal(SignalInfoPriv(linux.SIGSEGV))
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
return nil
@@ -103,16 +103,16 @@ func (t *Task) SetRSeq(addr hostarch.Addr, length, signature uint32) error {
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) ClearRSeq(addr hostarch.Addr, length, signature uint32) error {
if t.rseqAddr == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if t.rseqAddr != addr {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length != linux.SizeOfRSeq {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if t.rseqSignature != signature {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := t.rseqClearCPU(); err != nil {
@@ -152,10 +152,10 @@ func (t *Task) SetOldRSeqCriticalRegion(r OldRSeqCriticalRegion) error {
return nil
}
if r.CriticalSection.Start >= r.CriticalSection.End {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if r.CriticalSection.Contains(r.Restart) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// TODO(jamieliu): check that r.CriticalSection and r.Restart are in
// the application address range, for consistency with Linux.
@@ -187,7 +187,7 @@ func (t *Task) SetOldRSeqCPUAddr(addr hostarch.Addr) error {
// unfortunate, but unlikely in a correct program.
if err := t.rseqUpdateCPU(); err != nil {
t.oldRSeqCPUAddr = 0
- return syserror.EINVAL // yes, EINVAL, not err or EFAULT
+ return linuxerr.EINVAL // yes, EINVAL, not err or EFAULT
}
return nil
}
diff --git a/pkg/sentry/kernel/seccomp.go b/pkg/sentry/kernel/seccomp.go
index 54ca43c2e..0d66648c3 100644
--- a/pkg/sentry/kernel/seccomp.go
+++ b/pkg/sentry/kernel/seccomp.go
@@ -18,9 +18,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/syserror"
)
const maxSyscallFilterInstructions = 1 << 15
@@ -176,7 +176,7 @@ func (t *Task) AppendSyscallFilter(p bpf.Program, syncAll bool) error {
}
if totalLength > maxSyscallFilterInstructions {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
newFilters = append(newFilters, p)
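
AppendSyscallFilter enforces a cap on the combined size of all installed seccomp programs, returning ENOMEM once the total crosses maxSyscallFilterInstructions. A standalone sketch of that bookkeeping; the 4-instruction per-filter overhead is an assumption for illustration only:

package main

import (
    "fmt"
    "syscall"
)

// maxFilterInstructions mirrors maxSyscallFilterInstructions (1 << 15).
const maxFilterInstructions = 1 << 15

// appendFilter adds prog only if the cumulative instruction count of all
// filters (plus an assumed per-filter overhead) stays under the limit.
func appendFilter(existing [][]uint64, prog []uint64) ([][]uint64, error) {
    const perFilterOverhead = 4 // assumption for illustration only
    total := len(prog) + perFilterOverhead
    for _, f := range existing {
        total += len(f) + perFilterOverhead
    }
    if total > maxFilterInstructions {
        return nil, syscall.ENOMEM
    }
    return append(existing, prog), nil
}

func main() {
    filters, err := appendFilter(nil, make([]uint64, 128))
    fmt.Println(len(filters), err) // 1 <nil>
}
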
diff --git a/pkg/sentry/kernel/semaphore/BUILD b/pkg/sentry/kernel/semaphore/BUILD
index 65e5427c1..6aa74219e 100644
--- a/pkg/sentry/kernel/semaphore/BUILD
+++ b/pkg/sentry/kernel/semaphore/BUILD
@@ -25,12 +25,12 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
- "//pkg/log",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/fs",
"//pkg/sentry/kernel/auth",
+ "//pkg/sentry/kernel/ipc",
"//pkg/sentry/kernel/time",
"//pkg/sync",
- "//pkg/syserror",
],
)
@@ -40,10 +40,11 @@ go_test(
srcs = ["semaphore_test.go"],
library = ":semaphore",
deps = [
- "//pkg/abi/linux",
- "//pkg/context",
- "//pkg/sentry/contexttest",
- "//pkg/sentry/kernel/auth",
- "//pkg/syserror",
+ "//pkg/abi/linux", # keep
+ "//pkg/context", # keep
+ "//pkg/errors/linuxerr", #keep
+ "//pkg/sentry/contexttest", # keep
+ "//pkg/sentry/kernel/auth", # keep
+ "//pkg/sentry/kernel/ipc", # keep
],
)
diff --git a/pkg/sentry/kernel/semaphore/semaphore.go b/pkg/sentry/kernel/semaphore/semaphore.go
index 47bb66b42..28e466948 100644
--- a/pkg/sentry/kernel/semaphore/semaphore.go
+++ b/pkg/sentry/kernel/semaphore/semaphore.go
@@ -20,12 +20,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/log"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -46,15 +46,15 @@ const (
//
// +stateify savable
type Registry struct {
- // userNS owning the ipc name this registry belongs to. Immutable.
- userNS *auth.UserNamespace
// mu protects all fields below.
- mu sync.Mutex `state:"nosave"`
- semaphores map[int32]*Set
- lastIDUsed int32
+ mu sync.Mutex `state:"nosave"`
+
+ // reg defines basic fields and operations needed for all SysV registries.
+ reg *ipc.Registry
+
// indexes maintains a mapping between a set's index in virtual array and
// its identifier.
- indexes map[int32]int32
+ indexes map[int32]ipc.ID
}
// Set represents a set of semaphores that can be operated atomically.
@@ -64,19 +64,11 @@ type Set struct {
// registry owning this sem set. Immutable.
registry *Registry
- // Id is a handle that identifies the set.
- ID int32
-
- // key is an user provided key that can be shared between processes.
- key int32
+ // mu protects all fields below.
+ mu sync.Mutex `state:"nosave"`
- // creator is the user that created the set. Immutable.
- creator fs.FileOwner
+ obj *ipc.Object
- // mu protects all fields below.
- mu sync.Mutex `state:"nosave"`
- owner fs.FileOwner
- perms fs.FilePermissions
opTime ktime.Time
changeTime ktime.Time
@@ -114,9 +106,8 @@ type waiter struct {
// NewRegistry creates a new semaphore set registry.
func NewRegistry(userNS *auth.UserNamespace) *Registry {
return &Registry{
- userNS: userNS,
- semaphores: make(map[int32]*Set),
- indexes: make(map[int32]int32),
+ reg: ipc.NewRegistry(userNS),
+ indexes: make(map[int32]ipc.ID),
}
}
@@ -125,62 +116,48 @@ func NewRegistry(userNS *auth.UserNamespace) *Registry {
// a new set is always created. If create is false, it fails if a set cannot
// be found. If exclusive is true, it fails if a set with the same key already
// exists.
-func (r *Registry) FindOrCreate(ctx context.Context, key, nsems int32, mode linux.FileMode, private, create, exclusive bool) (*Set, error) {
+func (r *Registry) FindOrCreate(ctx context.Context, key ipc.Key, nsems int32, mode linux.FileMode, private, create, exclusive bool) (*Set, error) {
if nsems < 0 || nsems > semsMax {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
r.mu.Lock()
defer r.mu.Unlock()
if !private {
- // Look up an existing semaphore.
- if set := r.findByKey(key); set != nil {
- set.mu.Lock()
- defer set.mu.Unlock()
-
- // Check that caller can access semaphore set.
- creds := auth.CredentialsFromContext(ctx)
- if !set.checkPerms(creds, fs.PermsFromMode(mode)) {
- return nil, syserror.EACCES
- }
+ set, err := r.reg.Find(ctx, key, mode, create, exclusive)
+ if err != nil {
+ return nil, err
+ }
- // Validate parameters.
+ // Validate semaphore-specific parameters.
+ if set != nil {
+ set := set.(*Set)
if nsems > int32(set.Size()) {
- return nil, syserror.EINVAL
- }
- if create && exclusive {
- return nil, syserror.EEXIST
+ return nil, linuxerr.EINVAL
}
return set, nil
}
-
- if !create {
- // Semaphore not found and should not be created.
- return nil, syserror.ENOENT
- }
}
// Zero is only valid if an existing set is found.
if nsems == 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
// Apply system limits.
//
- // Map semaphores and map indexes in a registry are of the same size,
- // check map semaphores only here for the system limit.
- if len(r.semaphores) >= setsMax {
- return nil, syserror.ENOSPC
+ // The reg.objects and indexes maps in a registry are always the same size,
+ // so checking reg.objects alone here suffices for the system limit.
+ if r.reg.ObjectCount() >= setsMax {
+ return nil, linuxerr.ENOSPC
}
if r.totalSems() > int(semsTotalMax-nsems) {
- return nil, syserror.ENOSPC
+ return nil, linuxerr.ENOSPC
}
// Finally create a new set.
- owner := fs.FileOwnerFromContext(ctx)
- perms := fs.FilePermsFromMode(mode)
- return r.newSet(ctx, key, owner, owner, perms, nsems)
+ return r.newSetLocked(ctx, key, fs.FileOwnerFromContext(ctx), fs.FilePermsFromMode(mode), nsems)
}
// IPCInfo returns information about system-wide semaphore limits and parameters.
@@ -207,7 +184,7 @@ func (r *Registry) SemInfo() *linux.SemInfo {
defer r.mu.Unlock()
info := r.IPCInfo()
- info.SemUsz = uint32(len(r.semaphores))
+ info.SemUsz = uint32(r.reg.ObjectCount())
info.SemAem = uint32(r.totalSems())
return info
@@ -230,77 +207,59 @@ func (r *Registry) HighestIndex() int32 {
return highestIndex
}
-// RemoveID removes set with give 'id' from the registry and marks the set as
+// Remove removes the set with the given 'id' from the registry and marks the set as
// dead. All waiters will be awakened and fail.
-func (r *Registry) RemoveID(id int32, creds *auth.Credentials) error {
+func (r *Registry) Remove(id ipc.ID, creds *auth.Credentials) error {
r.mu.Lock()
defer r.mu.Unlock()
- set := r.semaphores[id]
- if set == nil {
- return syserror.EINVAL
- }
index, found := r.findIndexByID(id)
if !found {
- // Inconsistent state.
- panic(fmt.Sprintf("unable to find an index for ID: %d", id))
+ return linuxerr.EINVAL
}
+ delete(r.indexes, index)
- set.mu.Lock()
- defer set.mu.Unlock()
-
- // "The effective user ID of the calling process must match the creator or
- // owner of the semaphore set, or the caller must be privileged."
- if !set.checkCredentials(creds) && !set.checkCapability(creds) {
- return syserror.EACCES
- }
+ r.reg.Remove(id, creds)
- delete(r.semaphores, set.ID)
- delete(r.indexes, index)
- set.destroy()
return nil
}
-func (r *Registry) newSet(ctx context.Context, key int32, owner, creator fs.FileOwner, perms fs.FilePermissions, nsems int32) (*Set, error) {
+// newSetLocked creates a new Set using the given fields. An error is returned if there
+// are no more available identifiers.
+//
+// Precondition: r.mu must be held.
+func (r *Registry) newSetLocked(ctx context.Context, key ipc.Key, creator fs.FileOwner, perms fs.FilePermissions, nsems int32) (*Set, error) {
set := &Set{
registry: r,
- key: key,
- owner: owner,
- creator: owner,
- perms: perms,
+ obj: ipc.NewObject(r.reg.UserNS, ipc.Key(key), creator, creator, perms),
changeTime: ktime.NowFromContext(ctx),
sems: make([]sem, nsems),
}
- // Find the next available ID.
- for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ {
- // Handle wrap around.
- if id < 0 {
- id = 0
- continue
- }
- if r.semaphores[id] == nil {
- index, found := r.findFirstAvailableIndex()
- if !found {
- panic("unable to find an available index")
- }
- r.indexes[index] = id
- r.lastIDUsed = id
- r.semaphores[id] = set
- set.ID = id
- return set, nil
- }
+ err := r.reg.Register(set)
+ if err != nil {
+ return nil, err
+ }
+
+ index, found := r.findFirstAvailableIndex()
+ if !found {
+ // See linux, ipc/sem.c:newary().
+ return nil, linuxerr.ENOSPC
}
+ r.indexes[index] = set.obj.ID
- log.Warningf("Semaphore map is full, they must be leaking")
- return nil, syserror.ENOMEM
+ return set, nil
}
// FindByID looks up a set given an ID.
-func (r *Registry) FindByID(id int32) *Set {
+func (r *Registry) FindByID(id ipc.ID) *Set {
r.mu.Lock()
defer r.mu.Unlock()
- return r.semaphores[id]
+ mech := r.reg.FindByID(id)
+ if mech == nil {
+ return nil
+ }
+ return mech.(*Set)
}
// FindByIndex looks up a set given an index.
@@ -312,19 +271,10 @@ func (r *Registry) FindByIndex(index int32) *Set {
if !present {
return nil
}
- return r.semaphores[id]
+ return r.reg.FindByID(id).(*Set)
}
-func (r *Registry) findByKey(key int32) *Set {
- for _, v := range r.semaphores {
- if v.key == key {
- return v
- }
- }
- return nil
-}
-
-func (r *Registry) findIndexByID(id int32) (int32, bool) {
+func (r *Registry) findIndexByID(id ipc.ID) (int32, bool) {
for k, v := range r.indexes {
if v == id {
return k, true
@@ -344,12 +294,36 @@ func (r *Registry) findFirstAvailableIndex() (int32, bool) {
func (r *Registry) totalSems() int {
totalSems := 0
- for _, v := range r.semaphores {
- totalSems += v.Size()
- }
+ r.reg.ForAllObjects(
+ func(o ipc.Mechanism) {
+ totalSems += o.(*Set).Size()
+ },
+ )
return totalSems
}
+// ID returns the semaphore set's ID.
+func (s *Set) ID() ipc.ID {
+ return s.obj.ID
+}
+
+// Object implements ipc.Mechanism.Object.
+func (s *Set) Object() *ipc.Object {
+ return s.obj
+}
+
+// Lock implements ipc.Mechanism.Lock.
+func (s *Set) Lock() {
+ s.mu.Lock()
+}
+
+// Unlock implements ipc.Mechanism.Unlock.
+//
+// +checklocksignore
+func (s *Set) Unlock() {
+ s.mu.Unlock()
+}
+
func (s *Set) findSem(num int32) *sem {
if num < 0 || int(num) >= s.Size() {
return nil
@@ -362,19 +336,15 @@ func (s *Set) Size() int {
return len(s.sems)
}
-// Change changes some fields from the set atomically.
-func (s *Set) Change(ctx context.Context, creds *auth.Credentials, owner fs.FileOwner, perms fs.FilePermissions) error {
+// Set modifies attributes for a semaphore set. See semctl(IPC_SET).
+func (s *Set) Set(ctx context.Context, ds *linux.SemidDS) error {
s.mu.Lock()
defer s.mu.Unlock()
- // "The effective UID of the calling process must match the owner or creator
- // of the semaphore set, or the caller must be privileged."
- if !s.checkCredentials(creds) && !s.checkCapability(creds) {
- return syserror.EACCES
+ if err := s.obj.Set(ctx, &ds.SemPerm); err != nil {
+ return err
}
- s.owner = owner
- s.perms = perms
s.changeTime = ktime.NowFromContext(ctx)
return nil
}
@@ -394,18 +364,18 @@ func (s *Set) semStat(creds *auth.Credentials, permMask fs.PermMask) (*linux.Sem
s.mu.Lock()
defer s.mu.Unlock()
- if !s.checkPerms(creds, permMask) {
- return nil, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, permMask) {
+ return nil, linuxerr.EACCES
}
return &linux.SemidDS{
SemPerm: linux.IPCPerm{
- Key: uint32(s.key),
- UID: uint32(creds.UserNamespace.MapFromKUID(s.owner.UID)),
- GID: uint32(creds.UserNamespace.MapFromKGID(s.owner.GID)),
- CUID: uint32(creds.UserNamespace.MapFromKUID(s.creator.UID)),
- CGID: uint32(creds.UserNamespace.MapFromKGID(s.creator.GID)),
- Mode: uint16(s.perms.LinuxMode()),
+ Key: uint32(s.obj.Key),
+ UID: uint32(creds.UserNamespace.MapFromKUID(s.obj.Owner.UID)),
+ GID: uint32(creds.UserNamespace.MapFromKGID(s.obj.Owner.GID)),
+ CUID: uint32(creds.UserNamespace.MapFromKUID(s.obj.Creator.UID)),
+ CGID: uint32(creds.UserNamespace.MapFromKGID(s.obj.Creator.GID)),
+ Mode: uint16(s.obj.Perms.LinuxMode()),
Seq: 0, // IPC sequence not supported.
},
SemOTime: s.opTime.TimeT(),
@@ -417,20 +387,20 @@ func (s *Set) semStat(creds *auth.Credentials, permMask fs.PermMask) (*linux.Sem
// SetVal overrides a semaphore value, waking up waiters as needed.
func (s *Set) SetVal(ctx context.Context, num int32, val int16, creds *auth.Credentials, pid int32) error {
if val < 0 || val > valueMax {
- return syserror.ERANGE
+ return linuxerr.ERANGE
}
s.mu.Lock()
defer s.mu.Unlock()
// "The calling process must have alter permission on the semaphore set."
- if !s.checkPerms(creds, fs.PermMask{Write: true}) {
- return syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Write: true}) {
+ return linuxerr.EACCES
}
sem := s.findSem(num)
if sem == nil {
- return syserror.ERANGE
+ return linuxerr.ERANGE
}
// TODO(gvisor.dev/issue/137): Clear undo entries in all processes.
@@ -452,7 +422,7 @@ func (s *Set) SetValAll(ctx context.Context, vals []uint16, creds *auth.Credenti
for _, val := range vals {
if val > valueMax {
- return syserror.ERANGE
+ return linuxerr.ERANGE
}
}
@@ -460,8 +430,8 @@ func (s *Set) SetValAll(ctx context.Context, vals []uint16, creds *auth.Credenti
defer s.mu.Unlock()
// "The calling process must have alter permission on the semaphore set."
- if !s.checkPerms(creds, fs.PermMask{Write: true}) {
- return syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Write: true}) {
+ return linuxerr.EACCES
}
for i, val := range vals {
@@ -482,13 +452,13 @@ func (s *Set) GetVal(num int32, creds *auth.Credentials) (int16, error) {
defer s.mu.Unlock()
// "The calling process must have read permission on the semaphore set."
- if !s.checkPerms(creds, fs.PermMask{Read: true}) {
- return 0, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
+ return 0, linuxerr.EACCES
}
sem := s.findSem(num)
if sem == nil {
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
return sem.value, nil
}
@@ -499,8 +469,8 @@ func (s *Set) GetValAll(creds *auth.Credentials) ([]uint16, error) {
defer s.mu.Unlock()
// "The calling process must have read permission on the semaphore set."
- if !s.checkPerms(creds, fs.PermMask{Read: true}) {
- return nil, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
+ return nil, linuxerr.EACCES
}
vals := make([]uint16, s.Size())
@@ -516,13 +486,13 @@ func (s *Set) GetPID(num int32, creds *auth.Credentials) (int32, error) {
defer s.mu.Unlock()
// "The calling process must have read permission on the semaphore set."
- if !s.checkPerms(creds, fs.PermMask{Read: true}) {
- return 0, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
+ return 0, linuxerr.EACCES
}
sem := s.findSem(num)
if sem == nil {
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
return sem.pid, nil
}
@@ -532,13 +502,13 @@ func (s *Set) countWaiters(num int32, creds *auth.Credentials, pred func(w *wait
defer s.mu.Unlock()
// The calling process must have read permission on the semaphore set.
- if !s.checkPerms(creds, fs.PermMask{Read: true}) {
- return 0, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
+ return 0, linuxerr.EACCES
}
sem := s.findSem(num)
if sem == nil {
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
var cnt uint16
for w := sem.waiters.Front(); w != nil; w = w.Next() {
@@ -574,22 +544,22 @@ func (s *Set) ExecuteOps(ctx context.Context, ops []linux.Sembuf, creds *auth.Cr
// Did it race with a removal operation?
if s.dead {
- return nil, 0, syserror.EIDRM
+ return nil, 0, linuxerr.EIDRM
}
// Validate the operations.
readOnly := true
for _, op := range ops {
if s.findSem(int32(op.SemNum)) == nil {
- return nil, 0, syserror.EFBIG
+ return nil, 0, linuxerr.EFBIG
}
if op.SemOp != 0 {
readOnly = false
}
}
- if !s.checkPerms(creds, fs.PermMask{Read: readOnly, Write: !readOnly}) {
- return nil, 0, syserror.EACCES
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: readOnly, Write: !readOnly}) {
+ return nil, 0, linuxerr.EACCES
}
ch, num, err := s.executeOps(ctx, ops, pid)
@@ -613,7 +583,7 @@ func (s *Set) executeOps(ctx context.Context, ops []linux.Sembuf, pid int32) (ch
if tmpVals[op.SemNum] != 0 {
// Semaphore isn't 0, must wait.
if op.SemFlg&linux.IPC_NOWAIT != 0 {
- return nil, 0, syserror.ErrWouldBlock
+ return nil, 0, linuxerr.ErrWouldBlock
}
w := newWaiter(op.SemOp)
@@ -624,12 +594,12 @@ func (s *Set) executeOps(ctx context.Context, ops []linux.Sembuf, pid int32) (ch
if op.SemOp < 0 {
// Handle 'wait' operation.
if -op.SemOp > valueMax {
- return nil, 0, syserror.ERANGE
+ return nil, 0, linuxerr.ERANGE
}
if -op.SemOp > tmpVals[op.SemNum] {
// Not enough resources, must wait.
if op.SemFlg&linux.IPC_NOWAIT != 0 {
- return nil, 0, syserror.ErrWouldBlock
+ return nil, 0, linuxerr.ErrWouldBlock
}
w := newWaiter(op.SemOp)
@@ -639,7 +609,7 @@ func (s *Set) executeOps(ctx context.Context, ops []linux.Sembuf, pid int32) (ch
} else {
// op.SemOp > 0: Handle 'signal' operation.
if tmpVals[op.SemNum] > valueMax-op.SemOp {
- return nil, 0, syserror.ERANGE
+ return nil, 0, linuxerr.ERANGE
}
}
@@ -674,38 +644,10 @@ func (s *Set) AbortWait(num int32, ch chan struct{}) {
// Waiter may not be found in case it raced with wakeWaiters().
}
-func (s *Set) checkCredentials(creds *auth.Credentials) bool {
- return s.owner.UID == creds.EffectiveKUID ||
- s.owner.GID == creds.EffectiveKGID ||
- s.creator.UID == creds.EffectiveKUID ||
- s.creator.GID == creds.EffectiveKGID
-}
-
-func (s *Set) checkCapability(creds *auth.Credentials) bool {
- return creds.HasCapabilityIn(linux.CAP_IPC_OWNER, s.registry.userNS) && creds.UserNamespace.MapFromKUID(s.owner.UID).Ok()
-}
-
-func (s *Set) checkPerms(creds *auth.Credentials, reqPerms fs.PermMask) bool {
- // Are we owner, or in group, or other?
- p := s.perms.Other
- if s.owner.UID == creds.EffectiveKUID {
- p = s.perms.User
- } else if creds.InGroup(s.owner.GID) {
- p = s.perms.Group
- }
-
- // Are permissions satisfied without capability checks?
- if p.SupersetOf(reqPerms) {
- return true
- }
-
- return s.checkCapability(creds)
-}
-
-// destroy destroys the set.
+// Destroy implements ipc.Mechanism.Destroy.
//
// Preconditions: Caller must hold 's.mu'.
-func (s *Set) destroy() {
+func (s *Set) Destroy() {
// Notify all waiters. They will fail on the next attempt to execute
// operations and return error.
s.dead = true
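
The refactor above moves ID allocation, key lookup, and ownership bookkeeping out of the semaphore code and into a shared ipc.Registry, with semaphore.Set participating through the ipc.Mechanism surface (ID, Object, Lock, Unlock, Destroy). A minimal standalone sketch of that pattern, using hypothetical local types rather than the real ipc package:

package main

import "fmt"

// mechanism is a hypothetical mirror of ipc.Mechanism: the minimal surface a
// SysV object must expose so a shared registry can manage it.
type mechanism interface {
    ID() int32
    Lock()
    Unlock()
    Destroy()
}

// object mirrors the role of ipc.Object: shared key/owner/ID bookkeeping.
type object struct {
    id  int32
    key int32
}

// set is a toy analogue of semaphore.Set: type-specific state plus an
// embedded *object that the registry manipulates through the interface.
type set struct {
    obj  *object
    sems []int16
    dead bool
}

func (s *set) ID() int32 { return s.obj.id }
func (s *set) Lock()     {} // the real code locks s.mu here
func (s *set) Unlock()   {}
func (s *set) Destroy()  { s.dead = true }

// registry sketches ipc.Registry.Register: hand out the next ID and index the
// object by it, so per-type code no longer scans for a free slot itself.
type registry struct {
    lastID  int32
    objects map[int32]mechanism
}

func (r *registry) register(s *set) {
    r.lastID++
    s.obj.id = r.lastID
    r.objects[s.obj.id] = s
}

func main() {
    r := &registry{objects: make(map[int32]mechanism)}
    s := &set{obj: &object{key: 123}, sems: make([]int16, 2)}
    r.register(s)
    fmt.Println(s.ID(), len(r.objects)) // 1 1
}
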
diff --git a/pkg/sentry/kernel/semaphore/semaphore_test.go b/pkg/sentry/kernel/semaphore/semaphore_test.go
index e47acefdf..59ac92ef1 100644
--- a/pkg/sentry/kernel/semaphore/semaphore_test.go
+++ b/pkg/sentry/kernel/semaphore/semaphore_test.go
@@ -19,9 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
)
func executeOps(ctx context.Context, t *testing.T, set *Set, ops []linux.Sembuf, block bool) chan struct{} {
@@ -55,7 +56,7 @@ func signalled(ch chan struct{}) bool {
func TestBasic(t *testing.T) {
ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
+ set := &Set{obj: &ipc.Object{ID: 123}, sems: make([]sem, 1)}
ops := []linux.Sembuf{
{SemOp: 1},
}
@@ -76,7 +77,7 @@ func TestBasic(t *testing.T) {
func TestWaitForZero(t *testing.T) {
ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
+ set := &Set{obj: &ipc.Object{ID: 123}, sems: make([]sem, 1)}
ops := []linux.Sembuf{
{SemOp: 0},
}
@@ -115,7 +116,7 @@ func TestWaitForZero(t *testing.T) {
func TestNoWait(t *testing.T) {
ctx := contexttest.Context(t)
- set := &Set{ID: 123, sems: make([]sem, 1)}
+ set := &Set{obj: &ipc.Object{ID: 123}, sems: make([]sem, 1)}
ops := []linux.Sembuf{
{SemOp: 1},
}
@@ -123,14 +124,14 @@ func TestNoWait(t *testing.T) {
ops[0].SemOp = -2
ops[0].SemFlg = linux.IPC_NOWAIT
- if _, _, err := set.executeOps(ctx, ops, 123); err != syserror.ErrWouldBlock {
- t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock)
+ if _, _, err := set.executeOps(ctx, ops, 123); err != linuxerr.ErrWouldBlock {
+ t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, linuxerr.ErrWouldBlock)
}
ops[0].SemOp = 0
ops[0].SemFlg = linux.IPC_NOWAIT
- if _, _, err := set.executeOps(ctx, ops, 123); err != syserror.ErrWouldBlock {
- t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock)
+ if _, _, err := set.executeOps(ctx, ops, 123); err != linuxerr.ErrWouldBlock {
+ t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, linuxerr.ErrWouldBlock)
}
}
@@ -138,11 +139,12 @@ func TestUnregister(t *testing.T) {
ctx := contexttest.Context(t)
r := NewRegistry(auth.NewRootUserNamespace())
set, err := r.FindOrCreate(ctx, 123, 2, linux.FileMode(0x600), true, true, true)
+
if err != nil {
t.Fatalf("FindOrCreate() failed, err: %v", err)
}
- if got := r.FindByID(set.ID); got.ID != set.ID {
- t.Fatalf("FindById(%d) failed, got: %+v, expected: %+v", set.ID, got, set)
+ if got := r.FindByID(set.obj.ID); got.obj.ID != set.obj.ID {
+ t.Fatalf("FindById(%d) failed, got: %+v, expected: %+v", set.obj.ID, got, set)
}
ops := []linux.Sembuf{
@@ -155,14 +157,14 @@ func TestUnregister(t *testing.T) {
}
creds := auth.CredentialsFromContext(ctx)
- if err := r.RemoveID(set.ID, creds); err != nil {
- t.Fatalf("RemoveID(%d) failed, err: %v", set.ID, err)
+ if err := r.Remove(set.obj.ID, creds); err != nil {
+ t.Fatalf("Remove(%d) failed, err: %v", set.obj.ID, err)
}
if !set.dead {
t.Fatalf("set is not dead: %+v", set)
}
- if got := r.FindByID(set.ID); got != nil {
- t.Fatalf("FindById(%d) failed, got: %+v, expected: nil", set.ID, got)
+ if got := r.FindByID(set.obj.ID); got != nil {
+ t.Fatalf("FindById(%d) failed, got: %+v, expected: nil", set.obj.ID, got)
}
for i, ch := range chs {
if !signalled(ch) {
diff --git a/pkg/sentry/kernel/sessions.go b/pkg/sentry/kernel/sessions.go
index ca9076406..f9f872522 100644
--- a/pkg/sentry/kernel/sessions.go
+++ b/pkg/sentry/kernel/sessions.go
@@ -16,7 +16,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// SessionID is the public identifier.
@@ -120,8 +120,9 @@ func (pg *ProcessGroup) Originator() *ThreadGroup {
// IsOrphan returns true if this process group is an orphan.
func (pg *ProcessGroup) IsOrphan() bool {
- pg.originator.TaskSet().mu.RLock()
- defer pg.originator.TaskSet().mu.RUnlock()
+ ts := pg.originator.TaskSet()
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
return pg.ancestors == 0
}
@@ -277,14 +278,14 @@ func (tg *ThreadGroup) createSession() error {
continue
}
if s.leader == tg {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if s.id == SessionID(id) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
for pg := s.processGroups.Front(); pg != nil; pg = pg.Next() {
if pg.id == ProcessGroupID(id) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
}
@@ -369,17 +370,22 @@ func (tg *ThreadGroup) CreateProcessGroup() error {
// Get the ID for this thread in the current namespace.
id := tg.pidns.tgids[tg]
+ // Check whether the process still exists (a zero ID means it has already exited).
+ if id == 0 {
+ return linuxerr.ESRCH
+ }
+
// Per above, check for a Session leader or existing group.
for s := tg.pidns.owner.sessions.Front(); s != nil; s = s.Next() {
if s.leader.pidns != tg.pidns {
continue
}
if s.leader == tg {
- return syserror.EPERM
+ return linuxerr.EPERM
}
for pg := s.processGroups.Front(); pg != nil; pg = pg.Next() {
if pg.id == ProcessGroupID(id) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
}
@@ -437,17 +443,17 @@ func (tg *ThreadGroup) JoinProcessGroup(pidns *PIDNamespace, pgid ProcessGroupID
// Lookup the ProcessGroup.
pg := pidns.processGroups[pgid]
if pg == nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Disallow the join if an execve has performed, per POSIX.
if checkExec && tg.execed {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// See if it's in the same session as ours.
if pg.session != tg.processGroup.session {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Join the group; adjust children.
diff --git a/pkg/sentry/kernel/shm/BUILD b/pkg/sentry/kernel/shm/BUILD
index 1c3c0794f..2547957ba 100644
--- a/pkg/sentry/kernel/shm/BUILD
+++ b/pkg/sentry/kernel/shm/BUILD
@@ -28,6 +28,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/refs",
@@ -35,12 +36,12 @@ go_library(
"//pkg/sentry/device",
"//pkg/sentry/fs",
"//pkg/sentry/kernel/auth",
+ "//pkg/sentry/kernel/ipc",
"//pkg/sentry/kernel/time",
"//pkg/sentry/memmap",
"//pkg/sentry/pgalloc",
"//pkg/sentry/usage",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/kernel/shm/shm.go b/pkg/sentry/kernel/shm/shm.go
index a73f1bdca..ab938fa3c 100644
--- a/pkg/sentry/kernel/shm/shm.go
+++ b/pkg/sentry/kernel/shm/shm.go
@@ -38,24 +38,19 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
-// Key represents a shm segment key. Analogous to a file name.
-type Key int32
-
-// ID represents the opaque handle for a shm segment. Analogous to an fd.
-type ID int32
-
// Registry tracks all shared memory segments in an IPC namespace. The registry
// provides the mechanisms for creating and finding segments, and reporting
// global shm parameters.
@@ -68,50 +63,51 @@ type Registry struct {
// mu protects all fields below.
mu sync.Mutex `state:"nosave"`
- // shms maps segment ids to segments.
+ // reg defines basic fields and operations needed for all SysV registries.
+ //
+ // Within reg, there are two maps, Objects and KeysToIDs.
//
- // shms holds all referenced segments, which are removed on the last
+ // reg.objects holds all referenced segments, which are removed on the last
// DecRef. Thus, it cannot itself hold a reference on the Shm.
//
// Since removal only occurs after the last (unlocked) DecRef, there
// exists a short window during which a Shm still exists in Shm, but is
// unreferenced. Users must use TryIncRef to determine if the Shm is
// still valid.
- shms map[ID]*Shm
-
- // keysToShms maps segment keys to segments.
//
- // Shms in keysToShms are guaranteed to be referenced, as they are
+ // keysToIDs maps segment keys to IDs.
+ //
+ // Shms in keysToIDs are guaranteed to be referenced, as they are
// removed by disassociateKey before the last DecRef.
- keysToShms map[Key]*Shm
+ reg *ipc.Registry
// Sum of the sizes of all existing segments rounded up to page size, in
// units of page size.
totalPages uint64
-
- // ID assigned to the last created segment. Used to quickly find the next
- // unused ID.
- lastIDUsed ID
}
// NewRegistry creates a new shm registry.
func NewRegistry(userNS *auth.UserNamespace) *Registry {
return &Registry{
- userNS: userNS,
- shms: make(map[ID]*Shm),
- keysToShms: make(map[Key]*Shm),
+ userNS: userNS,
+ reg: ipc.NewRegistry(userNS),
}
}
// FindByID looks up a segment given an ID.
//
// FindByID returns a reference on Shm.
-func (r *Registry) FindByID(id ID) *Shm {
+func (r *Registry) FindByID(id ipc.ID) *Shm {
r.mu.Lock()
defer r.mu.Unlock()
- s := r.shms[id]
+ mech := r.reg.FindByID(id)
+ if mech == nil {
+ return nil
+ }
+ s := mech.(*Shm)
+
// Take a reference on s. If TryIncRef fails, s has reached the last
- // DecRef, but hasn't quite been removed from r.shms yet.
+ // DecRef, but hasn't quite been removed from r.reg.objects yet.
if s != nil && s.TryIncRef() {
return s
}
@@ -128,9 +124,9 @@ func (r *Registry) dissociateKey(s *Shm) {
defer r.mu.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
- if s.key != linux.IPC_PRIVATE {
- delete(r.keysToShms, s.key)
- s.key = linux.IPC_PRIVATE
+ if s.obj.Key != linux.IPC_PRIVATE {
+ r.reg.DissociateKey(s.obj.Key)
+ s.obj.Key = linux.IPC_PRIVATE
}
}
@@ -138,82 +134,60 @@ func (r *Registry) dissociateKey(s *Shm) {
// analogous to open(2).
//
// FindOrCreate returns a reference on Shm.
-func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size uint64, mode linux.FileMode, private, create, exclusive bool) (*Shm, error) {
+func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key ipc.Key, size uint64, mode linux.FileMode, private, create, exclusive bool) (*Shm, error) {
if (create || private) && (size < linux.SHMMIN || size > linux.SHMMAX) {
// "A new segment was to be created and size is less than SHMMIN or
// greater than SHMMAX." - man shmget(2)
//
// Note that 'private' always implies the creation of a new segment
// whether IPC_CREAT is specified or not.
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
r.mu.Lock()
defer r.mu.Unlock()
- if len(r.shms) >= linux.SHMMNI {
+ if r.reg.ObjectCount() >= linux.SHMMNI {
// "All possible shared memory IDs have been taken (SHMMNI) ..."
// - man shmget(2)
- return nil, syserror.ENOSPC
+ return nil, linuxerr.ENOSPC
}
if !private {
- // Look up an existing segment.
- if shm := r.keysToShms[key]; shm != nil {
- shm.mu.Lock()
- defer shm.mu.Unlock()
-
- // Check that caller can access the segment.
- if !shm.checkPermissions(ctx, fs.PermsFromMode(mode)) {
- // "The user does not have permission to access the shared
- // memory segment, and does not have the CAP_IPC_OWNER
- // capability in the user namespace that governs its IPC
- // namespace." - man shmget(2)
- return nil, syserror.EACCES
- }
+ shm, err := r.reg.Find(ctx, key, mode, create, exclusive)
+ if err != nil {
+ return nil, err
+ }
+ // Validate shm-specific parameters.
+ if shm != nil {
+ shm := shm.(*Shm)
if size > shm.size {
// "A segment for the given key exists, but size is greater than
// the size of that segment." - man shmget(2)
- return nil, syserror.EINVAL
- }
-
- if create && exclusive {
- // "IPC_CREAT and IPC_EXCL were specified in shmflg, but a
- // shared memory segment already exists for key."
- // - man shmget(2)
- return nil, syserror.EEXIST
+ return nil, linuxerr.EINVAL
}
-
shm.IncRef()
return shm, nil
}
-
- if !create {
- // "No segment exists for the given key, and IPC_CREAT was not
- // specified." - man shmget(2)
- return nil, syserror.ENOENT
- }
}
var sizeAligned uint64
if val, ok := hostarch.Addr(size).RoundUp(); ok {
sizeAligned = uint64(val)
} else {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
if numPages := sizeAligned / hostarch.PageSize; r.totalPages+numPages > linux.SHMALL {
// "... allocating a segment of the requested size would cause the
// system to exceed the system-wide limit on shared memory (SHMALL)."
// - man shmget(2)
- return nil, syserror.ENOSPC
+ return nil, linuxerr.ENOSPC
}
// Need to create a new segment.
- creator := fs.FileOwnerFromContext(ctx)
- perms := fs.FilePermsFromMode(mode)
- s, err := r.newShm(ctx, pid, key, creator, perms, size)
+ s, err := r.newShmLocked(ctx, pid, key, fs.FileOwnerFromContext(ctx), fs.FilePermsFromMode(mode), size)
if err != nil {
return nil, err
}
@@ -223,10 +197,10 @@ func (r *Registry) FindOrCreate(ctx context.Context, pid int32, key Key, size ui
return s, nil
}
-// newShm creates a new segment in the registry.
+// newShmLocked creates a new segment in the registry.
//
// Precondition: Caller must hold r.mu.
-func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.FileOwner, perms fs.FilePermissions, size uint64) (*Shm, error) {
+func (r *Registry) newShmLocked(ctx context.Context, pid int32, key ipc.Key, creator fs.FileOwner, perms fs.FilePermissions, size uint64) (*Shm, error) {
mfp := pgalloc.MemoryFileProviderFromContext(ctx)
if mfp == nil {
panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFileProvider))
@@ -241,40 +215,21 @@ func (r *Registry) newShm(ctx context.Context, pid int32, key Key, creator fs.Fi
shm := &Shm{
mfp: mfp,
registry: r,
- creator: creator,
size: size,
effectiveSize: effectiveSize,
+ obj: ipc.NewObject(r.reg.UserNS, ipc.Key(key), creator, creator, perms),
fr: fr,
- key: key,
- perms: perms,
- owner: creator,
creatorPID: pid,
changeTime: ktime.NowFromContext(ctx),
}
shm.InitRefs()
- // Find the next available ID.
- for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ {
- // Handle wrap around.
- if id < 0 {
- id = 0
- continue
- }
- if r.shms[id] == nil {
- r.lastIDUsed = id
-
- shm.ID = id
- r.shms[id] = shm
- r.keysToShms[key] = shm
-
- r.totalPages += effectiveSize / hostarch.PageSize
-
- return shm, nil
- }
+ if err := r.reg.Register(shm); err != nil {
+ return nil, err
}
+ r.totalPages += effectiveSize / hostarch.PageSize
- log.Warningf("Shm ids exhuasted, they may be leaking")
- return nil, syserror.ENOSPC
+ return shm, nil
}
// IPCInfo reports global parameters for sysv shared memory segments on this
@@ -296,7 +251,7 @@ func (r *Registry) ShmInfo() *linux.ShmInfo {
defer r.mu.Unlock()
return &linux.ShmInfo{
- UsedIDs: int32(r.lastIDUsed),
+ UsedIDs: int32(r.reg.LastIDUsed()),
ShmTot: r.totalPages,
ShmRss: r.totalPages, // We could probably get a better estimate from memory accounting.
ShmSwp: 0, // No reclaim at the moment.
@@ -313,11 +268,11 @@ func (r *Registry) remove(s *Shm) {
s.mu.Lock()
defer s.mu.Unlock()
- if s.key != linux.IPC_PRIVATE {
+ if s.obj.Key != linux.IPC_PRIVATE {
panic(fmt.Sprintf("Attempted to remove %s from the registry whose key is still associated", s.debugLocked()))
}
- delete(r.shms, s.ID)
+ r.reg.DissociateID(s.obj.ID)
r.totalPages -= s.effectiveSize / hostarch.PageSize
}
@@ -329,13 +284,16 @@ func (r *Registry) Release(ctx context.Context) {
// the IPC namespace containing it has no more references.
toRelease := make([]*Shm, 0)
r.mu.Lock()
- for _, s := range r.keysToShms {
- s.mu.Lock()
- if !s.pendingDestruction {
- toRelease = append(toRelease, s)
- }
- s.mu.Unlock()
- }
+ r.reg.ForAllObjects(
+ func(o ipc.Mechanism) {
+ s := o.(*Shm)
+ s.mu.Lock()
+ if !s.pendingDestruction {
+ toRelease = append(toRelease, s)
+ }
+ s.mu.Unlock()
+ },
+ )
r.mu.Unlock()
for _, s := range toRelease {
@@ -373,12 +331,6 @@ type Shm struct {
// registry points to the shm registry containing this segment. Immutable.
registry *Registry
- // ID is the kernel identifier for this segment. Immutable.
- ID ID
-
- // creator is the user that created the segment. Immutable.
- creator fs.FileOwner
-
// size is the requested size of the segment at creation, in
// bytes. Immutable.
size uint64
@@ -396,14 +348,8 @@ type Shm struct {
// mu protects all fields below.
mu sync.Mutex `state:"nosave"`
- // key is the public identifier for this segment.
- key Key
-
- // perms is the access permissions for the segment.
- perms fs.FilePermissions
+ obj *ipc.Object
- // owner of this segment.
- owner fs.FileOwner
// attachTime is updated on every successful shmat.
attachTime ktime.Time
// detachTime is updated on every successful shmdt.
@@ -425,17 +371,44 @@ type Shm struct {
pendingDestruction bool
}
+// ID returns the object's ID.
+func (s *Shm) ID() ipc.ID {
+ return s.obj.ID
+}
+
+// Object implements ipc.Mechanism.Object.
+func (s *Shm) Object() *ipc.Object {
+ return s.obj
+}
+
+// Destroy implements ipc.Mechanism.Destroy. No work is performed on shm.Destroy
+// because a different removal mechanism is used in shm. See Shm.MarkDestroyed.
+func (s *Shm) Destroy() {
+}
+
+// Lock implements ipc.Mechanism.Lock.
+func (s *Shm) Lock() {
+ s.mu.Lock()
+}
+
+// Unlock implements ipc.Mechanism.Unlock.
+//
+// +checklocksignore
+func (s *Shm) Unlock() {
+ s.mu.Unlock()
+}
+
// Precondition: Caller must hold s.mu.
func (s *Shm) debugLocked() string {
return fmt.Sprintf("Shm{id: %d, key: %d, size: %d bytes, refs: %d, destroyed: %v}",
- s.ID, s.key, s.size, s.ReadRefs(), s.pendingDestruction)
+ s.obj.ID, s.obj.Key, s.size, s.ReadRefs(), s.pendingDestruction)
}
// MappedName implements memmap.MappingIdentity.MappedName.
func (s *Shm) MappedName(ctx context.Context) string {
s.mu.Lock()
defer s.mu.Unlock()
- return fmt.Sprintf("SYSV%08d", s.key)
+ return fmt.Sprintf("SYSV%08d", s.obj.Key)
}
// DeviceID implements memmap.MappingIdentity.DeviceID.
@@ -447,7 +420,7 @@ func (s *Shm) DeviceID() uint64 {
func (s *Shm) InodeID() uint64 {
// "shmid gets reported as "inode#" in /proc/pid/maps. proc-ps tools use
// this. Changing this will break them." -- Linux, ipc/shm.c:newseg()
- return uint64(s.ID)
+ return uint64(s.obj.ID)
}
// DecRef drops a reference on s.
@@ -511,7 +484,7 @@ func (*Shm) CopyMapping(context.Context, memmap.MappingSpace, hostarch.AddrRange
func (s *Shm) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > s.fr.Length() {
- err = &memmap.BusError{syserror.EFAULT}
+ err = &memmap.BusError{linuxerr.EFAULT}
}
if source := optional.Intersect(memmap.MappableRange{0, s.fr.Length()}); source.Length() != 0 {
return []memmap.Translation{
@@ -547,10 +520,11 @@ func (s *Shm) ConfigureAttach(ctx context.Context, addr hostarch.Addr, opts Atta
s.mu.Lock()
defer s.mu.Unlock()
if s.pendingDestruction && s.ReadRefs() == 0 {
- return memmap.MMapOpts{}, syserror.EIDRM
+ return memmap.MMapOpts{}, linuxerr.EIDRM
}
- if !s.checkPermissions(ctx, fs.PermMask{
+ creds := auth.CredentialsFromContext(ctx)
+ if !s.obj.CheckPermissions(creds, fs.PermMask{
Read: true,
Write: !opts.Readonly,
Execute: opts.Execute,
@@ -558,7 +532,7 @@ func (s *Shm) ConfigureAttach(ctx context.Context, addr hostarch.Addr, opts Atta
// "The calling process does not have the required permissions for the
// requested attach type, and does not have the CAP_IPC_OWNER capability
// in the user namespace that governs its IPC namespace." - man shmat(2)
- return memmap.MMapOpts{}, syserror.EACCES
+ return memmap.MMapOpts{}, linuxerr.EACCES
}
return memmap.MMapOpts{
Length: s.size,
@@ -590,19 +564,19 @@ func (s *Shm) IPCStat(ctx context.Context) (*linux.ShmidDS, error) {
// "The caller must have read permission on the shared memory segment."
// - man shmctl(2)
- if !s.checkPermissions(ctx, fs.PermMask{Read: true}) {
+ creds := auth.CredentialsFromContext(ctx)
+ if !s.obj.CheckPermissions(creds, fs.PermMask{Read: true}) {
// "IPC_STAT or SHM_STAT is requested and shm_perm.mode does not allow
// read access for shmid, and the calling process does not have the
// CAP_IPC_OWNER capability in the user namespace that governs its IPC
// namespace." - man shmctl(2)
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
var mode uint16
if s.pendingDestruction {
mode |= linux.SHM_DEST
}
- creds := auth.CredentialsFromContext(ctx)
// Use the reference count as a rudimentary count of the number of
// attaches. We exclude:
@@ -619,12 +593,12 @@ func (s *Shm) IPCStat(ctx context.Context) (*linux.ShmidDS, error) {
ds := &linux.ShmidDS{
ShmPerm: linux.IPCPerm{
- Key: uint32(s.key),
- UID: uint32(creds.UserNamespace.MapFromKUID(s.owner.UID)),
- GID: uint32(creds.UserNamespace.MapFromKGID(s.owner.GID)),
- CUID: uint32(creds.UserNamespace.MapFromKUID(s.creator.UID)),
- CGID: uint32(creds.UserNamespace.MapFromKGID(s.creator.GID)),
- Mode: mode | uint16(s.perms.LinuxMode()),
+ Key: uint32(s.obj.Key),
+ UID: uint32(creds.UserNamespace.MapFromKUID(s.obj.Owner.UID)),
+ GID: uint32(creds.UserNamespace.MapFromKGID(s.obj.Owner.GID)),
+ CUID: uint32(creds.UserNamespace.MapFromKUID(s.obj.Creator.UID)),
+ CGID: uint32(creds.UserNamespace.MapFromKGID(s.obj.Creator.GID)),
+ Mode: mode | uint16(s.obj.Perms.LinuxMode()),
Seq: 0, // IPC sequences not supported.
},
ShmSegsz: s.size,
@@ -644,25 +618,10 @@ func (s *Shm) Set(ctx context.Context, ds *linux.ShmidDS) error {
s.mu.Lock()
defer s.mu.Unlock()
- if !s.checkOwnership(ctx) {
- return syserror.EPERM
- }
-
- creds := auth.CredentialsFromContext(ctx)
- uid := creds.UserNamespace.MapToKUID(auth.UID(ds.ShmPerm.UID))
- gid := creds.UserNamespace.MapToKGID(auth.GID(ds.ShmPerm.GID))
- if !uid.Ok() || !gid.Ok() {
- return syserror.EINVAL
+ if err := s.obj.Set(ctx, &ds.ShmPerm); err != nil {
+ return err
}
- // User may only modify the lower 9 bits of the mode. All the other bits are
- // always 0 for the underlying inode.
- mode := linux.FileMode(ds.ShmPerm.Mode & 0x1ff)
- s.perms = fs.FilePermsFromMode(mode)
-
- s.owner.UID = uid
- s.owner.GID = gid
-
s.changeTime = ktime.NowFromContext(ctx)
return nil
}
@@ -690,40 +649,3 @@ func (s *Shm) MarkDestroyed(ctx context.Context) {
s.DecRef(ctx)
return
}
-
-// checkOwnership verifies whether a segment may be accessed by ctx as an
-// owner. See ipc/util.c:ipcctl_pre_down_nolock() in Linux.
-//
-// Precondition: Caller must hold s.mu.
-func (s *Shm) checkOwnership(ctx context.Context) bool {
- creds := auth.CredentialsFromContext(ctx)
- if s.owner.UID == creds.EffectiveKUID || s.creator.UID == creds.EffectiveKUID {
- return true
- }
-
- // Tasks with CAP_SYS_ADMIN may bypass ownership checks. Strangely, Linux
- // doesn't use CAP_IPC_OWNER for this despite CAP_IPC_OWNER being documented
- // for use to "override IPC ownership checks".
- return creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, s.registry.userNS)
-}
-
-// checkPermissions verifies whether a segment is accessible by ctx for access
-// described by req. See ipc/util.c:ipcperms() in Linux.
-//
-// Precondition: Caller must hold s.mu.
-func (s *Shm) checkPermissions(ctx context.Context, req fs.PermMask) bool {
- creds := auth.CredentialsFromContext(ctx)
-
- p := s.perms.Other
- if s.owner.UID == creds.EffectiveKUID {
- p = s.perms.User
- } else if creds.InGroup(s.owner.GID) {
- p = s.perms.Group
- }
- if p.SupersetOf(req) {
- return true
- }
-
- // Tasks with CAP_IPC_OWNER may bypass permission checks.
- return creds.HasCapabilityIn(linux.CAP_IPC_OWNER, s.registry.userNS)
-}
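
The removed checkPermissions/checkOwnership helpers above are consolidated into ipc.Object.CheckPermissions. A standalone sketch of the owner/group/other selection plus capability fallback that the consolidated check performs; permMask and perms are hypothetical stand-ins for fs.PermMask and fs.FilePermissions, and the capability check is reduced to a boolean parameter:

package main

import "fmt"

type permMask struct{ read, write bool }

// supersetOf reports whether p grants every permission q requires.
func (p permMask) supersetOf(q permMask) bool {
    return (p.read || !q.read) && (p.write || !q.write)
}

type perms struct{ user, group, other permMask }

type object struct {
    ownerUID, ownerGID uint32
    perms              perms
}

// checkPermissions picks the user/group/other slot for the caller and falls
// back to the CAP_IPC_OWNER check (reduced to a flag here), matching the logic
// that used to be duplicated in shm.checkPermissions and semaphore.checkPerms.
func (o *object) checkPermissions(uid, gid uint32, req permMask, hasIPCOwnerCap bool) bool {
    p := o.perms.other
    if o.ownerUID == uid {
        p = o.perms.user
    } else if o.ownerGID == gid {
        p = o.perms.group
    }
    if p.supersetOf(req) {
        return true
    }
    return hasIPCOwnerCap
}

func main() {
    o := &object{ownerUID: 1000, perms: perms{user: permMask{read: true, write: true}}}
    fmt.Println(o.checkPermissions(1000, 1000, permMask{read: true}, false)) // true
    fmt.Println(o.checkPermissions(2000, 2000, permMask{read: true}, false)) // false
}
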
diff --git a/pkg/sentry/kernel/signalfd/BUILD b/pkg/sentry/kernel/signalfd/BUILD
index 76d472292..4180ca28e 100644
--- a/pkg/sentry/kernel/signalfd/BUILD
+++ b/pkg/sentry/kernel/signalfd/BUILD
@@ -9,12 +9,12 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/fs",
"//pkg/sentry/fs/anon",
"//pkg/sentry/fs/fsutil",
"//pkg/sentry/kernel",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/kernel/signalfd/signalfd.go b/pkg/sentry/kernel/signalfd/signalfd.go
index f58ec4194..9c5e6698c 100644
--- a/pkg/sentry/kernel/signalfd/signalfd.go
+++ b/pkg/sentry/kernel/signalfd/signalfd.go
@@ -18,12 +18,12 @@ package signalfd
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -64,7 +64,7 @@ func New(ctx context.Context, mask linux.SignalSet) (*fs.File, error) {
t := kernel.TaskFromContext(ctx)
if t == nil {
// No task context? Not valid.
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
// name matches fs/signalfd.c:signalfd4.
dirent := fs.NewDirent(ctx, anon.NewInode(ctx), "anon_inode:[signalfd]")
@@ -98,7 +98,7 @@ func (s *SignalOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS
info, err := s.target.Sigtimedwait(s.Mask(), 0)
if err != nil {
// There must be no signal available.
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
// Copy out the signal info using the specified format.
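
SignalOperations.Read probes for a pending signal without sleeping (Sigtimedwait with a zero timeout) and reports ErrWouldBlock when nothing is queued, so the generic file layer can park the reader until the signalfd becomes readable. A standalone sketch of that convention, with a local stand-in error in place of linuxerr.ErrWouldBlock and the pending-signal source reduced to a slice:

package main

import (
    "errors"
    "fmt"
)

// errWouldBlock stands in for linuxerr.ErrWouldBlock in this sketch.
var errWouldBlock = errors.New("would block")

// readSignal returns the next pending signal, or "would block" if none is
// available, mirroring the non-blocking probe in SignalOperations.Read.
func readSignal(pending []int) (int, error) {
    if len(pending) == 0 {
        return 0, errWouldBlock
    }
    return pending[0], nil
}

func main() {
    _, err := readSignal(nil)
    fmt.Println(errors.Is(err, errWouldBlock)) // true
    sig, _ := readSignal([]int{10})
    fmt.Println(sig) // 10
}
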
diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index b21a6ad57..9a95bf44c 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -21,6 +21,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/inet"
@@ -29,10 +30,10 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/sched"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/platform"
+ "gvisor.dev/gvisor/pkg/sentry/seccheck"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -232,7 +233,7 @@ type Task struct {
// exitStatus is the task's exit status.
//
// exitStatus is protected by the signal mutex.
- exitStatus ExitStatus
+ exitStatus linux.WaitStatus
// syscallRestartBlock represents a custom restart function to run in
// restart_syscall(2) to resume an interrupted syscall.
@@ -846,21 +847,19 @@ func (t *Task) OOMScoreAdj() int32 {
// value should be between -1000 and 1000 inclusive.
func (t *Task) SetOOMScoreAdj(adj int32) error {
if adj > 1000 || adj < -1000 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
atomic.StoreInt32(&t.tg.oomScoreAdj, adj)
return nil
}
-// UID returns t's uid.
-// TODO(gvisor.dev/issue/170): This method is not namespaced yet.
-func (t *Task) UID() uint32 {
+// KUID returns t's kuid.
+func (t *Task) KUID() uint32 {
return uint32(t.Credentials().EffectiveKUID)
}
-// GID returns t's gid.
-// TODO(gvisor.dev/issue/170): This method is not namespaced yet.
-func (t *Task) GID() uint32 {
+// KGID returns t's kgid.
+func (t *Task) KGID() uint32 {
return uint32(t.Credentials().EffectiveKGID)
}
@@ -876,3 +875,23 @@ func (t *Task) ResetKcov() {
t.kcov = nil
}
}
+
+// Preconditions: The TaskSet mutex must be locked.
+func (t *Task) loadSeccheckInfoLocked(req seccheck.TaskFieldSet, mask *seccheck.TaskFieldSet, info *seccheck.TaskInfo) {
+ if req.Contains(seccheck.TaskFieldThreadID) {
+ info.ThreadID = int32(t.k.tasks.Root.tids[t])
+ mask.Add(seccheck.TaskFieldThreadID)
+ }
+ if req.Contains(seccheck.TaskFieldThreadStartTime) {
+ info.ThreadStartTime = t.startTime
+ mask.Add(seccheck.TaskFieldThreadStartTime)
+ }
+ if req.Contains(seccheck.TaskFieldThreadGroupID) {
+ info.ThreadGroupID = int32(t.k.tasks.Root.tgids[t.tg])
+ mask.Add(seccheck.TaskFieldThreadGroupID)
+ }
+ if req.Contains(seccheck.TaskFieldThreadGroupStartTime) {
+ info.ThreadGroupStartTime = t.tg.leader.startTime
+ mask.Add(seccheck.TaskFieldThreadGroupStartTime)
+ }
+}
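
loadSeccheckInfoLocked copies only the fields a checker asked for and records which ones were actually populated, so callers can tell an absent field from a zero value. A standalone sketch of that request-mask/populated-mask pattern; fieldSet and taskInfo are hypothetical stand-ins for seccheck.TaskFieldSet and seccheck.TaskInfo:

package main

import "fmt"

// fieldSet is a small bitmask of task fields: which ones a checker requested
// and which ones were actually filled in.
type fieldSet uint32

const (
    fieldThreadID fieldSet = 1 << iota
    fieldStartTime
)

func (f fieldSet) contains(x fieldSet) bool { return f&x != 0 }
func (f *fieldSet) add(x fieldSet)          { *f |= x }

type taskInfo struct {
    threadID  int32
    startTime int64
}

// loadInfo mirrors loadSeccheckInfoLocked: populate only the requested fields
// and record each one in mask as it is filled.
func loadInfo(req fieldSet, mask *fieldSet, info *taskInfo) {
    if req.contains(fieldThreadID) {
        info.threadID = 42 // the real code reads t.k.tasks.Root.tids[t]
        mask.add(fieldThreadID)
    }
    if req.contains(fieldStartTime) {
        info.startTime = 1234 // the real code reads t.startTime
        mask.add(fieldStartTime)
    }
}

func main() {
    var mask fieldSet
    var info taskInfo
    loadInfo(fieldThreadID, &mask, &info)
    fmt.Println(info.threadID, mask.contains(fieldStartTime)) // 42 false
}
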
diff --git a/pkg/sentry/kernel/task_acct.go b/pkg/sentry/kernel/task_acct.go
index e574997f7..dd364ae50 100644
--- a/pkg/sentry/kernel/task_acct.go
+++ b/pkg/sentry/kernel/task_acct.go
@@ -18,10 +18,10 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Getitimer implements getitimer(2).
@@ -44,7 +44,7 @@ func (t *Task) Getitimer(id int32) (linux.ItimerVal, error) {
s, _ = t.tg.itimerProfSetting.At(tm)
t.tg.signalHandlers.mu.Unlock()
default:
- return linux.ItimerVal{}, syserror.EINVAL
+ return linux.ItimerVal{}, linuxerr.EINVAL
}
val, iv := ktime.SpecFromSetting(tm, s)
return linux.ItimerVal{
@@ -105,7 +105,7 @@ func (t *Task) Setitimer(id int32, newitv linux.ItimerVal) (linux.ItimerVal, err
return linux.ItimerVal{}, err
}
default:
- return linux.ItimerVal{}, syserror.EINVAL
+ return linux.ItimerVal{}, linuxerr.EINVAL
}
oldval, oldiv := ktime.SpecFromSetting(tm, olds)
return linux.ItimerVal{
diff --git a/pkg/sentry/kernel/task_block.go b/pkg/sentry/kernel/task_block.go
index ecbe8f920..9bfc155e4 100644
--- a/pkg/sentry/kernel/task_block.go
+++ b/pkg/sentry/kernel/task_block.go
@@ -19,9 +19,9 @@ import (
"runtime/trace"
"time"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// BlockWithTimeout blocks t until an event is received from C, the application
@@ -32,7 +32,7 @@ import (
// and is unspecified if haveTimeout is false.
//
// - An error which is nil if an event is received from C, ETIMEDOUT if the timeout
-// expired, and syserror.ErrInterrupted if t is interrupted.
+// expired, and linuxerr.ErrInterrupted if t is interrupted.
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) BlockWithTimeout(C chan struct{}, haveTimeout bool, timeout time.Duration) (time.Duration, error) {
@@ -45,7 +45,7 @@ func (t *Task) BlockWithTimeout(C chan struct{}, haveTimeout bool, timeout time.
err := t.BlockWithDeadline(C, true, deadline)
// Timeout, explicitly return a remaining duration of 0.
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return 0, err
}
@@ -66,7 +66,7 @@ func (t *Task) BlockWithTimeout(C chan struct{}, haveTimeout bool, timeout time.
// application monotonic clock indicates a time of deadline (only if
// haveDeadline is true), or t is interrupted. It returns nil if an event is
// received from C, ETIMEDOUT if the deadline expired, and
-// syserror.ErrInterrupted if t is interrupted.
+// linuxerr.ErrInterrupted if t is interrupted.
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) BlockWithDeadline(C <-chan struct{}, haveDeadline bool, deadline ktime.Time) error {
@@ -94,7 +94,7 @@ func (t *Task) BlockWithDeadline(C <-chan struct{}, haveDeadline bool, deadline
// BlockWithTimer blocks t until an event is received from C or tchan, or t is
// interrupted. It returns nil if an event is received from C, ETIMEDOUT if an
-// event is received from tchan, and syserror.ErrInterrupted if t is
+// event is received from tchan, and linuxerr.ErrInterrupted if t is
// interrupted.
//
// Most clients should use BlockWithDeadline or BlockWithTimeout instead.
@@ -105,7 +105,7 @@ func (t *Task) BlockWithTimer(C <-chan struct{}, tchan <-chan struct{}) error {
}
// Block blocks t until an event is received from C or t is interrupted. It
-// returns nil if an event is received from C and syserror.ErrInterrupted if t
+// returns nil if an event is received from C and linuxerr.ErrInterrupted if t
// is interrupted.
//
// Preconditions: The caller must be running on the task goroutine.
@@ -156,13 +156,13 @@ func (t *Task) block(C <-chan struct{}, timerChan <-chan struct{}) error {
region.End()
t.SleepFinish(false)
// Return the indicated error on interrupt.
- return syserror.ErrInterrupted
+ return linuxerr.ErrInterrupted
case <-timerChan:
region.End()
t.SleepFinish(true)
// We've timed out.
- return syserror.ETIMEDOUT
+ return linuxerr.ETIMEDOUT
}
}
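
The block() select above maps the three wake-up sources onto distinct errors: nil for the event, linuxerr.ErrInterrupted for an interrupt, and linuxerr.ETIMEDOUT for the timer, which BlockWithTimeout then matches with linuxerr.Equals. A standalone sketch of the same selection, with local stand-in errors in place of the linuxerr values:

package main

import (
    "errors"
    "fmt"
    "time"
)

// Stand-ins for linuxerr.ETIMEDOUT and linuxerr.ErrInterrupted in this sketch.
var (
    errTimedOut    = errors.New("timed out")
    errInterrupted = errors.New("interrupted")
)

// block waits for an event, an interrupt, or a timer, and returns the error
// the caller is expected to translate, mirroring Task.block.
func block(event, interrupt <-chan struct{}, timer <-chan time.Time) error {
    select {
    case <-event:
        return nil
    case <-interrupt:
        return errInterrupted
    case <-timer:
        return errTimedOut
    }
}

func main() {
    event := make(chan struct{})
    interrupt := make(chan struct{})
    err := block(event, interrupt, time.After(10*time.Millisecond))
    fmt.Println(errors.Is(err, errTimedOut)) // true
}
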
diff --git a/pkg/sentry/kernel/task_cgroup.go b/pkg/sentry/kernel/task_cgroup.go
index 7c138e80f..828b90014 100644
--- a/pkg/sentry/kernel/task_cgroup.go
+++ b/pkg/sentry/kernel/task_cgroup.go
@@ -20,15 +20,13 @@ import (
"sort"
"strings"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/pkg/syserror"
)
// EnterInitialCgroups moves t into an initial set of cgroups.
//
// Precondition: t isn't in any cgroups yet, t.cgs is empty.
-//
-// +checklocksignore parent.mu is conditionally acquired.
func (t *Task) EnterInitialCgroups(parent *Task) {
var inherit map[Cgroup]struct{}
if parent != nil {
@@ -67,7 +65,7 @@ func (t *Task) EnterCgroup(c Cgroup) error {
//
// TODO(b/183137098): Implement cgroup migration.
log.Warningf("Cgroup migration is not implemented")
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
}
diff --git a/pkg/sentry/kernel/task_clone.go b/pkg/sentry/kernel/task_clone.go
index 405771f3f..26a981f36 100644
--- a/pkg/sentry/kernel/task_clone.go
+++ b/pkg/sentry/kernel/task_clone.go
@@ -20,147 +20,47 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
"gvisor.dev/gvisor/pkg/cleanup"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/inet"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/sentry/seccheck"
"gvisor.dev/gvisor/pkg/usermem"
)
-// SharingOptions controls what resources are shared by a new task created by
-// Task.Clone, or an existing task affected by Task.Unshare.
-type SharingOptions struct {
- // If NewAddressSpace is true, the task should have an independent virtual
- // address space.
- NewAddressSpace bool
-
- // If NewSignalHandlers is true, the task should use an independent set of
- // signal handlers.
- NewSignalHandlers bool
-
- // If NewThreadGroup is true, the task should be the leader of its own
- // thread group. TerminationSignal is the signal that the thread group
- // will send to its parent when it exits. If NewThreadGroup is false,
- // TerminationSignal is ignored.
- NewThreadGroup bool
- TerminationSignal linux.Signal
-
- // If NewPIDNamespace is true:
- //
- // - In the context of Task.Clone, the new task should be the init task
- // (TID 1) in a new PID namespace.
- //
- // - In the context of Task.Unshare, the task should create a new PID
- // namespace, and all subsequent clones of the task should be members of
- // the new PID namespace.
- NewPIDNamespace bool
-
- // If NewUserNamespace is true, the task should have an independent user
- // namespace.
- NewUserNamespace bool
-
- // If NewNetworkNamespace is true, the task should have an independent
- // network namespace.
- NewNetworkNamespace bool
-
- // If NewFiles is true, the task should use an independent file descriptor
- // table.
- NewFiles bool
-
- // If NewFSContext is true, the task should have an independent FSContext.
- NewFSContext bool
-
- // If NewUTSNamespace is true, the task should have an independent UTS
- // namespace.
- NewUTSNamespace bool
-
- // If NewIPCNamespace is true, the task should have an independent IPC
- // namespace.
- NewIPCNamespace bool
-}
-
-// CloneOptions controls the behavior of Task.Clone.
-type CloneOptions struct {
- // SharingOptions defines the set of resources that the new task will share
- // with its parent.
- SharingOptions
-
- // Stack is the initial stack pointer of the new task. If Stack is 0, the
- // new task will start with the same stack pointer as its parent.
- Stack hostarch.Addr
-
- // If SetTLS is true, set the new task's TLS (thread-local storage)
- // descriptor to TLS. If SetTLS is false, TLS is ignored.
- SetTLS bool
- TLS hostarch.Addr
-
- // If ChildClearTID is true, when the child exits, 0 is written to the
- // address ChildTID in the child's memory, and if the write is successful a
- // futex wake on the same address is performed.
- //
- // If ChildSetTID is true, the child's thread ID (in the child's PID
- // namespace) is written to address ChildTID in the child's memory. (As in
- // Linux, failed writes are silently ignored.)
- ChildClearTID bool
- ChildSetTID bool
- ChildTID hostarch.Addr
-
- // If ParentSetTID is true, the child's thread ID (in the parent's PID
- // namespace) is written to address ParentTID in the parent's memory. (As
- // in Linux, failed writes are silently ignored.)
- //
- // Older versions of the clone(2) man page state that CLONE_PARENT_SETTID
- // causes the child's thread ID to be written to ptid in both the parent
- // and child's memory, but this is a documentation error fixed by
- // 87ab04792ced ("clone.2: Fix description of CLONE_PARENT_SETTID").
- ParentSetTID bool
- ParentTID hostarch.Addr
-
- // If Vfork is true, place the parent in vforkStop until the cloned task
- // releases its TaskImage.
- Vfork bool
-
- // If Untraced is true, do not report PTRACE_EVENT_CLONE/FORK/VFORK for
- // this clone(), and do not ptrace-attach the caller's tracer to the new
- // task. (PTRACE_EVENT_VFORK_DONE will still be reported if appropriate).
- Untraced bool
-
- // If InheritTracer is true, ptrace-attach the caller's tracer to the new
- // task, even if no PTRACE_EVENT_CLONE/FORK/VFORK event would be reported
- // for it. If both Untraced and InheritTracer are true, no event will be
- // reported, but tracer inheritance will still occur.
- InheritTracer bool
-}
-
// Clone implements the clone(2) syscall and returns the thread ID of the new
// task in t's PID namespace. Clone may return both a non-zero thread ID and a
// non-nil error.
//
// Preconditions: The caller must be running Task.doSyscallInvoke on the task
// goroutine.
-func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
+func (t *Task) Clone(args *linux.CloneArgs) (ThreadID, *SyscallControl, error) {
// Since signal actions may refer to application signal handlers by virtual
// address, any set of signal handlers must refer to the same address
// space.
- if !opts.NewSignalHandlers && opts.NewAddressSpace {
- return 0, nil, syserror.EINVAL
+ if args.Flags&(linux.CLONE_SIGHAND|linux.CLONE_VM) == linux.CLONE_SIGHAND {
+ return 0, nil, linuxerr.EINVAL
}
// In order for the behavior of thread-group-directed signals to be sane,
// all tasks in a thread group must share signal handlers.
- if !opts.NewThreadGroup && opts.NewSignalHandlers {
- return 0, nil, syserror.EINVAL
+ if args.Flags&(linux.CLONE_THREAD|linux.CLONE_SIGHAND) == linux.CLONE_THREAD {
+ return 0, nil, linuxerr.EINVAL
}
// All tasks in a thread group must be in the same PID namespace.
- if !opts.NewThreadGroup && (opts.NewPIDNamespace || t.childPIDNamespace != nil) {
- return 0, nil, syserror.EINVAL
+ if (args.Flags&linux.CLONE_THREAD != 0) && (args.Flags&linux.CLONE_NEWPID != 0 || t.childPIDNamespace != nil) {
+ return 0, nil, linuxerr.EINVAL
}
// The two different ways of specifying a new PID namespace are
// incompatible.
- if opts.NewPIDNamespace && t.childPIDNamespace != nil {
- return 0, nil, syserror.EINVAL
+ if args.Flags&linux.CLONE_NEWPID != 0 && t.childPIDNamespace != nil {
+ return 0, nil, linuxerr.EINVAL
}
// Thread groups and FS contexts cannot span user namespaces.
- if opts.NewUserNamespace && (!opts.NewThreadGroup || !opts.NewFSContext) {
- return 0, nil, syserror.EINVAL
+ if args.Flags&linux.CLONE_NEWUSER != 0 && args.Flags&(linux.CLONE_THREAD|linux.CLONE_FS) != 0 {
+ return 0, nil, linuxerr.EINVAL
+ }
+ // args.ExitSignal must be a valid signal.
+ if args.ExitSignal != 0 && !linux.Signal(args.ExitSignal).IsValid() {
+ return 0, nil, linuxerr.EINVAL
}
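
The rewritten checks above fold the old boolean options into bit tests on args.Flags. A minimal standalone sketch of the same masking idiom, with locally defined stand-ins for the CLONE_* constants (the real values live in pkg/abi/linux), shows why flags&(CLONE_SIGHAND|CLONE_VM) == CLONE_SIGHAND isolates exactly the "shared signal handlers without a shared address space" case:

package main

import (
	"errors"
	"fmt"
)

// Stand-in flag values, for illustration only.
const (
	cloneVM      = 0x00000100
	cloneSighand = 0x00000800
	cloneThread  = 0x00010000
)

var errInvalid = errors.New("EINVAL")

// validate mirrors the shape of the masked checks: asking for shared signal
// handlers without a shared address space, or for a shared thread group
// without shared signal handlers, is rejected.
func validate(flags uint64) error {
	if flags&(cloneSighand|cloneVM) == cloneSighand {
		return errInvalid // CLONE_SIGHAND requires CLONE_VM.
	}
	if flags&(cloneThread|cloneSighand) == cloneThread {
		return errInvalid // CLONE_THREAD requires CLONE_SIGHAND.
	}
	return nil
}

func main() {
	fmt.Println(validate(cloneSighand))           // EINVAL
	fmt.Println(validate(cloneSighand | cloneVM)) // <nil>
}
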
// Pull task registers and FPU state, a cloned task will inherit the
@@ -174,7 +74,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
// user_namespaces(7)
creds := t.Credentials()
userns := creds.UserNamespace
- if opts.NewUserNamespace {
+ if args.Flags&linux.CLONE_NEWUSER != 0 {
var err error
// "EPERM (since Linux 3.9): CLONE_NEWUSER was specified in flags and
// the caller is in a chroot environment (i.e., the caller's root
@@ -182,28 +82,26 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
// in which it resides)." - clone(2). Neither chroot(2) nor
// user_namespaces(7) document this.
if t.IsChrooted() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
userns, err = creds.NewChildUserNamespace()
if err != nil {
return 0, nil, err
}
}
- if (opts.NewPIDNamespace || opts.NewNetworkNamespace || opts.NewUTSNamespace) && !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, userns) {
- return 0, nil, syserror.EPERM
+ if args.Flags&(linux.CLONE_NEWPID|linux.CLONE_NEWNET|linux.CLONE_NEWUTS|linux.CLONE_NEWIPC) != 0 && !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, userns) {
+ return 0, nil, linuxerr.EPERM
}
utsns := t.UTSNamespace()
- if opts.NewUTSNamespace {
+ if args.Flags&linux.CLONE_NEWUTS != 0 {
// Note that this must happen after NewUserNamespace so we get
// the new userns if there is one.
utsns = t.UTSNamespace().Clone(userns)
}
ipcns := t.IPCNamespace()
- if opts.NewIPCNamespace {
- // Note that "If CLONE_NEWIPC is set, then create the process in a new IPC
- // namespace"
+ if args.Flags&linux.CLONE_NEWIPC != 0 {
ipcns = NewIPCNamespace(userns)
} else {
ipcns.IncRef()
@@ -214,7 +112,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
defer cu.Clean()
netns := t.NetworkNamespace()
- if opts.NewNetworkNamespace {
+ if args.Flags&linux.CLONE_NEWNET != 0 {
netns = inet.NewNamespace(netns)
}
@@ -227,7 +125,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
})
}
- image, err := t.image.Fork(t, t.k, !opts.NewAddressSpace)
+ image, err := t.image.Fork(t, t.k, args.Flags&linux.CLONE_VM != 0)
if err != nil {
return 0, nil, err
}
@@ -236,17 +134,17 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
})
// clone() returns 0 in the child.
image.Arch.SetReturn(0)
- if opts.Stack != 0 {
- image.Arch.SetStack(uintptr(opts.Stack))
+ if args.Stack != 0 {
+ image.Arch.SetStack(uintptr(args.Stack))
}
- if opts.SetTLS {
- if !image.Arch.SetTLS(uintptr(opts.TLS)) {
- return 0, nil, syserror.EPERM
+ if args.Flags&linux.CLONE_SETTLS != 0 {
+ if !image.Arch.SetTLS(uintptr(args.TLS)) {
+ return 0, nil, linuxerr.EPERM
}
}
var fsContext *FSContext
- if opts.NewFSContext {
+ if args.Flags&linux.CLONE_FS == 0 {
fsContext = t.fsContext.Fork()
} else {
fsContext = t.fsContext
@@ -254,7 +152,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
}
var fdTable *FDTable
- if opts.NewFiles {
+ if args.Flags&linux.CLONE_FILES == 0 {
fdTable = t.fdTable.Fork(t)
} else {
fdTable = t.fdTable
@@ -264,22 +162,22 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
pidns := t.tg.pidns
if t.childPIDNamespace != nil {
pidns = t.childPIDNamespace
- } else if opts.NewPIDNamespace {
+ } else if args.Flags&linux.CLONE_NEWPID != 0 {
pidns = pidns.NewChild(userns)
}
tg := t.tg
rseqAddr := hostarch.Addr(0)
rseqSignature := uint32(0)
- if opts.NewThreadGroup {
+ if args.Flags&linux.CLONE_THREAD == 0 {
if tg.mounts != nil {
tg.mounts.IncRef()
}
sh := t.tg.signalHandlers
- if opts.NewSignalHandlers {
+ if args.Flags&linux.CLONE_SIGHAND == 0 {
sh = sh.Fork()
}
- tg = t.k.NewThreadGroup(tg.mounts, pidns, sh, opts.TerminationSignal, tg.limits.GetCopy())
+ tg = t.k.NewThreadGroup(tg.mounts, pidns, sh, linux.Signal(args.ExitSignal), tg.limits.GetCopy())
tg.oomScoreAdj = atomic.LoadInt32(&t.tg.oomScoreAdj)
rseqAddr = t.rseqAddr
rseqSignature = t.rseqSignature
@@ -304,7 +202,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
RSeqSignature: rseqSignature,
ContainerID: t.ContainerID(),
}
- if opts.NewThreadGroup {
+ if args.Flags&linux.CLONE_THREAD == 0 {
cfg.Parent = t
} else {
cfg.InheritParent = t
@@ -322,7 +220,7 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
//
// However kernel/fork.c:copy_process() adds a limitation to this:
// "sigaltstack should be cleared when sharing the same VM".
- if opts.NewAddressSpace || opts.Vfork {
+ if args.Flags&linux.CLONE_VM == 0 || args.Flags&linux.CLONE_VFORK != 0 {
nt.SetSignalStack(t.SignalStack())
}
@@ -338,7 +236,23 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
// nt that it must receive before its task goroutine starts running.
tid := nt.k.tasks.Root.IDOfTask(nt)
defer nt.Start(tid)
- t.traceCloneEvent(tid)
+
+ if seccheck.Global.Enabled(seccheck.PointClone) {
+ mask, info := getCloneSeccheckInfo(t, nt, args)
+ if err := seccheck.Global.Clone(t, mask, &info); err != nil {
+ // nt has been visible to the rest of the system since NewTask, so
+ // it may be blocking execve or a group stop, have been notified
+ // for group signal delivery, had children reparented to it, etc.
+ // Thus we can't just drop it on the floor. Instead, instruct the
+ // task goroutine to exit immediately, as quietly as possible.
+ nt.exitTracerNotified = true
+ nt.exitTracerAcked = true
+ nt.exitParentNotified = true
+ nt.exitParentAcked = true
+ nt.runState = (*runExitMain)(nil)
+ return 0, nil, err
+ }
+ }
// "If fork/clone and execve are allowed by @prog, any child processes will
// be constrained to the same filters and system call ABI as the parent." -
@@ -347,41 +261,58 @@ func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) {
copiedFilters := append([]bpf.Program(nil), f.([]bpf.Program)...)
nt.syscallFilters.Store(copiedFilters)
}
- if opts.Vfork {
+ if args.Flags&linux.CLONE_VFORK != 0 {
nt.vforkParent = t
}
- if opts.ChildClearTID {
- nt.SetClearTID(opts.ChildTID)
+ if args.Flags&linux.CLONE_CHILD_CLEARTID != 0 {
+ nt.SetClearTID(hostarch.Addr(args.ChildTID))
}
- if opts.ChildSetTID {
+ if args.Flags&linux.CLONE_CHILD_SETTID != 0 {
ctid := nt.ThreadID()
- ctid.CopyOut(nt.CopyContext(t, usermem.IOOpts{AddressSpaceActive: false}), opts.ChildTID)
+ ctid.CopyOut(nt.CopyContext(t, usermem.IOOpts{AddressSpaceActive: false}), hostarch.Addr(args.ChildTID))
}
ntid := t.tg.pidns.IDOfTask(nt)
- if opts.ParentSetTID {
- ntid.CopyOut(t, opts.ParentTID)
+ if args.Flags&linux.CLONE_PARENT_SETTID != 0 {
+ ntid.CopyOut(t, hostarch.Addr(args.ParentTID))
}
+ t.traceCloneEvent(tid)
kind := ptraceCloneKindClone
- if opts.Vfork {
+ if args.Flags&linux.CLONE_VFORK != 0 {
kind = ptraceCloneKindVfork
- } else if opts.TerminationSignal == linux.SIGCHLD {
+ } else if linux.Signal(args.ExitSignal) == linux.SIGCHLD {
kind = ptraceCloneKindFork
}
- if t.ptraceClone(kind, nt, opts) {
- if opts.Vfork {
+ if t.ptraceClone(kind, nt, args) {
+ if args.Flags&linux.CLONE_VFORK != 0 {
return ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{vforkChild: nt, vforkChildTID: ntid}}, nil
}
return ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{}}, nil
}
- if opts.Vfork {
+ if args.Flags&linux.CLONE_VFORK != 0 {
t.maybeBeginVforkStop(nt)
return ntid, &SyscallControl{next: &runSyscallAfterVforkStop{childTID: ntid}}, nil
}
return ntid, nil, nil
}
+func getCloneSeccheckInfo(t, nt *Task, args *linux.CloneArgs) (seccheck.CloneFieldSet, seccheck.CloneInfo) {
+ req := seccheck.Global.CloneReq()
+ info := seccheck.CloneInfo{
+ Credentials: t.Credentials(),
+ Args: *args,
+ }
+ var mask seccheck.CloneFieldSet
+ mask.Add(seccheck.CloneFieldCredentials)
+ mask.Add(seccheck.CloneFieldArgs)
+ t.k.tasks.mu.RLock()
+ defer t.k.tasks.mu.RUnlock()
+ t.loadSeccheckInfoLocked(req.Invoker, &mask.Invoker, &info.Invoker)
+ nt.loadSeccheckInfoLocked(req.Created, &mask.Created, &info.Created)
+ return mask, info
+}
+
// maybeBeginVforkStop checks if a previously-started vfork child is still
// running and has not yet released its MM, such that its parent t should enter
// a vforkStop.
@@ -446,39 +377,47 @@ func (r *runSyscallAfterVforkStop) execute(t *Task) taskRunState {
}
// Unshare changes the set of resources t shares with other tasks, as specified
-// by opts.
+// by flags.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) Unshare(opts *SharingOptions) error {
- // In Linux unshare(2), NewThreadGroup implies NewSignalHandlers and
- // NewSignalHandlers implies NewAddressSpace. All three flags are no-ops if
- // t is the only task using its MM, which due to clone(2)'s rules imply
- // that it is also the only task using its signal handlers / in its thread
- // group, and cause EINVAL to be returned otherwise.
+func (t *Task) Unshare(flags int32) error {
+ // "CLONE_THREAD, CLONE_SIGHAND, and CLONE_VM can be specified in flags if
+ // the caller is single threaded (i.e., it is not sharing its address space
+ // with another process or thread). In this case, these flags have no
+ // effect. (Note also that specifying CLONE_THREAD automatically implies
+ // CLONE_VM, and specifying CLONE_VM automatically implies CLONE_SIGHAND.)
+ // If the process is multithreaded, then the use of these flags results in
+ // an error." - unshare(2). This is incorrect (cf.
+ // kernel/fork.c:ksys_unshare()):
+ //
+ // - CLONE_THREAD does not imply CLONE_VM.
+ //
+ // - CLONE_SIGHAND implies CLONE_THREAD.
+ //
+ // - Only CLONE_VM requires that the caller is not sharing its address
+ // space with another thread. CLONE_SIGHAND requires that the caller is not
+ // sharing its signal handlers, and CLONE_THREAD requires that the caller
+ // is the only thread in its thread group.
//
// Since we don't count the number of tasks using each address space or set
- // of signal handlers, we reject NewSignalHandlers and NewAddressSpace
- // altogether, and interpret NewThreadGroup as requiring that t be the only
- // member of its thread group. This seems to be logically coherent, in the
- // sense that clone(2) allows a task to share signal handlers and address
- // spaces with tasks in other thread groups.
- if opts.NewAddressSpace || opts.NewSignalHandlers {
- return syserror.EINVAL
+ // of signal handlers, we reject CLONE_VM and CLONE_SIGHAND altogether.
+ if flags&(linux.CLONE_VM|linux.CLONE_SIGHAND) != 0 {
+ return linuxerr.EINVAL
}
creds := t.Credentials()
- if opts.NewThreadGroup {
+ if flags&linux.CLONE_THREAD != 0 {
t.tg.signalHandlers.mu.Lock()
if t.tg.tasksCount != 1 {
t.tg.signalHandlers.mu.Unlock()
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
t.tg.signalHandlers.mu.Unlock()
// This isn't racy because we're the only living task, and therefore
// the only task capable of creating new ones, in our thread group.
}
- if opts.NewUserNamespace {
+ if flags&linux.CLONE_NEWUSER != 0 {
if t.IsChrooted() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
newUserNS, err := creds.NewChildUserNamespace()
if err != nil {
@@ -492,34 +431,34 @@ func (t *Task) Unshare(opts *SharingOptions) error {
creds = t.Credentials()
}
haveCapSysAdmin := t.HasCapability(linux.CAP_SYS_ADMIN)
- if opts.NewPIDNamespace {
+ if flags&linux.CLONE_NEWPID != 0 {
if !haveCapSysAdmin {
- return syserror.EPERM
+ return linuxerr.EPERM
}
t.childPIDNamespace = t.tg.pidns.NewChild(t.UserNamespace())
}
t.mu.Lock()
// Can't defer unlock: DecRefs must occur without holding t.mu.
- if opts.NewNetworkNamespace {
+ if flags&linux.CLONE_NEWNET != 0 {
if !haveCapSysAdmin {
t.mu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
t.netns = inet.NewNamespace(t.netns)
}
- if opts.NewUTSNamespace {
+ if flags&linux.CLONE_NEWUTS != 0 {
if !haveCapSysAdmin {
t.mu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Note that this must happen after NewUserNamespace, so the
// new user namespace is used if there is one.
t.utsns = t.utsns.Clone(creds.UserNamespace)
}
- if opts.NewIPCNamespace {
+ if flags&linux.CLONE_NEWIPC != 0 {
if !haveCapSysAdmin {
t.mu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Note that "If CLONE_NEWIPC is set, then create the process in a new IPC
// namespace"
@@ -527,12 +466,12 @@ func (t *Task) Unshare(opts *SharingOptions) error {
t.ipcns = NewIPCNamespace(creds.UserNamespace)
}
var oldFDTable *FDTable
- if opts.NewFiles {
+ if flags&linux.CLONE_FILES != 0 {
oldFDTable = t.fdTable
t.fdTable = oldFDTable.Fork(t)
}
var oldFSContext *FSContext
- if opts.NewFSContext {
+ if flags&linux.CLONE_FS != 0 {
oldFSContext = t.fsContext
t.fsContext = oldFSContext.Fork()
}
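
Unshare now takes the raw flag word as well. The acceptance rules spelled out in the comments above condense to a small predicate; the sketch below is a hypothetical summary with local constants and error values, not the gVisor implementation:

package main

import (
	"errors"
	"fmt"
)

// Illustrative constants; the real values are in pkg/abi/linux.
const (
	cloneVM      = 0x00000100
	cloneSighand = 0x00000800
	cloneThread  = 0x00010000
	cloneNewPID  = 0x20000000
	cloneNewNet  = 0x40000000
)

var (
	errInvalid = errors.New("EINVAL")
	errPerm    = errors.New("EPERM")
)

// checkUnshare condenses the gating decisions in one place.
func checkUnshare(flags int32, tasksInGroup int, hasCapSysAdmin bool) error {
	if flags&(cloneVM|cloneSighand) != 0 {
		// Address-space and signal-handler sharing counts aren't tracked,
		// so these are rejected unconditionally.
		return errInvalid
	}
	if flags&cloneThread != 0 && tasksInGroup != 1 {
		// CLONE_THREAD requires the caller to be the only task in its
		// thread group.
		return errInvalid
	}
	if flags&(cloneNewPID|cloneNewNet) != 0 && !hasCapSysAdmin {
		return errPerm
	}
	return nil
}

func main() {
	fmt.Println(checkUnshare(cloneThread, 1, false)) // <nil>
	fmt.Println(checkUnshare(cloneVM, 1, true))      // EINVAL
}
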
diff --git a/pkg/sentry/kernel/task_exec.go b/pkg/sentry/kernel/task_exec.go
index cf8571262..db91fc4d8 100644
--- a/pkg/sentry/kernel/task_exec.go
+++ b/pkg/sentry/kernel/task_exec.go
@@ -66,10 +66,10 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// execStop is a TaskStop that a task sets on itself when it wants to execve
@@ -97,7 +97,7 @@ func (t *Task) Execve(newImage *TaskImage) (*SyscallControl, error) {
// We lost to a racing group-exit, kill, or exec from another thread
// and should just exit.
newImage.release()
- return nil, syserror.EINTR
+ return nil, linuxerr.EINTR
}
// Cancel any racing group stops.
@@ -222,9 +222,15 @@ func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState {
// Update credentials to reflect the execve. This should precede switching
// MMs to ensure that dumpability has been reset first, if needed.
t.updateCredsForExecLocked()
- t.image.release()
+ oldImage := t.image
t.image = *r.image
t.mu.Unlock()
+
+ // Don't hold t.mu while calling t.image.release(), which may
+ // attempt to acquire TaskImage.MemoryManager.mappingMu, a lock order
+ // violation.
+ oldImage.release()
+
t.unstopVforkParent()
t.p.FullStateChanged()
// NOTE(b/30316266): All locks must be dropped prior to calling Activate.
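
The execve change keeps the old TaskImage alive until t.mu has been dropped, because release() may need MemoryManager.mappingMu. The underlying pattern is generic: swap the resource out under the lock, finalize it afterwards. A self-contained sketch with a plain sync.Mutex and a hypothetical resource type:

package main

import (
	"fmt"
	"sync"
)

type resource struct{ name string }

func (r *resource) release() { fmt.Println("released", r.name) }

type holder struct {
	mu  sync.Mutex
	res *resource
}

// swap installs a new resource and releases the old one only after the
// mutex has been dropped, so release() may take other locks without
// creating a lock-order cycle with h.mu.
func (h *holder) swap(newRes *resource) {
	h.mu.Lock()
	old := h.res
	h.res = newRes
	h.mu.Unlock()

	old.release()
}

func main() {
	h := &holder{res: &resource{name: "old image"}}
	h.swap(&resource{name: "new image"})
}
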
diff --git a/pkg/sentry/kernel/task_exit.go b/pkg/sentry/kernel/task_exit.go
index d115b8783..b3931445b 100644
--- a/pkg/sentry/kernel/task_exit.go
+++ b/pkg/sentry/kernel/task_exit.go
@@ -28,66 +28,14 @@ import (
"errors"
"fmt"
"strconv"
- "strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/waiter"
)
-// An ExitStatus is a value communicated from an exiting task or thread group
-// to the party that reaps it.
-//
-// +stateify savable
-type ExitStatus struct {
- // Code is the numeric value passed to the call to exit or exit_group that
- // caused the exit. If the exit was not caused by such a call, Code is 0.
- Code int
-
- // Signo is the signal that caused the exit. If the exit was not caused by
- // a signal, Signo is 0.
- Signo int
-}
-
-func (es ExitStatus) String() string {
- var b strings.Builder
- if code := es.Code; code != 0 {
- if b.Len() != 0 {
- b.WriteByte(' ')
- }
- _, _ = fmt.Fprintf(&b, "Code=%d", code)
- }
- if signal := es.Signo; signal != 0 {
- if b.Len() != 0 {
- b.WriteByte(' ')
- }
- _, _ = fmt.Fprintf(&b, "Signal=%d", signal)
- }
- return b.String()
-}
-
-// Signaled returns true if the ExitStatus indicates that the exiting task or
-// thread group was killed by a signal.
-func (es ExitStatus) Signaled() bool {
- return es.Signo != 0
-}
-
-// Status returns the numeric representation of the ExitStatus returned by e.g.
-// the wait4() system call.
-func (es ExitStatus) Status() uint32 {
- return ((uint32(es.Code) & 0xff) << 8) | (uint32(es.Signo) & 0xff)
-}
-
-// ShellExitCode returns the numeric exit code that Bash would return for an
-// exit status of es.
-func (es ExitStatus) ShellExitCode() int {
- if es.Signaled() {
- return 128 + es.Signo
- }
- return es.Code
-}
-
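
The removed ExitStatus type exposed the raw wait(2) bit layout that linux.WaitStatus now owns. For reference, the encoding that Status() and ShellExitCode() computed can be reproduced with a few local helpers (illustration only):

package main

import "fmt"

// waitStatusExit encodes a normal exit the way the removed Status() did:
// the signal occupies the low byte (zero here), the exit code the next byte.
func waitStatusExit(code int) uint32 { return (uint32(code) & 0xff) << 8 }

// waitStatusSignaled encodes termination by signal.
func waitStatusSignaled(sig int) uint32 { return uint32(sig) & 0xff }

// shellExitCode mirrors the removed ShellExitCode(): shells report
// 128+signal for signal deaths and the plain code otherwise.
func shellExitCode(code, sig int) int {
	if sig != 0 {
		return 128 + sig
	}
	return code
}

func main() {
	fmt.Printf("exit(3) -> %#x\n", waitStatusExit(3))     // 0x300
	fmt.Printf("SIGKILL -> %#x\n", waitStatusSignaled(9)) // 0x9
	fmt.Println("shell sees:", shellExitCode(0, 9))       // 137
}
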
// TaskExitState represents a step in the task exit path.
//
// "Exiting" and "exited" are often ambiguous; prefer to name specific states.
@@ -163,13 +111,13 @@ func (t *Task) killedLocked() bool {
return t.pendingSignals.pendingSet&linux.SignalSetOf(linux.SIGKILL) != 0
}
-// PrepareExit indicates an exit with status es.
+// PrepareExit indicates an exit with the given status.
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) PrepareExit(es ExitStatus) {
+func (t *Task) PrepareExit(ws linux.WaitStatus) {
t.tg.signalHandlers.mu.Lock()
defer t.tg.signalHandlers.mu.Unlock()
- t.exitStatus = es
+ t.exitStatus = ws
}
// PrepareGroupExit indicates a group exit with status es to t's thread group.
@@ -180,7 +128,7 @@ func (t *Task) PrepareExit(es ExitStatus) {
// ptrace.)
//
// Preconditions: The caller must be running on the task goroutine.
-func (t *Task) PrepareGroupExit(es ExitStatus) {
+func (t *Task) PrepareGroupExit(ws linux.WaitStatus) {
t.tg.signalHandlers.mu.Lock()
defer t.tg.signalHandlers.mu.Unlock()
if t.tg.exiting || t.tg.execing != nil {
@@ -198,8 +146,8 @@ func (t *Task) PrepareGroupExit(es ExitStatus) {
return
}
t.tg.exiting = true
- t.tg.exitStatus = es
- t.exitStatus = es
+ t.tg.exitStatus = ws
+ t.exitStatus = ws
for sibling := t.tg.tasks.Front(); sibling != nil; sibling = sibling.Next() {
if sibling != t {
sibling.killLocked()
@@ -207,11 +155,11 @@ func (t *Task) PrepareGroupExit(es ExitStatus) {
}
}
-// Kill requests that all tasks in ts exit as if group exiting with status es.
+// Kill requests that all tasks in ts exit as if group exiting with status ws.
// Kill does not wait for tasks to exit.
//
// Kill has no analogue in Linux; it's provided for save/restore only.
-func (ts *TaskSet) Kill(es ExitStatus) {
+func (ts *TaskSet) Kill(ws linux.WaitStatus) {
ts.mu.Lock()
defer ts.mu.Unlock()
ts.Root.exiting = true
@@ -219,7 +167,7 @@ func (ts *TaskSet) Kill(es ExitStatus) {
t.tg.signalHandlers.mu.Lock()
if !t.tg.exiting {
t.tg.exiting = true
- t.tg.exitStatus = es
+ t.tg.exitStatus = ws
}
t.killLocked()
t.tg.signalHandlers.mu.Unlock()
@@ -282,9 +230,16 @@ func (*runExitMain) execute(t *Task) taskRunState {
t.tg.pidns.owner.mu.Lock()
t.updateRSSLocked()
t.tg.pidns.owner.mu.Unlock()
+
+ // Release the task image resources. Accessing these fields must be
+ // done with t.mu held, but the mm.DecUsers() call must be done outside
+ // of that lock.
t.mu.Lock()
- t.image.release()
+ mm := t.image.MemoryManager
+ t.image.MemoryManager = nil
+ t.image.fu = nil
t.mu.Unlock()
+ mm.DecUsers(t)
// Releasing the MM unblocks a blocked CLONE_VFORK parent.
t.unstopVforkParent()
@@ -730,10 +685,10 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *linux.S
info.SetUID(int32(t.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
if t.exitStatus.Signaled() {
info.Code = linux.CLD_KILLED
- info.SetStatus(int32(t.exitStatus.Signo))
+ info.SetStatus(int32(t.exitStatus.TerminationSignal()))
} else {
info.Code = linux.CLD_EXITED
- info.SetStatus(int32(t.exitStatus.Code))
+ info.SetStatus(int32(t.exitStatus.ExitStatus()))
}
// TODO(b/72102453): Set utime, stime.
return info
@@ -741,7 +696,7 @@ func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *linux.S
// ExitStatus returns t's exit status, which is only guaranteed to be
// meaningful if t.ExitState() != TaskExitNone.
-func (t *Task) ExitStatus() ExitStatus {
+func (t *Task) ExitStatus() linux.WaitStatus {
t.tg.pidns.owner.mu.RLock()
defer t.tg.pidns.owner.mu.RUnlock()
t.tg.signalHandlers.mu.Lock()
@@ -751,7 +706,7 @@ func (t *Task) ExitStatus() ExitStatus {
// ExitStatus returns the exit status that would be returned by a consuming
// wait*() on tg.
-func (tg *ThreadGroup) ExitStatus() ExitStatus {
+func (tg *ThreadGroup) ExitStatus() linux.WaitStatus {
tg.pidns.owner.mu.RLock()
defer tg.pidns.owner.mu.RUnlock()
tg.signalHandlers.mu.Lock()
@@ -762,7 +717,9 @@ func (tg *ThreadGroup) ExitStatus() ExitStatus {
return tg.leader.exitStatus
}
-// TerminationSignal returns the thread group's termination signal.
+// TerminationSignal returns the thread group's termination signal, which is
+// the signal that will be sent to its leader's parent when all threads have
+// exited.
func (tg *ThreadGroup) TerminationSignal() linux.Signal {
tg.pidns.owner.mu.RLock()
defer tg.pidns.owner.mu.RUnlock()
@@ -888,8 +845,8 @@ type WaitResult struct {
// Event is exactly one of the events defined above.
Event waiter.EventMask
- // Status is the numeric status associated with the event.
- Status uint32
+ // Status is the wait status associated with the event.
+ Status linux.WaitStatus
}
// Wait waits for an event from a thread group that is a child of t's thread
@@ -909,7 +866,7 @@ func (t *Task) Wait(opts *WaitOptions) (*WaitResult, error) {
return wr, err
}
if err := t.Block(ch); err != nil {
- return wr, syserror.ConvertIntr(err, opts.BlockInterruptErr)
+ return wr, syserr.ConvertIntr(err, opts.BlockInterruptErr)
}
}
}
@@ -942,7 +899,7 @@ func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {
if anyWaitableTasks {
return nil, ErrNoWaitableEvent
}
- return nil, syserror.ECHILD
+ return nil, linuxerr.ECHILD
}
// Preconditions: The TaskSet mutex must be locked for writing.
@@ -1042,7 +999,7 @@ func (t *Task) waitCollectZombieLocked(target *Task, opts *WaitOptions, asPtrace
}
pid := t.tg.pidns.tids[target]
uid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()
- status := target.exitStatus.Status()
+ status := target.exitStatus
if !opts.ConsumeEvent {
return &WaitResult{
Task: target,
@@ -1056,7 +1013,7 @@ func (t *Task) waitCollectZombieLocked(target *Task, opts *WaitOptions, asPtrace
// differ from that reported by a consuming wait; the latter will return
// the group exit code if one is available.
if target.tg.exiting {
- status = target.tg.exitStatus.Status()
+ status = target.tg.exitStatus
}
// t may be (in the thread group of) target's parent, tracer, or both. We
// don't need to check for !exitTracerAcked because tracees are detached
@@ -1122,12 +1079,11 @@ func (t *Task) waitCollectChildGroupStopLocked(target *Task, opts *WaitOptions)
target.tg.groupStopWaitable = false
}
return &WaitResult{
- Task: target,
- TID: pid,
- UID: uid,
- Event: EventChildGroupStop,
- // There is no name for these status constants.
- Status: (uint32(sig)&0xff)<<8 | 0x7f,
+ Task: target,
+ TID: pid,
+ UID: uid,
+ Event: EventChildGroupStop,
+ Status: linux.WaitStatusStopped(uint32(sig)),
}
}
@@ -1148,7 +1104,7 @@ func (t *Task) waitCollectGroupContinueLocked(target *Task, opts *WaitOptions) *
TID: pid,
UID: uid,
Event: EventGroupContinue,
- Status: 0xffff,
+ Status: linux.WaitStatusContinued(),
}
}
@@ -1176,7 +1132,7 @@ func (t *Task) waitCollectTraceeStopLocked(target *Task, opts *WaitOptions) *Wai
TID: pid,
UID: uid,
Event: EventTraceeStop,
- Status: uint32(code)<<8 | 0x7f,
+ Status: linux.WaitStatusStopped(uint32(code)),
}
}
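
The magic constants that used to be built inline (sig<<8|0x7f for stops, 0xffff for continues) are now hidden behind linux.WaitStatusStopped and linux.WaitStatusContinued. Decoding follows the classic wait(2)/glibc macro layout; a standalone sketch:

package main

import "fmt"

// These predicates follow the standard wait(2) status layout;
// linux.WaitStatus wraps the same bits behind methods.
func exited(s uint32) bool    { return s&0x7f == 0 }
func exitStatus(s uint32) int { return int(s>>8) & 0xff }
func signaled(s uint32) bool  { return s&0x7f != 0 && s&0x7f != 0x7f }
func termSignal(s uint32) int { return int(s & 0x7f) }
func stopped(s uint32) bool   { return s&0xff == 0x7f }
func stopSignal(s uint32) int { return int(s>>8) & 0xff }
func continued(s uint32) bool { return s == 0xffff }

func main() {
	stop := uint32(19)<<8 | 0x7f // A SIGSTOP stop, as the old inline constant built it.
	fmt.Println(stopped(stop), stopSignal(stop))      // true 19
	fmt.Println(continued(0xffff))                    // true
	fmt.Println(exited(0x0300), exitStatus(0x0300))   // true 3
	fmt.Println(signaled(0x0009), termSignal(0x0009)) // true 9
}
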
diff --git a/pkg/sentry/kernel/task_identity.go b/pkg/sentry/kernel/task_identity.go
index 0325967e4..a9067b682 100644
--- a/pkg/sentry/kernel/task_identity.go
+++ b/pkg/sentry/kernel/task_identity.go
@@ -16,9 +16,9 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Credentials returns t's credentials.
@@ -47,7 +47,7 @@ func (t *Task) HasCapability(cp linux.Capability) bool {
func (t *Task) SetUID(uid auth.UID) error {
// setuid considers -1 to be invalid.
if !uid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
t.mu.Lock()
@@ -56,7 +56,7 @@ func (t *Task) SetUID(uid auth.UID) error {
creds := t.Credentials()
kuid := creds.UserNamespace.MapToKUID(uid)
if !kuid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// "setuid() sets the effective user ID of the calling process. If the
// effective UID of the caller is root (more precisely: if the caller has
@@ -70,7 +70,7 @@ func (t *Task) SetUID(uid auth.UID) error {
// capability) and uid does not match the real UID or saved set-user-ID of
// the calling process."
if kuid != creds.RealKUID && kuid != creds.SavedKUID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
t.setKUIDsUncheckedLocked(creds.RealKUID, kuid, creds.SavedKUID)
return nil
@@ -87,26 +87,26 @@ func (t *Task) SetREUID(r, e auth.UID) error {
if r.Ok() {
newR = creds.UserNamespace.MapToKUID(r)
if !newR.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
newE := creds.EffectiveKUID
if e.Ok() {
newE = creds.UserNamespace.MapToKUID(e)
if !newE.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
if !creds.HasCapability(linux.CAP_SETUID) {
// "Unprivileged processes may only set the effective user ID to the
// real user ID, the effective user ID, or the saved set-user-ID."
if newE != creds.RealKUID && newE != creds.EffectiveKUID && newE != creds.SavedKUID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "Unprivileged users may only set the real user ID to the real user
// ID or the effective user ID."
if newR != creds.RealKUID && newR != creds.EffectiveKUID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
// "If the real user ID is set (i.e., ruid is not -1) or the effective user
@@ -223,7 +223,7 @@ func (t *Task) setKUIDsUncheckedLocked(newR, newE, newS auth.KUID) {
// SetGID implements the semantics of setgid(2).
func (t *Task) SetGID(gid auth.GID) error {
if !gid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
t.mu.Lock()
@@ -232,14 +232,14 @@ func (t *Task) SetGID(gid auth.GID) error {
creds := t.Credentials()
kgid := creds.UserNamespace.MapToKGID(gid)
if !kgid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if creds.HasCapability(linux.CAP_SETGID) {
t.setKGIDsUncheckedLocked(kgid, kgid, kgid)
return nil
}
if kgid != creds.RealKGID && kgid != creds.SavedKGID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
t.setKGIDsUncheckedLocked(creds.RealKGID, kgid, creds.SavedKGID)
return nil
@@ -255,22 +255,22 @@ func (t *Task) SetREGID(r, e auth.GID) error {
if r.Ok() {
newR = creds.UserNamespace.MapToKGID(r)
if !newR.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
newE := creds.EffectiveKGID
if e.Ok() {
newE = creds.UserNamespace.MapToKGID(e)
if !newE.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
if !creds.HasCapability(linux.CAP_SETGID) {
if newE != creds.RealKGID && newE != creds.EffectiveKGID && newE != creds.SavedKGID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if newR != creds.RealKGID && newR != creds.EffectiveKGID {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
newS := creds.SavedKGID
@@ -343,13 +343,13 @@ func (t *Task) SetExtraGIDs(gids []auth.GID) error {
defer t.mu.Unlock()
creds := t.Credentials()
if !creds.HasCapability(linux.CAP_SETGID) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
kgids := make([]auth.KGID, len(gids))
for i, gid := range gids {
kgid := creds.UserNamespace.MapToKGID(gid)
if !kgid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
kgids[i] = kgid
}
@@ -367,25 +367,25 @@ func (t *Task) SetCapabilitySets(permitted, inheritable, effective auth.Capabili
// "Permitted: This is a limiting superset for the effective capabilities
// that the thread may assume." - capabilities(7)
if effective & ^permitted != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
creds := t.Credentials()
// "It is also a limiting superset for the capabilities that may be added
// to the inheritable set by a thread that does not have the CAP_SETPCAP
// capability in its effective set."
if !creds.HasCapability(linux.CAP_SETPCAP) && (inheritable & ^(creds.InheritableCaps|creds.PermittedCaps) != 0) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "If a thread drops a capability from its permitted set, it can never
// reacquire that capability (unless it execve(2)s ..."
if permitted & ^creds.PermittedCaps != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// "... if a capability is not in the bounding set, then a thread can't add
// this capability to its inheritable set, even if it was in its permitted
// capabilities ..."
if inheritable & ^(creds.InheritableCaps|creds.BoundingCaps) != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
creds = creds.Fork() // The credentials object is immutable. See doc for creds.
creds.PermittedCaps = permitted
@@ -402,7 +402,7 @@ func (t *Task) DropBoundingCapability(cp linux.Capability) error {
defer t.mu.Unlock()
creds := t.Credentials()
if !creds.HasCapability(linux.CAP_SETPCAP) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
creds = creds.Fork() // The credentials object is immutable. See doc for creds.
creds.BoundingCaps &^= auth.CapabilitySetOf(cp)
@@ -422,7 +422,7 @@ func (t *Task) SetUserNamespace(ns *auth.UserNamespace) error {
// If t just created ns, then t.creds is guaranteed to have CAP_SYS_ADMIN
// in ns (by rule 3 in auth.Credentials.HasCapability).
if !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, ns) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
creds = creds.Fork() // The credentials object is immutable. See doc for creds.
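
The setuid family keeps the rule quoted from setuid(2): without CAP_SETUID the target must equal the real UID or the saved set-user-ID. A condensed, hypothetical predicate:

package main

import "fmt"

// maySetUID is an illustrative predicate, not gVisor's implementation: with
// CAP_SETUID any UID may be assumed; otherwise the requested UID must equal
// the caller's real UID or saved set-user-ID.
func maySetUID(target, real, saved uint32, hasCapSetuid bool) bool {
	if hasCapSetuid {
		return true
	}
	return target == real || target == saved
}

func main() {
	fmt.Println(maySetUID(0, 1000, 1000, false)) // false: cannot become root
	fmt.Println(maySetUID(1000, 1000, 0, false)) // true: matches real UID
	fmt.Println(maySetUID(0, 1000, 1000, true))  // true: CAP_SETUID
}
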
diff --git a/pkg/sentry/kernel/task_image.go b/pkg/sentry/kernel/task_image.go
index bd5543d4e..6002ffb42 100644
--- a/pkg/sentry/kernel/task_image.go
+++ b/pkg/sentry/kernel/task_image.go
@@ -17,7 +17,7 @@ package kernel
import (
"fmt"
- "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/abi/linux/errno"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -27,7 +27,7 @@ import (
"gvisor.dev/gvisor/pkg/syserr"
)
-var errNoSyscalls = syserr.New("no syscall table found", linux.ENOEXEC)
+var errNoSyscalls = syserr.New("no syscall table found", errno.ENOEXEC)
// Auxmap contains miscellaneous data for the task.
type Auxmap map[string]interface{}
@@ -53,7 +53,7 @@ type TaskImage struct {
}
// release releases all resources held by the TaskImage. release is called by
-// the task when it execs into a new TaskImage or exits.
+// the task when it execs into a new TaskImage.
func (image *TaskImage) release() {
// Nil out pointers so that if the task is saved after release, it doesn't
// follow the pointers to possibly now-invalid objects.
diff --git a/pkg/sentry/kernel/task_log.go b/pkg/sentry/kernel/task_log.go
index 72b9a0384..c5b099559 100644
--- a/pkg/sentry/kernel/task_log.go
+++ b/pkg/sentry/kernel/task_log.go
@@ -235,7 +235,7 @@ func (t *Task) traceExitEvent() {
if !trace.IsEnabled() {
return
}
- trace.Logf(t.traceContext, traceCategory, "exit status: 0x%x", t.exitStatus.Status())
+ trace.Logf(t.traceContext, traceCategory, "exit status: %s", t.exitStatus)
}
// traceExecEvent is called when a task calls exec.
@@ -249,5 +249,9 @@ func (t *Task) traceExecEvent(image *TaskImage) {
return
}
defer file.DecRef(t)
- trace.Logf(t.traceContext, traceCategory, "exec: %s", file.PathnameWithDeleted(t))
+
+ // The traceExecEvent function may be called before the task goroutine
+ // starts, so we must use the async context.
+ name := file.PathnameWithDeleted(t.AsyncContext())
+ trace.Logf(t.traceContext, traceCategory, "exec: %s", name)
}
diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go
index 068f25af1..7b336a46b 100644
--- a/pkg/sentry/kernel/task_run.go
+++ b/pkg/sentry/kernel/task_run.go
@@ -22,6 +22,7 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/goid"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -29,7 +30,6 @@ import (
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A taskRunState is a reified state in the task state machine. See README.md
@@ -197,8 +197,8 @@ func (app *runApp) execute(t *Task) taskRunState {
// a pending signal, causing another interruption, but that signal should
// not interact with the interrupted syscall.)
if t.haveSyscallReturn {
- if sre, ok := syserror.SyscallRestartErrnoFromReturn(t.Arch().Return()); ok {
- if sre == syserror.ERESTART_RESTARTBLOCK {
+ if sre, ok := linuxerr.SyscallRestartErrorFromReturn(t.Arch().Return()); ok {
+ if sre == linuxerr.ERESTART_RESTARTBLOCK {
t.Debugf("Restarting syscall %d with restart block after errno %d: not interrupted by handled signal", t.Arch().SyscallNo(), sre)
t.Arch().RestartSyscallWithRestartBlock()
} else {
@@ -377,7 +377,7 @@ func (app *runApp) execute(t *Task) taskRunState {
default:
// What happened? Can't continue.
t.Warningf("Unexpected SwitchToApp error: %v", err)
- t.PrepareExit(ExitStatus{Code: ExtractErrno(err, -1)})
+ t.PrepareExit(linux.WaitStatusExit(int32(ExtractErrno(err, -1))))
return (*runExit)(nil)
}
}
diff --git a/pkg/sentry/kernel/task_sched.go b/pkg/sentry/kernel/task_sched.go
index f142feab4..9d9fa76a6 100644
--- a/pkg/sentry/kernel/task_sched.go
+++ b/pkg/sentry/kernel/task_sched.go
@@ -23,12 +23,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/hostcpu"
"gvisor.dev/gvisor/pkg/sentry/kernel/sched"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// TaskGoroutineState is a coarse representation of the current execution
@@ -601,7 +601,7 @@ func (t *Task) SetCPUMask(mask sched.CPUSet) error {
// Ensure that at least 1 CPU is still allowed.
if mask.NumCPUs() == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if t.k.useHostCores {
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
index 8ca61ed48..eeb3c5e69 100644
--- a/pkg/sentry/kernel/task_signals.go
+++ b/pkg/sentry/kernel/task_signals.go
@@ -22,12 +22,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/eventchannel"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ucspb "gvisor.dev/gvisor/pkg/sentry/kernel/uncaught_signal_go_proto"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -156,10 +156,11 @@ func (t *Task) PendingSignals() linux.SignalSet {
// deliverSignal delivers the given signal and returns the following run state.
func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRunState {
- sigact := computeAction(linux.Signal(info.Signo), act)
+ sig := linux.Signal(info.Signo)
+ sigact := computeAction(sig, act)
if t.haveSyscallReturn {
- if sre, ok := syserror.SyscallRestartErrnoFromReturn(t.Arch().Return()); ok {
+ if sre, ok := linuxerr.SyscallRestartErrorFromReturn(t.Arch().Return()); ok {
// Signals that are ignored, cause a thread group stop, or
// terminate the thread group do not interact with interrupted
// syscalls; in Linux terms, they are never returned to the signal
@@ -168,13 +169,13 @@ func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRu
// signal that is actually handled (by userspace).
if sigact == SignalActionHandler {
switch {
- case sre == syserror.ERESTARTNOHAND:
+ case sre == linuxerr.ERESTARTNOHAND:
fallthrough
- case sre == syserror.ERESTART_RESTARTBLOCK:
+ case sre == linuxerr.ERESTART_RESTARTBLOCK:
fallthrough
- case (sre == syserror.ERESTARTSYS && act.Flags&linux.SA_RESTART == 0):
+ case (sre == linuxerr.ERESTARTSYS && act.Flags&linux.SA_RESTART == 0):
t.Debugf("Not restarting syscall %d after errno %d: interrupted by signal %d", t.Arch().SyscallNo(), sre, info.Signo)
- t.Arch().SetReturn(uintptr(-ExtractErrno(syserror.EINTR, -1)))
+ t.Arch().SetReturn(uintptr(-ExtractErrno(linuxerr.EINTR, -1)))
default:
t.Debugf("Restarting syscall %d after errno %d: interrupted by signal %d", t.Arch().SyscallNo(), sre, info.Signo)
t.Arch().RestartSyscall()
@@ -197,14 +198,14 @@ func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRu
}
// Attach a fault address if appropriate.
- switch linux.Signal(info.Signo) {
+ switch sig {
case linux.SIGSEGV, linux.SIGFPE, linux.SIGILL, linux.SIGTRAP, linux.SIGBUS:
ucs.FaultAddr = info.Addr()
}
eventchannel.Emit(ucs)
- t.PrepareGroupExit(ExitStatus{Signo: int(info.Signo)})
+ t.PrepareGroupExit(linux.WaitStatusTerminationSignal(sig))
return (*runExit)(nil)
case SignalActionStop:
@@ -224,12 +225,12 @@ func (t *Task) deliverSignal(info *linux.SignalInfo, act linux.SigAction) taskRu
// Send a forced SIGSEGV. If the signal that couldn't be delivered
// was a SIGSEGV, force the handler to SIG_DFL.
- t.forceSignal(linux.SIGSEGV, linux.Signal(info.Signo) == linux.SIGSEGV /* unconditional */)
+ t.forceSignal(linux.SIGSEGV, sig == linux.SIGSEGV /* unconditional */)
t.SendSignal(SignalInfoPriv(linux.SIGSEGV))
}
default:
- panic(fmt.Sprintf("Unknown signal action %+v, %d?", info, computeAction(linux.Signal(info.Signo), act)))
+ panic(fmt.Sprintf("Unknown signal action %+v, %d?", info, computeAction(sig, act)))
}
return (*runInterrupt)(nil)
}
@@ -338,7 +339,7 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
}
if timeout == 0 {
- return nil, syserror.EAGAIN
+ return nil, linuxerr.EAGAIN
}
// Unblock signals we're waiting for. Remember the original signal mask so
@@ -359,8 +360,8 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
if info := t.dequeueSignalLocked(mask); info != nil {
return info, nil
}
- if err == syserror.ETIMEDOUT {
- return nil, syserror.EAGAIN
+ if err == linuxerr.ETIMEDOUT {
+ return nil, linuxerr.EAGAIN
}
return nil, err
}
@@ -369,9 +370,9 @@ func (t *Task) Sigtimedwait(set linux.SignalSet, timeout time.Duration) (*linux.
//
// The following errors may be returned:
//
-// syserror.ESRCH - The task has exited.
-// syserror.EINVAL - The signal is not valid.
-// syserror.EAGAIN - THe signal is realtime, and cannot be queued.
+// linuxerr.ESRCH - The task has exited.
+// linuxerr.EINVAL - The signal is not valid.
+// linuxerr.EAGAIN - The signal is realtime, and cannot be queued.
//
func (t *Task) SendSignal(info *linux.SignalInfo) error {
t.tg.pidns.owner.mu.RLock()
@@ -406,14 +407,14 @@ func (t *Task) sendSignalLocked(info *linux.SignalInfo, group bool) error {
func (t *Task) sendSignalTimerLocked(info *linux.SignalInfo, group bool, timer *IntervalTimer) error {
if t.exitState == TaskExitDead {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
sig := linux.Signal(info.Signo)
if sig == 0 {
return nil
}
if !sig.IsValid() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Signal side effects apply even if the signal is ultimately discarded.
@@ -450,7 +451,7 @@ func (t *Task) sendSignalTimerLocked(info *linux.SignalInfo, group bool, timer *
}
if !q.enqueue(info, timer) {
if sig.IsRealtime() {
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
t.Debugf("Discarding duplicate signal %d", sig)
if timer != nil {
@@ -505,7 +506,7 @@ func (tg *ThreadGroup) applySignalSideEffectsLocked(sig linux.Signal) {
// ignores tg.execing.
if !tg.exiting {
tg.exiting = true
- tg.exitStatus = ExitStatus{Signo: int(linux.SIGKILL)}
+ tg.exitStatus = linux.WaitStatusTerminationSignal(linux.SIGKILL)
}
for t := tg.tasks.Front(); t != nil; t = t.Next() {
t.killLocked()
@@ -684,7 +685,7 @@ func (t *Task) SetSignalStack(alt linux.SignalStack) bool {
// to *actptr (if actptr is not nil) and returns the old signal action.
func (tg *ThreadGroup) SetSigAction(sig linux.Signal, actptr *linux.SigAction) (linux.SigAction, error) {
if !sig.IsValid() {
- return linux.SigAction{}, syserror.EINVAL
+ return linux.SigAction{}, linuxerr.EINVAL
}
tg.pidns.owner.mu.RLock()
@@ -695,7 +696,7 @@ func (tg *ThreadGroup) SetSigAction(sig linux.Signal, actptr *linux.SigAction) (
oldact := sh.actions[sig]
if actptr != nil {
if sig == linux.SIGKILL || sig == linux.SIGSTOP {
- return oldact, syserror.EINVAL
+ return oldact, linuxerr.EINVAL
}
act := *actptr
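
Elsewhere in this file, deliverSignal's restart decision is unchanged in substance: ERESTARTNOHAND and ERESTART_RESTARTBLOCK always become EINTR for a handled signal, and ERESTARTSYS does so only when the handler lacks SA_RESTART. A standalone sketch of that switch, with local stand-ins for the restart errors:

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins; the real values live in pkg/errors/linuxerr and
// pkg/abi/linux.
var (
	errRestartSys          = errors.New("ERESTARTSYS")
	errRestartNoHand       = errors.New("ERESTARTNOHAND")
	errRestartRestartBlock = errors.New("ERESTART_RESTARTBLOCK")
)

const saRestart = 0x10000000

// shouldRestart reports whether an interrupted syscall is restarted after a
// signal that is actually handled by userspace, mirroring the switch above.
func shouldRestart(sre error, handlerFlags uint64) bool {
	switch {
	case sre == errRestartNoHand, sre == errRestartRestartBlock:
		return false
	case sre == errRestartSys && handlerFlags&saRestart == 0:
		return false
	default:
		return true
	}
}

func main() {
	fmt.Println(shouldRestart(errRestartSys, 0))             // false -> EINTR
	fmt.Println(shouldRestart(errRestartSys, saRestart))     // true  -> restart
	fmt.Println(shouldRestart(errRestartNoHand, saRestart))  // false -> EINTR
}
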
diff --git a/pkg/sentry/kernel/task_start.go b/pkg/sentry/kernel/task_start.go
index 41fd2d471..217c6f531 100644
--- a/pkg/sentry/kernel/task_start.go
+++ b/pkg/sentry/kernel/task_start.go
@@ -17,6 +17,7 @@ package kernel
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -24,7 +25,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/sched"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// TaskConfig defines the configuration of a new Task (see below).
@@ -169,7 +169,7 @@ func (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) {
// doesn't matter too much since the caller will exit before it returns
// to userspace. If the caller isn't in the same thread group, then
// we're in uncharted territory and can return whatever we want.
- return nil, syserror.EINTR
+ return nil, linuxerr.EINTR
}
if err := ts.assignTIDsLocked(t); err != nil {
return nil, err
@@ -267,7 +267,7 @@ func (ns *PIDNamespace) allocateTID() (ThreadID, error) {
// fail with the error ENOMEM; it is not possible to create a new
// processes [sic] in a PID namespace whose init process has
// terminated." - pid_namespaces(7)
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
tid := ns.last
for {
@@ -299,7 +299,7 @@ func (ns *PIDNamespace) allocateTID() (ThreadID, error) {
// Did we do a full cycle?
if tid == ns.last {
// No tid available.
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
}
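
allocateTID, partially shown above, hands out IDs cyclically and fails with EAGAIN once a full pass finds nothing free, or with ENOMEM if the namespace's init has already exited. A rough, hypothetical sketch of the cyclic scan only (the init check is omitted):

package main

import (
	"errors"
	"fmt"
)

var errAgain = errors.New("EAGAIN")

// allocate is an illustrative cyclic allocator in the spirit of allocateTID:
// scan upward from the last ID handed out, wrap at max, and give up with
// EAGAIN once a full cycle finds nothing free.
func allocate(last, min, max int, used map[int]bool) (int, error) {
	tid := last
	for {
		tid++
		if tid > max {
			tid = min
		}
		if !used[tid] {
			return tid, nil
		}
		if tid == last {
			return 0, errAgain // Full cycle, nothing free.
		}
	}
}

func main() {
	used := map[int]bool{2: true, 3: true}
	fmt.Println(allocate(2, 2, 5, used)) // 4 <nil>
}
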
diff --git a/pkg/sentry/kernel/task_syscall.go b/pkg/sentry/kernel/task_syscall.go
index 601fc0d3a..2b1d7e114 100644
--- a/pkg/sentry/kernel/task_syscall.go
+++ b/pkg/sentry/kernel/task_syscall.go
@@ -22,12 +22,13 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
+ "gvisor.dev/gvisor/pkg/errors"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
)
// SyscallRestartBlock represents the restart block for a syscall restartable
@@ -159,7 +160,7 @@ func (t *Task) doSyscall() taskRunState {
// ok
case linux.SECCOMP_RET_KILL_THREAD:
t.Debugf("Syscall %d: killed by seccomp", sysno)
- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})
+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))
return (*runExit)(nil)
case linux.SECCOMP_RET_TRACE:
t.Debugf("Syscall %d: stopping for PTRACE_EVENT_SECCOMP", sysno)
@@ -309,7 +310,7 @@ func (t *Task) doVsyscall(addr hostarch.Addr, sysno uintptr) taskRunState {
return &runVsyscallAfterPtraceEventSeccomp{addr, sysno, caller}
case linux.SECCOMP_RET_KILL_THREAD:
t.Debugf("vsyscall %d: killed by seccomp", sysno)
- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})
+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))
return (*runExit)(nil)
default:
panic(fmt.Sprintf("Unknown seccomp result %d", r))
@@ -336,7 +337,7 @@ func (r *runVsyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState {
// Documentation/prctl/seccomp_filter.txt. On Linux, changing orig_ax or ip
// causes do_exit(SIGSYS), and changing sp is ignored.
if (sysno != ^uintptr(0) && sysno != r.sysno) || hostarch.Addr(t.Arch().IP()) != r.addr {
- t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)})
+ t.PrepareExit(linux.WaitStatusTerminationSignal(linux.SIGSYS))
return (*runExit)(nil)
}
if sysno == ^uintptr(0) {
@@ -357,7 +358,7 @@ func (t *Task) doVsyscallInvoke(sysno uintptr, args arch.SyscallArguments, calle
t.Arch().SetReturn(uintptr(rval))
} else {
t.Debugf("vsyscall %d, caller %x: emulated syscall returned error: %v", sysno, t.Arch().Value(caller), err)
- if err == syserror.EFAULT {
+ if linuxerr.Equals(linuxerr.EFAULT, err) {
t.forceSignal(linux.SIGSEGV, false /* unconditional */)
t.SendSignal(SignalInfoPriv(linux.SIGSEGV))
// A return is not emulated in this case.
@@ -379,8 +380,8 @@ func ExtractErrno(err error, sysno int) int {
return 0
case unix.Errno:
return int(err)
- case syserror.SyscallRestartErrno:
- return int(err)
+ case *errors.Error:
+ return int(err.Errno())
case *memmap.BusError:
// Bus errors may generate SIGBUS, but for syscalls they still
// return EFAULT. See case in task_run.go where the fault is
@@ -393,8 +394,8 @@ func ExtractErrno(err error, sysno int) int {
case *os.SyscallError:
return ExtractErrno(err.Err, sysno)
default:
- if errno, ok := syserror.TranslateError(err); ok {
- return int(errno)
+ if errno, ok := linuxerr.TranslateError(err); ok {
+ return int(errno.Errno())
}
}
panic(fmt.Sprintf("Unknown syscall %d error: %v", sysno, err))
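
ExtractErrno now unwraps *errors.Error values via Errno() instead of the old SyscallRestartErrno case, but the overall shape — a type switch that keeps unwrapping until it reaches a numeric errno — is the same. A simplified, standard-library-only analogue:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// extractErrno is a simplified analogue of ExtractErrno above: unwrap the
// error kinds we understand until a numeric errno is reached, otherwise
// fall back to a default.
func extractErrno(err error, def int) int {
	switch e := err.(type) {
	case nil:
		return 0
	case syscall.Errno:
		return int(e)
	case *os.SyscallError:
		return extractErrno(e.Err, def)
	case *os.PathError:
		return extractErrno(e.Err, def)
	default:
		return def
	}
}

func main() {
	_, err := os.Open("/definitely/not/there")
	fmt.Println(extractErrno(err, -1)) // 2 (ENOENT) on Linux
}
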
diff --git a/pkg/sentry/kernel/task_usermem.go b/pkg/sentry/kernel/task_usermem.go
index fc6d9438a..bff226a11 100644
--- a/pkg/sentry/kernel/task_usermem.go
+++ b/pkg/sentry/kernel/task_usermem.go
@@ -19,9 +19,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -104,7 +104,7 @@ func (t *Task) CopyInVector(addr hostarch.Addr, maxElemSize, maxTotalSize int) (
// Each string has a zero terminating byte counted, so copying out a string
// requires at least one byte of space. Also, see the calculation below.
if maxTotalSize <= 0 {
- return nil, syserror.ENOMEM
+ return nil, linuxerr.ENOMEM
}
thisMax := maxElemSize
if maxTotalSize < thisMax {
@@ -132,7 +132,7 @@ func (t *Task) CopyOutIovecs(addr hostarch.Addr, src hostarch.AddrRangeSeq) erro
case 8:
const itemLen = 16
if _, ok := addr.AddLength(uint64(src.NumRanges()) * itemLen); !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
b := t.CopyScratchBuffer(itemLen)
@@ -147,7 +147,7 @@ func (t *Task) CopyOutIovecs(addr hostarch.Addr, src hostarch.AddrRangeSeq) erro
}
default:
- return syserror.ENOSYS
+ return linuxerr.ENOSYS
}
return nil
@@ -190,7 +190,7 @@ func (t *Task) CopyInIovecs(addr hostarch.Addr, numIovecs int) (hostarch.AddrRan
case 8:
const itemLen = 16
if _, ok := addr.AddLength(uint64(numIovecs) * itemLen); !ok {
- return hostarch.AddrRangeSeq{}, syserror.EFAULT
+ return hostarch.AddrRangeSeq{}, linuxerr.EFAULT
}
b := t.CopyScratchBuffer(itemLen)
@@ -202,11 +202,11 @@ func (t *Task) CopyInIovecs(addr hostarch.Addr, numIovecs int) (hostarch.AddrRan
base := hostarch.Addr(hostarch.ByteOrder.Uint64(b[0:8]))
length := hostarch.ByteOrder.Uint64(b[8:16])
if length > math.MaxInt64 {
- return hostarch.AddrRangeSeq{}, syserror.EINVAL
+ return hostarch.AddrRangeSeq{}, linuxerr.EINVAL
}
ar, ok := t.MemoryManager().CheckIORange(base, int64(length))
if !ok {
- return hostarch.AddrRangeSeq{}, syserror.EFAULT
+ return hostarch.AddrRangeSeq{}, linuxerr.EFAULT
}
if numIovecs == 1 {
@@ -219,7 +219,7 @@ func (t *Task) CopyInIovecs(addr hostarch.Addr, numIovecs int) (hostarch.AddrRan
}
default:
- return hostarch.AddrRangeSeq{}, syserror.ENOSYS
+ return hostarch.AddrRangeSeq{}, linuxerr.ENOSYS
}
// Truncate to MAX_RW_COUNT.
@@ -252,7 +252,7 @@ func (t *Task) SingleIOSequence(addr hostarch.Addr, length int, opts usermem.IOO
}
ar, ok := t.MemoryManager().CheckIORange(addr, int64(length))
if !ok {
- return usermem.IOSequence{}, syserror.EFAULT
+ return usermem.IOSequence{}, linuxerr.EFAULT
}
return usermem.IOSequence{
IO: t.MemoryManager(),
@@ -270,7 +270,7 @@ func (t *Task) SingleIOSequence(addr hostarch.Addr, length int, opts usermem.IOO
// Preconditions: Same as Task.CopyInIovecs.
func (t *Task) IovecsIOSequence(addr hostarch.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) {
if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV {
- return usermem.IOSequence{}, syserror.EINVAL
+ return usermem.IOSequence{}, linuxerr.EINVAL
}
ars, err := t.CopyInIovecs(addr, iovcnt)
if err != nil {
@@ -312,7 +312,7 @@ func (cc *taskCopyContext) getMemoryManager() (*mm.MemoryManager, error) {
tmm := cc.t.MemoryManager()
cc.t.mu.Unlock()
if !tmm.IncUsers() {
- return nil, syserror.EFAULT
+ return nil, linuxerr.EFAULT
}
return tmm, nil
}
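
CopyInIovecs and CopyOutIovecs guard the array arithmetic (16 bytes per iovec on 64-bit) against address-space wrap-around before touching user memory. The overflow check in isolation, with a hypothetical helper:

package main

import (
	"errors"
	"fmt"
	"math"
)

var errFault = errors.New("EFAULT")

// checkIovecArray mirrors the AddLength guard above: each 64-bit iovec is 16
// bytes, and the whole array must fit without wrapping the address space.
func checkIovecArray(addr uint64, numIovecs int) (uint64, error) {
	const itemLen = 16
	length := uint64(numIovecs) * itemLen
	if addr > math.MaxUint64-length {
		return 0, errFault
	}
	return addr + length, nil
}

func main() {
	fmt.Println(checkIovecArray(0x1000, 3))           // 4144 <nil>
	fmt.Println(checkIovecArray(math.MaxUint64-8, 1)) // 0 EFAULT
}
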
diff --git a/pkg/sentry/kernel/thread_group.go b/pkg/sentry/kernel/thread_group.go
index 891e2201d..5814a4eca 100644
--- a/pkg/sentry/kernel/thread_group.go
+++ b/pkg/sentry/kernel/thread_group.go
@@ -19,13 +19,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A ThreadGroup is a logical grouping of tasks that has widespread
@@ -143,7 +143,7 @@ type ThreadGroup struct {
//
// While exiting is false, exitStatus is protected by the signal mutex.
// When exiting becomes true, exitStatus becomes immutable.
- exitStatus ExitStatus
+ exitStatus linux.WaitStatus
// terminationSignal is the signal that this thread group's leader will
// send to its parent when it exits.
@@ -278,7 +278,7 @@ func (k *Kernel) NewThreadGroup(mntns *fs.MountNamespace, pidns *PIDNamespace, s
limits: limits,
mounts: mntns,
}
- tg.itimerRealTimer = ktime.NewTimer(k.monotonicClock, &itimerRealListener{tg: tg})
+ tg.itimerRealTimer = ktime.NewTimer(k.timekeeper.monotonicClock, &itimerRealListener{tg: tg})
tg.timers = make(map[linux.TimerID]*IntervalTimer)
tg.oldRSeqCritical.Store(&OldRSeqCriticalRegion{})
return tg
@@ -357,7 +357,7 @@ func (tg *ThreadGroup) SetControllingTTY(tty *TTY, steal bool, isReadable bool)
// "The calling process must be a session leader and not have a
// controlling terminal already." - tty_ioctl(4)
if tg.processGroup.session.leader != tg || tg.tty != nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(tg.leader)
@@ -371,7 +371,7 @@ func (tg *ThreadGroup) SetControllingTTY(tty *TTY, steal bool, isReadable bool)
if tty.tg != nil && tg.processGroup.session != tty.tg.processGroup.session {
// Stealing requires CAP_SYS_ADMIN in the root user namespace.
if !hasAdmin || !steal {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Steal the TTY away. Unlike TIOCNOTTY, don't send signals.
for othertg := range tg.pidns.owner.Root.tgids {
@@ -391,7 +391,7 @@ func (tg *ThreadGroup) SetControllingTTY(tty *TTY, steal bool, isReadable bool)
}
if !isReadable && !hasAdmin {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Set the controlling terminal and foreground process group.
@@ -419,7 +419,7 @@ func (tg *ThreadGroup) ReleaseControllingTTY(tty *TTY) error {
if tg.tty == nil || tg.tty != tty {
tg.signalHandlers.mu.Unlock()
- return syserror.ENOTTY
+ return linuxerr.ENOTTY
}
// "If the process was session leader, then send SIGHUP and SIGCONT to
@@ -473,7 +473,7 @@ func (tg *ThreadGroup) ForegroundProcessGroup(tty *TTY) (int32, error) {
// "When fd does not refer to the controlling terminal of the calling
// process, -1 is returned" - tcgetpgrp(3)
if tg.tty != tty {
- return -1, syserror.ENOTTY
+ return -1, linuxerr.ENOTTY
}
return int32(tg.processGroup.session.foreground.id), nil
@@ -489,31 +489,36 @@ func (tg *ThreadGroup) SetForegroundProcessGroup(tty *TTY, pgid ProcessGroupID)
tg.signalHandlers.mu.Lock()
defer tg.signalHandlers.mu.Unlock()
- // TODO(gvisor.dev/issue/6148): "If tcsetpgrp() is called by a member of a
- // background process group in its session, and the calling process is not
- // blocking or ignoring SIGTTOU, a SIGTTOU signal is sent to all members of
- // this background process group."
-
// tty must be the controlling terminal.
if tg.tty != tty {
- return -1, syserror.ENOTTY
+ return -1, linuxerr.ENOTTY
}
// pgid must be positive.
if pgid < 0 {
- return -1, syserror.EINVAL
+ return -1, linuxerr.EINVAL
}
// pg must not be empty. Empty process groups are removed from their
// pid namespaces.
pg, ok := tg.pidns.processGroups[pgid]
if !ok {
- return -1, syserror.ESRCH
+ return -1, linuxerr.ESRCH
}
// pg must be part of this process's session.
if tg.processGroup.session != pg.session {
- return -1, syserror.EPERM
+ return -1, linuxerr.EPERM
+ }
+
+ signalAction := tg.signalHandlers.actions[linux.SIGTTOU]
+ // If the calling process is a member of a background group, a SIGTTOU
+ // signal is sent to all members of this background process group.
+ // We also need to check whether it is ignoring or blocking SIGTTOU.
+ ignored := signalAction.Handler == linux.SIG_IGN
+ blocked := tg.leader.signalMask == linux.SignalSetOf(linux.SIGTTOU)
+ if tg.processGroup.id != tg.processGroup.session.foreground.id && !ignored && !blocked {
+ tg.leader.sendSignalLocked(SignalInfoPriv(linux.SIGTTOU), true)
}
tg.processGroup.session.foreground.id = pgid
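
The new SetForegroundProcessGroup logic implements the tcsetpgrp(3) rule that the removed TODO referenced: a caller in a background process group gets SIGTTOU unless it ignores or blocks that signal. A condensed, hypothetical predicate for the decision:

package main

import "fmt"

// shouldSendSIGTTOU is an illustrative condensation of the new check: a
// background process group changing the foreground group gets SIGTTOU,
// unless the caller ignores or blocks that signal.
func shouldSendSIGTTOU(callerPG, foregroundPG int32, sigttouIgnored, sigttouBlocked bool) bool {
	if callerPG == foregroundPG {
		return false // Already in the foreground group.
	}
	return !sigttouIgnored && !sigttouBlocked
}

func main() {
	fmt.Println(shouldSendSIGTTOU(2, 1, false, false)) // true
	fmt.Println(shouldSendSIGTTOU(2, 1, true, false))  // false
	fmt.Println(shouldSendSIGTTOU(1, 1, false, false)) // false
}
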
diff --git a/pkg/sentry/kernel/time/BUILD b/pkg/sentry/kernel/time/BUILD
index 2817aa3ba..e293d9a0f 100644
--- a/pkg/sentry/kernel/time/BUILD
+++ b/pkg/sentry/kernel/time/BUILD
@@ -13,8 +13,8 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/waiter",
],
)
diff --git a/pkg/sentry/kernel/time/time.go b/pkg/sentry/kernel/time/time.go
index 26aa34aa6..191b92811 100644
--- a/pkg/sentry/kernel/time/time.go
+++ b/pkg/sentry/kernel/time/time.go
@@ -22,8 +22,8 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -322,7 +322,7 @@ func SettingFromSpec(value time.Duration, interval time.Duration, c Clock) (Sett
// interpreted as a time relative to now.
func SettingFromSpecAt(value time.Duration, interval time.Duration, now Time) (Setting, error) {
if value < 0 {
- return Setting{}, syserror.EINVAL
+ return Setting{}, linuxerr.EINVAL
}
if value == 0 {
return Setting{Period: interval}, nil
@@ -338,7 +338,7 @@ func SettingFromSpecAt(value time.Duration, interval time.Duration, now Time) (S
// interpreted as an absolute time.
func SettingFromAbsSpec(value Time, interval time.Duration) (Setting, error) {
if value.Before(ZeroTime) {
- return Setting{}, syserror.EINVAL
+ return Setting{}, linuxerr.EINVAL
}
if value.IsZero() {
return Setting{Period: interval}, nil
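
SettingFromSpecAt keeps its contract: a negative value is EINVAL, zero disarms the timer but records the interval, and anything else arms it relative to now. A hedged sketch with a local Setting stand-in (the armed branch is elided from the hunk above and is reconstructed here from standard itimer semantics):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errInvalid = errors.New("EINVAL")

// setting is a local stand-in for ktime.Setting, illustrative only.
type setting struct {
	enabled bool
	next    time.Time
	period  time.Duration
}

// settingFromSpecAt follows the shape of the function above: negative values
// are invalid, zero disarms the timer, anything else arms it relative to now.
func settingFromSpecAt(value, interval time.Duration, now time.Time) (setting, error) {
	if value < 0 {
		return setting{}, errInvalid
	}
	if value == 0 {
		return setting{period: interval}, nil
	}
	return setting{enabled: true, next: now.Add(value), period: interval}, nil
}

func main() {
	now := time.Now()
	s, err := settingFromSpecAt(time.Second, time.Minute, now)
	fmt.Println(s.enabled, s.next.Sub(now), s.period, err) // true 1s 1m0s <nil>
}
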
diff --git a/pkg/sentry/kernel/timekeeper.go b/pkg/sentry/kernel/timekeeper.go
index 7c4fefb16..6255bae7a 100644
--- a/pkg/sentry/kernel/timekeeper.go
+++ b/pkg/sentry/kernel/timekeeper.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
sentrytime "gvisor.dev/gvisor/pkg/sentry/time"
"gvisor.dev/gvisor/pkg/sync"
+ "gvisor.dev/gvisor/pkg/tcpip"
)
// Timekeeper manages all of the kernel clocks.
@@ -39,6 +40,12 @@ type Timekeeper struct {
// It is set only once, by SetClocks.
clocks sentrytime.Clocks `state:"nosave"`
+ // realtimeClock is a ktime.Clock based on timekeeper's Realtime.
+ realtimeClock *timekeeperClock
+
+ // monotonicClock is a ktime.Clock based on timekeeper's Monotonic.
+ monotonicClock *timekeeperClock
+
// bootTime is the realtime when the system "booted". i.e., when
// SetClocks was called in the initial (not restored) run.
bootTime ktime.Time
@@ -90,10 +97,13 @@ type Timekeeper struct {
// NewTimekeeper does not take ownership of paramPage.
//
// SetClocks must be called on the returned Timekeeper before it is usable.
-func NewTimekeeper(mfp pgalloc.MemoryFileProvider, paramPage memmap.FileRange) (*Timekeeper, error) {
- return &Timekeeper{
+func NewTimekeeper(mfp pgalloc.MemoryFileProvider, paramPage memmap.FileRange) *Timekeeper {
+ t := Timekeeper{
params: NewVDSOParamPage(mfp, paramPage),
- }, nil
+ }
+ t.realtimeClock = &timekeeperClock{tk: &t, c: sentrytime.Realtime}
+ t.monotonicClock = &timekeeperClock{tk: &t, c: sentrytime.Monotonic}
+ return &t
}
// SetClocks the backing clock source.
@@ -167,6 +177,32 @@ func (t *Timekeeper) SetClocks(c sentrytime.Clocks) {
}
}
+var _ tcpip.Clock = (*Timekeeper)(nil)
+
+// Now implements tcpip.Clock.
+func (t *Timekeeper) Now() time.Time {
+ nsec, err := t.GetTime(sentrytime.Realtime)
+ if err != nil {
+ panic("timekeeper.GetTime(sentrytime.Realtime): " + err.Error())
+ }
+ return time.Unix(0, nsec)
+}
+
+// NowMonotonic implements tcpip.Clock.
+func (t *Timekeeper) NowMonotonic() tcpip.MonotonicTime {
+ nsec, err := t.GetTime(sentrytime.Monotonic)
+ if err != nil {
+ panic("timekeeper.GetTime(sentrytime.Monotonic): " + err.Error())
+ }
+ var mt tcpip.MonotonicTime
+ return mt.Add(time.Duration(nsec) * time.Nanosecond)
+}
+
+// AfterFunc implements tcpip.Clock.
+func (t *Timekeeper) AfterFunc(d time.Duration, f func()) tcpip.Timer {
+ return ktime.TcpipAfterFunc(t.realtimeClock, d, f)
+}
+
// startUpdater starts an update goroutine that keeps the clocks updated.
//
// mu must be held.
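
With the three methods above, Timekeeper now satisfies tcpip.Clock directly. A minimal sketch of a consumer of that clock shape; tcpip.MonotonicTime and tcpip.Timer are replaced by time.Duration and *time.Timer here so the example stays standard-library only:

// Sketch of a consumer of the three-method clock shape Timekeeper now has.
package main

import (
	"fmt"
	"time"
)

type clock interface {
	Now() time.Time
	NowMonotonic() time.Duration
	AfterFunc(d time.Duration, f func()) *time.Timer
}

type sysClock struct{ start time.Time }

func (c sysClock) Now() time.Time              { return time.Now() }
func (c sysClock) NowMonotonic() time.Duration { return time.Since(c.start) }
func (c sysClock) AfterFunc(d time.Duration, f func()) *time.Timer {
	return time.AfterFunc(d, f)
}

func main() {
	var c clock = sysClock{start: time.Now()}
	done := make(chan struct{})
	c.AfterFunc(10*time.Millisecond, func() { close(done) })
	<-done
	fmt.Println("wall:", c.Now().Format(time.RFC3339), "monotonic elapsed:", c.NowMonotonic() > 0)
}
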
diff --git a/pkg/sentry/kernel/timekeeper_test.go b/pkg/sentry/kernel/timekeeper_test.go
index dfc3c0719..b6039505a 100644
--- a/pkg/sentry/kernel/timekeeper_test.go
+++ b/pkg/sentry/kernel/timekeeper_test.go
@@ -17,12 +17,12 @@ package kernel
import (
"testing"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
sentrytime "gvisor.dev/gvisor/pkg/sentry/time"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// mockClocks is a sentrytime.Clocks that simply returns the times in the
@@ -45,7 +45,7 @@ func (c *mockClocks) GetTime(id sentrytime.ClockID) (int64, error) {
case sentrytime.Realtime:
return c.realtime, nil
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/loader/BUILD b/pkg/sentry/loader/BUILD
index 4c65215fa..560a0f33c 100644
--- a/pkg/sentry/loader/BUILD
+++ b/pkg/sentry/loader/BUILD
@@ -17,8 +17,10 @@ go_library(
deps = [
"//pkg/abi",
"//pkg/abi/linux",
+ "//pkg/abi/linux/errno",
"//pkg/context",
"//pkg/cpuid",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/rand",
@@ -35,7 +37,6 @@ go_library(
"//pkg/sentry/usage",
"//pkg/sentry/vfs",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go
index 8fc3e2a79..fb213d109 100644
--- a/pkg/sentry/loader/elf.go
+++ b/pkg/sentry/loader/elf.go
@@ -24,6 +24,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -31,7 +32,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -115,7 +115,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
log.Infof("Error reading ELF ident: %v", err)
// The entire ident array always exists.
if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = syserror.ENOEXEC
+ err = linuxerr.ENOEXEC
}
return elfInfo{}, err
}
@@ -123,22 +123,22 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
// Only some callers pre-check the ELF magic.
if !bytes.Equal(ident[:len(elfMagic)], []byte(elfMagic)) {
log.Infof("File is not an ELF")
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
// We only support 64-bit, little endian binaries
if class := elf.Class(ident[elf.EI_CLASS]); class != elf.ELFCLASS64 {
log.Infof("Unsupported ELF class: %v", class)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if endian := elf.Data(ident[elf.EI_DATA]); endian != elf.ELFDATA2LSB {
log.Infof("Unsupported ELF endianness: %v", endian)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if version := elf.Version(ident[elf.EI_VERSION]); version != elf.EV_CURRENT {
log.Infof("Unsupported ELF version: %v", version)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
// EI_OSABI is ignored by Linux, which is the only OS supported.
os := abi.Linux
@@ -150,7 +150,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
log.Infof("Error reading ELF header: %v", err)
// The entire header always exists.
if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = syserror.ENOEXEC
+ err = linuxerr.ENOEXEC
}
return elfInfo{}, err
}
@@ -165,7 +165,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
a = arch.ARM64
default:
log.Infof("Unsupported ELF machine %d", machine)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
var sharedObject bool
@@ -177,25 +177,25 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
sharedObject = true
default:
log.Infof("Unsupported ELF type %v", elfType)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if int(hdr.Phentsize) != prog64Size {
log.Infof("Unsupported phdr size %d", hdr.Phentsize)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
totalPhdrSize := prog64Size * int(hdr.Phnum)
if totalPhdrSize < prog64Size {
log.Warningf("No phdrs or total phdr size overflows: prog64Size: %d phnum: %d", prog64Size, int(hdr.Phnum))
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if totalPhdrSize > maxTotalPhdrSize {
log.Infof("Too many phdrs (%d): total size %d > %d", hdr.Phnum, totalPhdrSize, maxTotalPhdrSize)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if int64(hdr.Phoff) < 0 || int64(hdr.Phoff+uint64(totalPhdrSize)) < 0 {
ctx.Infof("Unsupported phdr offset %d", hdr.Phoff)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
phdrBuf := make([]byte, totalPhdrSize)
@@ -204,7 +204,7 @@ func parseHeader(ctx context.Context, f fullReader) (elfInfo, error) {
log.Infof("Error reading ELF phdrs: %v", err)
// If phdrs were specified, they should all exist.
if err == io.EOF || err == io.ErrUnexpectedEOF {
- err = syserror.ENOEXEC
+ err = linuxerr.ENOEXEC
}
return elfInfo{}, err
}
@@ -247,19 +247,19 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
if !ok {
// If offset != 0 we should have ensured this would fit.
ctx.Warningf("Computed segment load address overflows: %#x + %#x", phdr.Vaddr, offset)
- return syserror.ENOEXEC
+ return linuxerr.ENOEXEC
}
addr -= hostarch.Addr(adjust)
fileSize := phdr.Filesz + adjust
if fileSize < phdr.Filesz {
ctx.Infof("Computed segment file size overflows: %#x + %#x", phdr.Filesz, adjust)
- return syserror.ENOEXEC
+ return linuxerr.ENOEXEC
}
ms, ok := hostarch.Addr(fileSize).RoundUp()
if !ok {
ctx.Infof("fileSize %#x too large", fileSize)
- return syserror.ENOEXEC
+ return linuxerr.ENOEXEC
}
mapSize := uint64(ms)
@@ -320,7 +320,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
memSize := phdr.Memsz + adjust
if memSize < phdr.Memsz {
ctx.Infof("Computed segment mem size overflows: %#x + %#x", phdr.Memsz, adjust)
- return syserror.ENOEXEC
+ return linuxerr.ENOEXEC
}
// Allocate more anonymous pages if necessary.
@@ -332,7 +332,7 @@ func mapSegment(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, phdr
anonSize, ok := hostarch.Addr(memSize - mapSize).RoundUp()
if !ok {
ctx.Infof("extra anon pages too large: %#x", memSize-mapSize)
- return syserror.ENOEXEC
+ return linuxerr.ENOEXEC
}
// N.B. Linux uses vm_brk_flags to map these pages, which only
@@ -422,27 +422,27 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
// NOTE(b/37474556): Linux allows out-of-order
// segments, in violation of the spec.
ctx.Infof("PT_LOAD headers out-of-order. %#x < %#x", vaddr, end)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
var ok bool
end, ok = vaddr.AddLength(phdr.Memsz)
if !ok {
ctx.Infof("PT_LOAD header size overflows. %#x + %#x", vaddr, phdr.Memsz)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
case elf.PT_INTERP:
if phdr.Filesz < 2 {
ctx.Infof("PT_INTERP path too small: %v", phdr.Filesz)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
if phdr.Filesz > linux.PATH_MAX {
ctx.Infof("PT_INTERP path too big: %v", phdr.Filesz)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
if int64(phdr.Off) < 0 || int64(phdr.Off+phdr.Filesz) < 0 {
ctx.Infof("Unsupported PT_INTERP offset %d", phdr.Off)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
path := make([]byte, phdr.Filesz)
@@ -450,12 +450,12 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
if err != nil {
// If an interpreter was specified, it should exist.
ctx.Infof("Error reading PT_INTERP path: %v", err)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
if path[len(path)-1] != 0 {
ctx.Infof("PT_INTERP path not NUL-terminated: %v", path)
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
// Strip NUL-terminator and everything beyond from
@@ -476,7 +476,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
// the open path would return a different
// error.
ctx.Infof("PT_INTERP path is empty: %v", path)
- return loadedELF{}, syserror.EACCES
+ return loadedELF{}, linuxerr.EACCES
}
}
}
@@ -497,7 +497,7 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
totalSize, ok := totalSize.RoundUp()
if !ok {
ctx.Infof("ELF PT_LOAD segments too big")
- return loadedELF{}, syserror.ENOEXEC
+ return loadedELF{}, linuxerr.ENOEXEC
}
var err error
@@ -517,13 +517,13 @@ func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, in
start, ok = start.AddLength(uint64(offset))
if !ok {
ctx.Infof(fmt.Sprintf("Start %#x + offset %#x overflows?", start, offset))
- return loadedELF{}, syserror.EINVAL
+ return loadedELF{}, linuxerr.EINVAL
}
end, ok = end.AddLength(uint64(offset))
if !ok {
ctx.Infof(fmt.Sprintf("End %#x + offset %#x overflows?", end, offset))
- return loadedELF{}, syserror.EINVAL
+ return loadedELF{}, linuxerr.EINVAL
}
info.entry, ok = info.entry.AddLength(uint64(offset))
@@ -591,7 +591,7 @@ func loadInitialELF(ctx context.Context, m *mm.MemoryManager, fs *cpuid.FeatureS
// Check Image Compatibility.
if arch.Host != info.arch {
ctx.Warningf("Found mismatch for platform %s with ELF type %s", arch.Host.String(), info.arch.String())
- return loadedELF{}, nil, syserror.ENOEXEC
+ return loadedELF{}, nil, linuxerr.ENOEXEC
}
// Create the arch.Context now so we can prepare the mmap layout before
@@ -621,20 +621,20 @@ func loadInitialELF(ctx context.Context, m *mm.MemoryManager, fs *cpuid.FeatureS
func loadInterpreterELF(ctx context.Context, m *mm.MemoryManager, f fsbridge.File, initial loadedELF) (loadedELF, error) {
info, err := parseHeader(ctx, f)
if err != nil {
- if err == syserror.ENOEXEC {
+ if linuxerr.Equals(linuxerr.ENOEXEC, err) {
// Bad interpreter.
- err = syserror.ELIBBAD
+ err = linuxerr.ELIBBAD
}
return loadedELF{}, err
}
if info.os != initial.os {
ctx.Infof("Initial ELF OS %v and interpreter ELF OS %v differ", initial.os, info.os)
- return loadedELF{}, syserror.ELIBBAD
+ return loadedELF{}, linuxerr.ELIBBAD
}
if info.arch != initial.arch {
ctx.Infof("Initial ELF arch %v and interpreter ELF arch %v differ", initial.arch, info.arch)
- return loadedELF{}, syserror.ELIBBAD
+ return loadedELF{}, linuxerr.ELIBBAD
}
// The interpreter is not given a load offset, as its location does not
@@ -680,7 +680,7 @@ func loadELF(ctx context.Context, args LoadArgs) (loadedELF, arch.Context, error
if interp.interpreter != "" {
// No recursive interpreters!
ctx.Infof("Interpreter requires an interpreter")
- return loadedELF{}, nil, syserror.ENOEXEC
+ return loadedELF{}, nil, linuxerr.ENOEXEC
}
}
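
The substantive change in this file, beyond swapping error constants, is in loadInterpreterELF: the identity comparison err == syserror.ENOEXEC becomes linuxerr.Equals(linuxerr.ENOEXEC, err), which is assumed to match by errno value rather than by error identity. A standalone sketch of why a code-based comparison is the safer idiom once more than one error value can carry the same errno (the errno type and equals helper here are illustrative, not linuxerr's):

// Illustration of errno-value comparison versus identity comparison.
package main

import (
	"errors"
	"fmt"
)

type errno struct {
	code int
	msg  string
}

func (e *errno) Error() string { return e.msg }

var (
	enoexec        = &errno{code: 8, msg: "exec format error"}
	enoexecWrapped = &errno{code: 8, msg: "exec format error (from another layer)"}
)

// equals compares by errno code rather than by identity, the way the
// loader's checks now do via linuxerr.Equals (assumed semantics).
func equals(want *errno, err error) bool {
	var e *errno
	if errors.As(err, &e) {
		return e.code == want.code
	}
	return false
}

func main() {
	err := fmt.Errorf("parsing interpreter: %w", enoexecWrapped)
	fmt.Println(errors.Is(err, enoexec)) // false: identity-style check misses it
	fmt.Println(equals(enoexec, err))    // true: comparing by errno code matches
}
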
diff --git a/pkg/sentry/loader/interpreter.go b/pkg/sentry/loader/interpreter.go
index 3e302d92c..1ec0d7019 100644
--- a/pkg/sentry/loader/interpreter.go
+++ b/pkg/sentry/loader/interpreter.go
@@ -19,8 +19,8 @@ import (
"io"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -43,14 +43,14 @@ func parseInterpreterScript(ctx context.Context, filename string, f fsbridge.Fil
// Short read is OK.
if err != nil && err != io.ErrUnexpectedEOF {
if err == io.EOF {
- err = syserror.ENOEXEC
+ err = linuxerr.ENOEXEC
}
return "", []string{}, err
}
line = line[:n]
if !bytes.Equal(line[:2], []byte(interpreterScriptMagic)) {
- return "", []string{}, syserror.ENOEXEC
+ return "", []string{}, linuxerr.ENOEXEC
}
// Ignore #!.
line = line[2:]
@@ -82,7 +82,7 @@ func parseInterpreterScript(ctx context.Context, filename string, f fsbridge.Fil
if string(interp) == "" {
ctx.Infof("Interpreter script contains no interpreter: %v", line)
- return "", []string{}, syserror.ENOEXEC
+ return "", []string{}, linuxerr.ENOEXEC
}
// Build the new argument list:
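
parseInterpreterScript, touched above, verifies the "#!" magic, takes the first whitespace-delimited word as the interpreter, and keeps the remainder as an optional argument. A simplified standalone sketch of that parse (it omits the line-length cap and EOF handling of the real loader; errNoExec stands in for linuxerr.ENOEXEC):

// Standalone sketch of the "#!" parse performed by parseInterpreterScript.
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNoExec = errors.New("ENOEXEC") // stand-in for linuxerr.ENOEXEC

func parseShebang(line string) (interp string, arg string, err error) {
	if !strings.HasPrefix(line, "#!") {
		return "", "", errNoExec
	}
	rest := strings.TrimSpace(strings.TrimPrefix(line, "#!"))
	if rest == "" {
		return "", "", errNoExec // no interpreter named
	}
	if i := strings.IndexAny(rest, " \t"); i >= 0 {
		return rest[:i], strings.TrimSpace(rest[i+1:]), nil
	}
	return rest, "", nil
}

func main() {
	fmt.Println(parseShebang("#!/bin/sh -e")) // /bin/sh -e <nil>
	fmt.Println(parseShebang("\x7fELF..."))   // ENOEXEC
}
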
diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go
index 47e3775a3..2759ef71e 100644
--- a/pkg/sentry/loader/loader.go
+++ b/pkg/sentry/loader/loader.go
@@ -23,8 +23,10 @@ import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/abi/linux/errno"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/cpuid"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -33,7 +35,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/mm"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -89,7 +90,7 @@ type LoadArgs struct {
func openPath(ctx context.Context, args LoadArgs) (fsbridge.File, error) {
if args.Filename == "" {
ctx.Infof("cannot open empty name")
- return nil, syserror.ENOENT
+ return nil, linuxerr.ENOENT
}
// TODO(gvisor.dev/issue/160): Linux requires only execute permission,
@@ -112,7 +113,7 @@ func checkIsRegularFile(ctx context.Context, file fsbridge.File, filename string
}
if t != linux.ModeRegular {
ctx.Infof("%q is not a regular file: %v", filename, t)
- return syserror.EACCES
+ return linuxerr.EACCES
}
return nil
}
@@ -170,7 +171,7 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context
// (e.g., #!a).
if err != nil && err != io.ErrUnexpectedEOF {
if err == io.EOF {
- err = syserror.ENOEXEC
+ err = linuxerr.ENOEXEC
}
return loadedELF{}, nil, nil, nil, err
}
@@ -188,7 +189,7 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context
case bytes.Equal(hdr[:2], []byte(interpreterScriptMagic)):
if args.CloseOnExec {
- return loadedELF{}, nil, nil, nil, syserror.ENOENT
+ return loadedELF{}, nil, nil, nil, linuxerr.ENOENT
}
args.Filename, args.Argv, err = parseInterpreterScript(ctx, args.Filename, args.File, args.Argv)
if err != nil {
@@ -200,13 +201,13 @@ func loadExecutable(ctx context.Context, args LoadArgs) (loadedELF, arch.Context
default:
ctx.Infof("Unknown magic: %v", hdr)
- return loadedELF{}, nil, nil, nil, syserror.ENOEXEC
+ return loadedELF{}, nil, nil, nil, linuxerr.ENOEXEC
}
// Set to nil in case we loop on a Interpreter Script.
args.File = nil
}
- return loadedELF{}, nil, nil, nil, syserror.ELOOP
+ return loadedELF{}, nil, nil, nil, linuxerr.ELOOP
}
// Load loads args.File into a MemoryManager. If args.File is nil, the path
@@ -237,7 +238,7 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V
// loaded.end is available for its use.
e, ok := loaded.end.RoundUp()
if !ok {
- return 0, nil, "", syserr.NewDynamic(fmt.Sprintf("brk overflows: %#x", loaded.end), linux.ENOEXEC)
+ return 0, nil, "", syserr.NewDynamic(fmt.Sprintf("brk overflows: %#x", loaded.end), errno.ENOEXEC)
}
args.MemoryManager.BrkSetup(ctx, e)
@@ -294,15 +295,7 @@ func Load(ctx context.Context, args LoadArgs, extraAuxv []arch.AuxEntry, vdso *V
m.SetEnvvEnd(sl.EnvvEnd)
m.SetAuxv(auxv)
m.SetExecutable(ctx, file)
-
- symbolValue, err := getSymbolValueFromVDSO("rt_sigreturn")
- if err != nil {
- return 0, nil, "", syserr.NewDynamic(fmt.Sprintf("Failed to find rt_sigreturn in vdso: %v", err), syserr.FromError(err).ToLinux())
- }
-
- // Found rt_sigretrun.
- addr := uint64(vdsoAddr) + symbolValue - vdsoPrelink
- m.SetVDSOSigReturn(addr)
+ m.SetVDSOSigReturn(uint64(vdsoAddr) + vdsoSigreturnOffset - vdsoPrelink)
ac.SetIP(uintptr(loaded.entry))
ac.SetStack(uintptr(stack.Bottom))
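
loadExecutable, shown above, dispatches on the file's magic bytes: ELF images are loaded directly, "#!" scripts rewrite the filename and argument list and loop, and anything else is ENOEXEC; the loop is bounded and ends in ELOOP. A standalone sketch of that dispatch with an in-memory "filesystem" and an illustrative iteration bound (the real loader uses a similar small constant):

// Sketch of the interpreter-dispatch loop in loadExecutable, with stand-ins.
package main

import (
	"bytes"
	"errors"
	"fmt"
)

var (
	errNoExec = errors.New("ENOEXEC")
	errLoop   = errors.New("ELOOP")
)

// maxAttempts bounds interpreter indirection; illustrative value.
const maxAttempts = 6

func load(contents map[string][]byte, filename string) (string, error) {
	for i := 0; i < maxAttempts; i++ {
		hdr, ok := contents[filename]
		if !ok {
			return "", errors.New("ENOENT")
		}
		switch {
		case bytes.HasPrefix(hdr, []byte("\x7fELF")):
			return filename, nil // found the real executable
		case bytes.HasPrefix(hdr, []byte("#!")):
			// Re-dispatch on the named interpreter.
			first := bytes.SplitN(hdr, []byte("\n"), 2)[0]
			filename = string(bytes.TrimSpace(bytes.TrimPrefix(first, []byte("#!"))))
		default:
			return "", errNoExec
		}
	}
	return "", errLoop
}

func main() {
	fs := map[string][]byte{
		"/script": []byte("#!/bin/sh\necho hi\n"),
		"/bin/sh": []byte("\x7fELF..."),
		"/loop":   []byte("#!/loop\n"),
	}
	fmt.Println(load(fs, "/script")) // /bin/sh <nil>
	fmt.Println(load(fs, "/loop"))   // ELOOP
}
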
diff --git a/pkg/sentry/loader/vdso.go b/pkg/sentry/loader/vdso.go
index fd54261fd..bcee6aef6 100644
--- a/pkg/sentry/loader/vdso.go
+++ b/pkg/sentry/loader/vdso.go
@@ -19,10 +19,10 @@ import (
"debug/elf"
"fmt"
"io"
- "strings"
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
@@ -33,7 +33,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -58,7 +57,7 @@ type byteFullReader struct {
func (b *byteFullReader) ReadFull(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset >= int64(len(b.data)) {
return 0, io.EOF
@@ -101,14 +100,14 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
first = &info.phdrs[i]
if phdr.Off != 0 {
log.Warningf("First PT_LOAD segment has non-zero file offset")
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
}
memoryOffset := phdr.Vaddr - first.Vaddr
if memoryOffset != phdr.Off {
log.Warningf("PT_LOAD segment memory offset %#x != file offset %#x", memoryOffset, phdr.Off)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
// memsz larger than filesz means that extra zeroed space should be
@@ -117,24 +116,24 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
// zeroes.
if phdr.Memsz != phdr.Filesz {
log.Warningf("PT_LOAD segment memsz %#x != filesz %#x", phdr.Memsz, phdr.Filesz)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
start := hostarch.Addr(memoryOffset)
end, ok := start.AddLength(phdr.Memsz)
if !ok {
log.Warningf("PT_LOAD segment size overflows: %#x + %#x", start, end)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if uint64(end) > size {
log.Warningf("PT_LOAD segment end %#x extends beyond end of file %#x", end, size)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
if prev != nil {
if start < prevEnd {
log.Warningf("PT_LOAD segments out of order")
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
// We mprotect entire pages, so each segment must be in
@@ -143,7 +142,7 @@ func validateVDSO(ctx context.Context, f fullReader, size uint64) (elfInfo, erro
startPage := start.RoundDown()
if prevEndPage >= startPage {
log.Warningf("PT_LOAD segments share a page: %#x", prevEndPage)
- return elfInfo{}, syserror.ENOEXEC
+ return elfInfo{}, linuxerr.ENOEXEC
}
}
prev = &info.phdrs[i]
@@ -177,27 +176,6 @@ type VDSO struct {
phdrs []elf.ProgHeader `state:".([]elfProgHeader)"`
}
-// getSymbolValueFromVDSO returns the specific symbol value in vdso.so.
-func getSymbolValueFromVDSO(symbol string) (uint64, error) {
- f, err := elf.NewFile(bytes.NewReader(vdsodata.Binary))
- if err != nil {
- return 0, err
- }
- syms, err := f.Symbols()
- if err != nil {
- return 0, err
- }
-
- for _, sym := range syms {
- if elf.ST_BIND(sym.Info) != elf.STB_LOCAL && sym.Section != elf.SHN_UNDEF {
- if strings.Contains(sym.Name, symbol) {
- return sym.Value, nil
- }
- }
- }
- return 0, fmt.Errorf("no %v in vdso.so", symbol)
-}
-
// PrepareVDSO validates the system VDSO and returns a VDSO, containing the
// param page for updating by the kernel.
func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
@@ -270,11 +248,11 @@ func PrepareVDSO(mfp pgalloc.MemoryFileProvider) (*VDSO, error) {
func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (hostarch.Addr, error) {
if v.os != bin.os {
ctx.Warningf("Binary ELF OS %v and VDSO ELF OS %v differ", bin.os, v.os)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
if v.arch != bin.arch {
ctx.Warningf("Binary ELF arch %v and VDSO ELF arch %v differ", bin.arch, v.arch)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
// Reserve address space for the VDSO and its parameter page, which is
@@ -347,35 +325,35 @@ func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF)
segAddr, ok := vdsoAddr.AddLength(memoryOffset)
if !ok {
ctx.Warningf("PT_LOAD segment address overflows: %#x + %#x", segAddr, memoryOffset)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
segPage := segAddr.RoundDown()
segSize := hostarch.Addr(phdr.Memsz)
segSize, ok = segSize.AddLength(segAddr.PageOffset())
if !ok {
ctx.Warningf("PT_LOAD segment memsize %#x + offset %#x overflows", phdr.Memsz, segAddr.PageOffset())
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
segSize, ok = segSize.RoundUp()
if !ok {
ctx.Warningf("PT_LOAD segment size overflows: %#x", phdr.Memsz+segAddr.PageOffset())
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
segEnd, ok := segPage.AddLength(uint64(segSize))
if !ok {
ctx.Warningf("PT_LOAD segment range overflows: %#x + %#x", segAddr, segSize)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
if segEnd > vdsoEnd {
ctx.Warningf("PT_LOAD segment ends beyond VDSO: %#x > %#x", segEnd, vdsoEnd)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
perms := progFlagsAsPerms(phdr.Flags)
if perms != hostarch.Read {
if err := m.MProtect(segPage, uint64(segSize), perms, false); err != nil {
ctx.Warningf("Unable to set PT_LOAD segment protections %+v at [%#x, %#x): %v", perms, segAddr, segEnd, err)
- return 0, syserror.ENOEXEC
+ return 0, linuxerr.ENOEXEC
}
}
}
@@ -388,3 +366,21 @@ func (v *VDSO) Release(ctx context.Context) {
v.ParamPage.DecRef(ctx)
v.vdso.DecRef(ctx)
}
+
+var vdsoSigreturnOffset = func() uint64 {
+ f, err := elf.NewFile(bytes.NewReader(vdsodata.Binary))
+ if err != nil {
+ panic(fmt.Sprintf("failed to parse vdso.so as ELF file: %v", err))
+ }
+ syms, err := f.Symbols()
+ if err != nil {
+ panic(fmt.Sprintf("failed to read symbols from vdso.so: %v", err))
+ }
+ const sigreturnSymbol = "__kernel_rt_sigreturn"
+ for _, sym := range syms {
+ if elf.ST_BIND(sym.Info) != elf.STB_LOCAL && sym.Section != elf.SHN_UNDEF && sym.Name == sigreturnSymbol {
+ return sym.Value
+ }
+ }
+ panic(fmt.Sprintf("no symbol %q in vdso.so", sigreturnSymbol))
+}()
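
The new vdsoSigreturnOffset above resolves __kernel_rt_sigreturn once from the embedded vdso at package initialization, panicking if it is missing, and replaces the per-exec getSymbolValueFromVDSO lookup that Load used to perform. A standalone sketch of the same defined, non-local symbol lookup, run against an ELF file on disk instead of vdsodata.Binary:

// Standalone sketch of the symbol-offset lookup done by vdsoSigreturnOffset.
package main

import (
	"debug/elf"
	"fmt"
	"os"
)

// symbolOffset returns the value of the named, defined, non-local symbol.
func symbolOffset(path, name string) (uint64, error) {
	f, err := elf.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	syms, err := f.Symbols()
	if err != nil {
		return 0, err
	}
	for _, sym := range syms {
		if elf.ST_BIND(sym.Info) != elf.STB_LOCAL && sym.Section != elf.SHN_UNDEF && sym.Name == name {
			return sym.Value, nil
		}
	}
	return 0, fmt.Errorf("no symbol %q in %s", name, path)
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: symoff <elf-file>")
		os.Exit(2)
	}
	off, err := symbolOffset(os.Args[1], "__kernel_rt_sigreturn")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("%#x\n", off)
}
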
diff --git a/pkg/sentry/memmap/BUILD b/pkg/sentry/memmap/BUILD
index c30e88725..a89bfa680 100644
--- a/pkg/sentry/memmap/BUILD
+++ b/pkg/sentry/memmap/BUILD
@@ -54,7 +54,6 @@ go_library(
"//pkg/hostarch",
"//pkg/log",
"//pkg/safemem",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD
index b417c2da7..b7d782b7f 100644
--- a/pkg/sentry/mm/BUILD
+++ b/pkg/sentry/mm/BUILD
@@ -125,6 +125,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/atomicbitops",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/refs",
@@ -143,7 +144,6 @@ go_library(
"//pkg/sentry/platform",
"//pkg/sentry/usage",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/tcpip/buffer",
"//pkg/usermem",
],
@@ -156,6 +156,7 @@ go_test(
library = ":mm",
deps = [
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/sentry/arch",
"//pkg/sentry/contexttest",
@@ -163,7 +164,6 @@ go_test(
"//pkg/sentry/memmap",
"//pkg/sentry/pgalloc",
"//pkg/sentry/platform",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go
index 346866d3c..d71d64580 100644
--- a/pkg/sentry/mm/aio_context.go
+++ b/pkg/sentry/mm/aio_context.go
@@ -17,12 +17,12 @@ package mm
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -77,15 +77,6 @@ func (mm *MemoryManager) destroyAIOContextLocked(ctx context.Context, id uint64)
return nil
}
- // Only unmaps after it assured that the address is a valid aio context to
- // prevent random memory from been unmapped.
- //
- // Note: It's possible to unmap this address and map something else into
- // the same address. Then it would be unmapping memory that it doesn't own.
- // This is, however, the way Linux implements AIO. Keeps the same [weird]
- // semantics in case anyone relies on it.
- mm.MUnmap(ctx, hostarch.Addr(id), aioRingBufferSize)
-
delete(mm.aioManager.contexts, id)
aioCtx.destroy()
return aioCtx
@@ -158,11 +149,11 @@ func (ctx *AIOContext) Prepare() error {
defer ctx.mu.Unlock()
if ctx.dead {
// Context died after the caller looked it up.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if ctx.outstanding >= ctx.maxOutstanding {
// Context is busy.
- return syserror.EAGAIN
+ return linuxerr.EAGAIN
}
ctx.outstanding++
return nil
@@ -297,7 +288,7 @@ func (m *aioMappable) InodeID() uint64 {
// Msync implements memmap.MappingIdentity.Msync.
func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error {
// Linux: aio_ring_fops.fsync == NULL
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// AddMapping implements memmap.Mappable.AddMapping.
@@ -305,7 +296,7 @@ func (m *aioMappable) AddMapping(_ context.Context, _ memmap.MappingSpace, ar ho
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(ar.Length()) != aioRingBufferSize {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
return nil
}
@@ -319,13 +310,13 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
// Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap()
// sets VM_DONTEXPAND).
if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// Require that the mapping correspond to a live AIOContext. Compare
// Linux's fs/aio.c:aio_ring_mremap().
mm, ok := ms.(*MemoryManager)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
am := &mm.aioManager
am.mu.Lock()
@@ -333,12 +324,12 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
oldID := uint64(srcAR.Start)
aioCtx, ok := am.contexts[oldID]
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
aioCtx.mu.Lock()
defer aioCtx.mu.Unlock()
if aioCtx.dead {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Use the new ID for the AIOContext.
am.contexts[uint64(dstAR.Start)] = aioCtx
@@ -350,7 +341,7 @@ func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, s
func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
- err = &memmap.BusError{syserror.EFAULT}
+ err = &memmap.BusError{linuxerr.EFAULT}
}
if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 {
return []memmap.Translation{
@@ -399,7 +390,7 @@ func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint
id := uint64(addr)
if !mm.aioManager.newAIOContext(events, id) {
mm.MUnmap(ctx, addr, aioRingBufferSize)
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
return id, nil
}
@@ -411,6 +402,15 @@ func (mm *MemoryManager) DestroyAIOContext(ctx context.Context, id uint64) *AIOC
return nil
}
+ // Only unmap after it is assured that the address is a valid aio context,
+ // to prevent random memory from being unmapped.
+ //
+ // Note: It's possible to unmap this address and map something else into
+ // the same address. Then it would be unmapping memory that it doesn't own.
+ // This is, however, the way Linux implements AIO. Keep the same [weird]
+ // semantics in case anyone relies on it.
+ mm.MUnmap(ctx, hostarch.Addr(id), aioRingBufferSize)
+
mm.aioManager.mu.Lock()
defer mm.aioManager.mu.Unlock()
return mm.destroyAIOContextLocked(ctx, id)
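
The hunk above moves the ring-buffer MUnmap out of destroyAIOContextLocked and into DestroyAIOContext, before aioManager.mu is taken; keeping MUnmap's acquisition of mm.mappingMu outside the AIO locks is consistent with the lock order documented in mm.go, though that motivation is an inference here, not stated in the diff. A toy sketch of the resulting ordering (validate, do the outer-lock work, then take the inner lock), with stand-in field names:

// Toy illustration of doing outer-lock work before taking the inner lock.
package main

import (
	"fmt"
	"sync"
)

type mm struct {
	mappingMu sync.Mutex // outer lock in the documented order
	aioMu     sync.Mutex // inner lock; must not be held while taking mappingMu
	contexts  map[uint64]bool
}

func (m *mm) munmap(id uint64) {
	m.mappingMu.Lock()
	defer m.mappingMu.Unlock()
	// ... unmap the ring buffer ...
}

func (m *mm) destroyAIOContext(id uint64) bool {
	// Validate first (read-only lookup), then unmap while holding only the
	// outer lock, and only then take the inner lock to drop the context.
	m.aioMu.Lock()
	_, ok := m.contexts[id]
	m.aioMu.Unlock()
	if !ok {
		return false
	}
	m.munmap(id)
	m.aioMu.Lock()
	defer m.aioMu.Unlock()
	delete(m.contexts, id)
	return true
}

func main() {
	m := &mm{contexts: map[uint64]bool{42: true}}
	fmt.Println(m.destroyAIOContext(42), m.destroyAIOContext(7)) // true false
}
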
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index 16f318ab3..5fcfeb473 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -16,10 +16,10 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -97,14 +97,14 @@ func translateIOError(ctx context.Context, err error) error {
if logIOErrors {
ctx.Debugf("MM I/O error: %v", err)
}
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// CopyOut implements usermem.IO.CopyOut.
func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(src)))
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if len(src) == 0 {
@@ -147,7 +147,7 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src
func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
ar, ok := mm.CheckIORange(addr, int64(len(dst)))
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if len(dst) == 0 {
@@ -190,7 +190,7 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst [
func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
ar, ok := mm.CheckIORange(addr, toZero)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if toZero == 0 {
@@ -231,7 +231,7 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZe
// CopyOutFrom implements usermem.IO.CopyOutFrom.
func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if ars.NumBytes() == 0 {
@@ -276,7 +276,7 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRange
// CopyInTo implements usermem.IO.CopyInTo.
func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
if !mm.checkIOVec(ars) {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
if ars.NumBytes() == 0 {
@@ -314,7 +314,7 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq
func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -339,7 +339,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new
_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
@@ -357,7 +357,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new
func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -382,7 +382,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch
_, err := mm.withInternalMappings(ctx, ar, hostarch.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
@@ -400,7 +400,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch
func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opts usermem.IOOpts) (uint32, error) {
ar, ok := mm.CheckIORange(addr, 4)
if !ok {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Do AddressSpace IO if applicable.
@@ -425,7 +425,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opt
_, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
// Atomicity is unachievable across mappings.
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
im := ims.Head()
var err error
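
Every usermem.IO method in this file starts with CheckIORange and returns EFAULT when the requested range does not fit the user address space. A standalone sketch of that guard with simplified types (maxUserAddr is an illustrative ceiling, and errFault stands in for linuxerr.EFAULT):

// Standalone sketch of the range check guarding each usermem.IO method.
package main

import (
	"errors"
	"fmt"
)

var errFault = errors.New("EFAULT") // stand-in for linuxerr.EFAULT

const maxUserAddr = uint64(1) << 47 // illustrative user address-space ceiling

// checkIORange reports whether [addr, addr+length) is a valid user range.
func checkIORange(addr uint64, length int64) (end uint64, err error) {
	if length < 0 {
		return 0, errFault
	}
	end = addr + uint64(length)
	if end < addr || end > maxUserAddr {
		return 0, errFault // wrapped or past the end of the address space
	}
	return end, nil
}

func main() {
	fmt.Println(checkIORange(0x1000, 4096))       // 8192 <nil>
	fmt.Println(checkIORange(^uint64(0)-10, 100)) // 0 EFAULT
}
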
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index 57969b26c..0fca59b64 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -28,6 +28,7 @@
// memmap.File locks
// mm.aioManager.mu
// mm.AIOContext.mu
+// kernel.TaskSet.mu
//
// Only mm.MemoryManager.Fork is permitted to lock mm.MemoryManager.activeMu in
// multiple mm.MemoryManagers, as it does so in a well-defined order (forked
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
index 1304b0a2f..84cb8158d 100644
--- a/pkg/sentry/mm/mm_test.go
+++ b/pkg/sentry/mm/mm_test.go
@@ -18,6 +18,7 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -171,7 +171,7 @@ func TestIOAfterUnmap(t *testing.T) {
}
n, err = mm.CopyIn(ctx, addr, b, usermem.IOOpts{})
- if err != syserror.EFAULT {
+ if !linuxerr.Equals(linuxerr.EFAULT, err) {
t.Errorf("CopyIn got err %v want EFAULT", err)
}
if n != 0 {
@@ -212,7 +212,7 @@ func TestIOAfterMProtect(t *testing.T) {
// Without IgnorePermissions, CopyOut should no longer succeed.
n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{})
- if err != syserror.EFAULT {
+ if !linuxerr.Equals(linuxerr.EFAULT, err) {
t.Errorf("CopyOut got err %v want EFAULT", err)
}
if n != 0 {
@@ -249,7 +249,7 @@ func TestAIOPrepareAfterDestroy(t *testing.T) {
mm.DestroyAIOContext(ctx, id)
// Prepare should fail because aioCtx should be destroyed.
- if err := aioCtx.Prepare(); err != syserror.EINVAL {
+ if err := aioCtx.Prepare(); !linuxerr.Equals(linuxerr.EINVAL, err) {
t.Errorf("aioCtx.Prepare got err %v want nil", err)
} else if err == nil {
aioCtx.CancelPendingRequest()
diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go
index 5583f62b2..05cdcd8ae 100644
--- a/pkg/sentry/mm/pma.go
+++ b/pkg/sentry/mm/pma.go
@@ -18,12 +18,12 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
@@ -116,7 +116,7 @@ func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar
var alignerr error
if !ok {
end = ar.End.RoundDown()
- alignerr = syserror.EFAULT
+ alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
@@ -162,7 +162,7 @@ func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.Addr
var alignerr error
if !ok {
end = ar.End.RoundDown()
- alignerr = syserror.EFAULT
+ alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
@@ -324,20 +324,37 @@ func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIter
panic(fmt.Sprintf("pma %v needs to be copied for writing, but is not readable: %v", pseg.Range(), oldpma))
}
}
- // The majority of copy-on-write breaks on executable pages
- // come from:
- //
- // - The ELF loader, which must zero out bytes on the last
- // page of each segment after the end of the segment.
- //
- // - gdb's use of ptrace to insert breakpoints.
- //
- // Neither of these cases has enough spatial locality to
- // benefit from copying nearby pages, so if the vma is
- // executable, only copy the pages required.
var copyAR hostarch.AddrRange
- if vseg.ValuePtr().effectivePerms.Execute {
+ if vma := vseg.ValuePtr(); vma.effectivePerms.Execute {
+ // The majority of copy-on-write breaks on executable
+ // pages come from:
+ //
+ // - The ELF loader, which must zero out bytes on the
+ // last page of each segment after the end of the
+ // segment.
+ //
+ // - gdb's use of ptrace to insert breakpoints.
+ //
+ // Neither of these cases has enough spatial locality
+ // to benefit from copying nearby pages, so if the vma
+ // is executable, only copy the pages required.
copyAR = pseg.Range().Intersect(ar)
+ } else if vma.growsDown {
+ // In most cases, the new process will not use most of
+ // its stack before exiting or invoking execve(); it is
+ // especially unlikely to return very far down its call
+ // stack, since async-signal-safety concerns in
+ // multithreaded programs prevent the new process from
+ // being able to do much. So only copy up to one page
+ // before and after the pages required.
+ stackMaskAR := ar
+ if newStart := stackMaskAR.Start - hostarch.PageSize; newStart < stackMaskAR.Start {
+ stackMaskAR.Start = newStart
+ }
+ if newEnd := stackMaskAR.End + hostarch.PageSize; newEnd > stackMaskAR.End {
+ stackMaskAR.End = newEnd
+ }
+ copyAR = pseg.Range().Intersect(stackMaskAR)
} else {
copyAR = pseg.Range().Intersect(maskAR)
}
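
The rewritten copy-on-write block above chooses how wide a window to copy around the faulting range: only the required pages for executable vmas, the required pages widened by at most one page on each side for growsDown stacks, and the full mask range otherwise. A standalone sketch of that selection, using plain integer ranges instead of the mm package's segment and vma types:

// Standalone sketch of the copy-window selection above.
package main

import "fmt"

const pageSize = 4096

type addrRange struct{ start, end uint64 }

func (r addrRange) intersect(o addrRange) addrRange {
	if o.start > r.start {
		r.start = o.start
	}
	if o.end < r.end {
		r.end = o.end
	}
	if r.start > r.end {
		return addrRange{}
	}
	return r
}

// copyWindow mirrors the executable / growsDown / default cases: executables
// copy only what is required, stacks widen by at most a page on each side
// (guarding against overflow), and everything else copies the mask range.
func copyWindow(pma, required, mask addrRange, executable, growsDown bool) addrRange {
	switch {
	case executable:
		return pma.intersect(required)
	case growsDown:
		widened := required
		if s := widened.start - pageSize; s < widened.start {
			widened.start = s
		}
		if e := widened.end + pageSize; e > widened.end {
			widened.end = e
		}
		return pma.intersect(widened)
	default:
		return pma.intersect(mask)
	}
}

func main() {
	pma := addrRange{0x10000, 0x40000}
	req := addrRange{0x20000, 0x21000}
	mask := addrRange{0x18000, 0x30000}
	fmt.Printf("%#x\n", copyWindow(pma, req, mask, false, true)) // {0x1f000 0x22000}
}
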
diff --git a/pkg/sentry/mm/shm.go b/pkg/sentry/mm/shm.go
index 3130be80c..94d5112a1 100644
--- a/pkg/sentry/mm/shm.go
+++ b/pkg/sentry/mm/shm.go
@@ -16,16 +16,16 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/shm"
- "gvisor.dev/gvisor/pkg/syserror"
)
// DetachShm unmaps a sysv shared memory segment.
func (mm *MemoryManager) DetachShm(ctx context.Context, addr hostarch.Addr) error {
if addr != addr.RoundDown() {
// "... shmaddr is not aligned on a page boundary." - man shmdt(2)
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
var detached *shm.Shm
@@ -48,7 +48,7 @@ func (mm *MemoryManager) DetachShm(ctx context.Context, addr hostarch.Addr) erro
if detached == nil {
// There is no shared memory segment attached at addr.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Remove all vmas that could have been created by the same attach.
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
index e748b7ff8..69c6e77a7 100644
--- a/pkg/sentry/mm/special_mappable.go
+++ b/pkg/sentry/mm/special_mappable.go
@@ -16,11 +16,11 @@ package mm
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
// SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with
@@ -94,7 +94,7 @@ func (*SpecialMappable) CopyMapping(context.Context, memmap.MappingSpace, hostar
func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
var err error
if required.End > m.fr.Length() {
- err = &memmap.BusError{syserror.EFAULT}
+ err = &memmap.BusError{linuxerr.EFAULT}
}
if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 {
return []memmap.Translation{
@@ -144,11 +144,11 @@ func (m *SpecialMappable) Length() uint64 {
// leak (b/143656263). Delete this function along with VFS1.
func NewSharedAnonMappable(length uint64, mfp pgalloc.MemoryFileProvider) (*SpecialMappable, error) {
if length == 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
alignedLen, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
fr, err := mfp.MemoryFile().Allocate(uint64(alignedLen), usage.Anonymous)
if err != nil {
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
index 7ad6b7c21..dc12ad357 100644
--- a/pkg/sentry/mm/syscalls.go
+++ b/pkg/sentry/mm/syscalls.go
@@ -21,12 +21,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/kernel/futex"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
)
// HandleUserFault handles an application page fault. sp is the faulting
@@ -36,7 +36,7 @@ import (
func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr, at hostarch.AccessType, sp hostarch.Addr) error {
ar, ok := addr.RoundDown().ToRange(hostarch.PageSize)
if !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
// Don't bother trying existingPMAsLocked; in most cases, if we did have
@@ -74,22 +74,22 @@ func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr hostarch.Addr
// MMap establishes a memory mapping.
func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostarch.Addr, error) {
if opts.Length == 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
length, ok := hostarch.Addr(opts.Length).RoundUp()
if !ok {
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
opts.Length = uint64(length)
if opts.Mappable != nil {
// Offset must be aligned.
if hostarch.Addr(opts.Offset).RoundDown() != hostarch.Addr(opts.Offset) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Offset + length must not overflow.
if end := opts.Offset + opts.Length; end < opts.Offset {
- return 0, syserror.ENOMEM
+ return 0, linuxerr.EOVERFLOW
}
} else {
opts.Offset = 0
@@ -99,19 +99,19 @@ func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (hostar
// MAP_FIXED requires addr to be page-aligned; non-fixed mappings
// don't.
if opts.Fixed {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
opts.Addr = opts.Addr.RoundDown()
}
if !opts.MaxPerms.SupersetOf(opts.Perms) {
- return 0, syserror.EACCES
+ return 0, linuxerr.EACCES
}
if opts.Unmap && !opts.Fixed {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if opts.GrowsDown && opts.Mappable != nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get the new vma.
@@ -203,6 +203,7 @@ func (mm *MemoryManager) populateVMA(ctx context.Context, vseg vmaIterator, ar h
// * vseg.Range().IsSupersetOf(ar).
//
// Postconditions: mm.mappingMu will be unlocked.
+// +checklocksrelease:mm.mappingMu
func (mm *MemoryManager) populateVMAAndUnlock(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, precommit bool) {
// See populateVMA above for commentary.
if !vseg.ValuePtr().effectivePerms.Any() {
@@ -251,7 +252,7 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, erro
ctx.Warningf("Capping stack size from RLIMIT_STACK of %v down to %v.", sz, maxStackSize)
sz = maxStackSize
} else if sz == 0 {
- return hostarch.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, linuxerr.ENOMEM
}
szaddr := hostarch.Addr(sz)
ctx.Debugf("Allocating stack with size of %v bytes", sz)
@@ -260,7 +261,7 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, erro
// randomization can't be disabled.
stackEnd := mm.layout.MaxAddr - hostarch.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
if stackEnd < szaddr {
- return hostarch.AddrRange{}, syserror.ENOMEM
+ return hostarch.AddrRange{}, linuxerr.ENOMEM
}
stackStart := stackEnd - szaddr
mm.mappingMu.Lock()
@@ -281,18 +282,18 @@ func (mm *MemoryManager) MapStack(ctx context.Context) (hostarch.AddrRange, erro
// MUnmap implements the semantics of Linux's munmap(2).
func (mm *MemoryManager) MUnmap(ctx context.Context, addr hostarch.Addr, length uint64) error {
if addr != addr.RoundDown() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
la, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -331,7 +332,7 @@ const (
func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (hostarch.Addr, error) {
// "Note that old_address has to be page aligned." - mremap(2)
if oldAddr.RoundDown() != oldAddr {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Linux treats an old_size that rounds up to 0 as 0, which is otherwise a
@@ -340,13 +341,13 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
oldSize = uint64(oldSizeAddr)
newSizeAddr, ok := hostarch.Addr(newSize).RoundUp()
if !ok || newSizeAddr == 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
newSize = uint64(newSizeAddr)
oldEnd, ok := oldAddr.AddLength(oldSize)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -355,7 +356,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// All cases require that a vma exists at oldAddr.
vseg := mm.vmas.FindSegment(oldAddr)
if !vseg.Ok() {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Behavior matrix:
@@ -379,7 +380,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
if newLockedAS := mm.lockedAS - oldSize + newSize; newLockedAS > mlockLimit {
- return 0, syserror.EAGAIN
+ return 0, linuxerr.EAGAIN
}
}
}
@@ -402,7 +403,7 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// "Grow" the existing vma by creating a new mergeable one.
vma := vseg.ValuePtr()
@@ -450,15 +451,15 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
case MRemapMustMove:
newAddr := opts.NewAddr
if newAddr.RoundDown() != newAddr {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
var ok bool
newAR, ok = newAddr.ToRange(newSize)
if !ok {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if (hostarch.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that the new region is valid.
@@ -492,19 +493,19 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// Check that oldEnd maps to the same vma as oldAddr.
if vseg.End() < oldEnd {
- return 0, syserror.EFAULT
+ return 0, linuxerr.EFAULT
}
// Check against RLIMIT_AS.
newUsageAS := mm.usageAS - uint64(oldAR.Length()) + uint64(newAR.Length())
if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
if vma := vseg.ValuePtr(); vma.mappable != nil {
// Check that offset+length does not overflow.
if vma.off+uint64(newAR.Length()) < vma.off {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Inform the Mappable, if any, of the new mapping.
if err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start), vma.canWriteMappableLocked()); err != nil {
@@ -590,18 +591,18 @@ func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr hostarch.Addr, oldS
// MProtect implements the semantics of Linux's mprotect(2).
func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms hostarch.AccessType, growsDown bool) error {
if addr.RoundDown() != addr {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
return nil
}
rlength, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
ar, ok := addr.ToRange(uint64(rlength))
if !ok {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
effectivePerms := realPerms.Effective()
@@ -614,19 +615,19 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
// the non-growsDown case.
vseg := mm.vmas.LowerBoundSegment(ar.Start)
if !vseg.Ok() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
if growsDown {
if !vseg.ValuePtr().growsDown {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if ar.End <= vseg.Start() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
ar.Start = vseg.Start()
} else {
if ar.Start < vseg.Start() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
}
@@ -644,7 +645,7 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
// Check for permission validity before splitting vmas, for consistency
// with Linux.
if !vseg.ValuePtr().maxPerms.SupersetOf(effectivePerms) {
- return syserror.EACCES
+ return linuxerr.EACCES
}
vseg = mm.vmas.Isolate(vseg, ar)
@@ -686,7 +687,7 @@ func (mm *MemoryManager) MProtect(addr hostarch.Addr, length uint64, realPerms h
}
vseg, _ = vseg.NextNonEmpty()
if !vseg.Ok() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
}
}
@@ -711,7 +712,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if addr < mm.brk.Start {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.EINVAL
+ return addr, linuxerr.EINVAL
}
// TODO(gvisor.dev/issue/156): This enforces RLIMIT_DATA, but is
@@ -722,7 +723,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if uint64(addr-mm.brk.Start) > limits.FromContext(ctx).Get(limits.Data).Cur {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.ENOMEM
+ return addr, linuxerr.ENOMEM
}
oldbrkpg, _ := mm.brk.End.RoundUp()
@@ -730,7 +731,7 @@ func (mm *MemoryManager) Brk(ctx context.Context, addr hostarch.Addr) (hostarch.
if !ok {
addr = mm.brk.End
mm.mappingMu.Unlock()
- return addr, syserror.EFAULT
+ return addr, linuxerr.EFAULT
}
switch {
@@ -780,7 +781,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
la, _ := hostarch.Addr(length + addr.PageOffset()).RoundUp()
ar, ok := addr.RoundDown().ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -792,11 +793,11 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
mm.mappingMu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
if newLockedAS := mm.lockedAS + uint64(ar.Length()) - mm.mlockedBytesRangeLocked(ar); newLockedAS > mlockLimit {
mm.mappingMu.Unlock()
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
}
}
@@ -833,7 +834,7 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
mm.vmas.MergeAdjacent(ar)
if unmapped {
mm.mappingMu.Unlock()
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
if mode == memmap.MLockEager {
@@ -848,18 +849,18 @@ func (mm *MemoryManager) MLock(ctx context.Context, addr hostarch.Addr, length u
// case, which is converted to ENOMEM by mlock.
mm.activeMu.Unlock()
mm.mappingMu.RUnlock()
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
_, _, err := mm.getPMAsLocked(ctx, vseg, vseg.Range().Intersect(ar), hostarch.NoAccess)
if err != nil {
mm.activeMu.Unlock()
mm.mappingMu.RUnlock()
// Linux: mm/mlock.c:__mlock_posix_error_return()
- if err == syserror.EFAULT {
- return syserror.ENOMEM
+ if linuxerr.Equals(linuxerr.EFAULT, err) {
+ return linuxerr.ENOMEM
}
- if err == syserror.ENOMEM {
- return syserror.EAGAIN
+ if linuxerr.Equals(linuxerr.ENOMEM, err) {
+ return linuxerr.EAGAIN
}
return err
}
@@ -898,7 +899,7 @@ type MLockAllOpts struct {
// depending on opts.
func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error {
if !opts.Current && !opts.Future {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -911,11 +912,11 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
mm.mappingMu.Unlock()
- return syserror.EPERM
+ return linuxerr.EPERM
}
if uint64(mm.vmas.Span()) > mlockLimit {
mm.mappingMu.Unlock()
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
}
}
@@ -970,7 +971,7 @@ func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint6
defer mm.mappingMu.RUnlock()
vseg := mm.vmas.FindSegment(addr)
if !vseg.Ok() {
- return 0, 0, syserror.EFAULT
+ return 0, 0, linuxerr.EFAULT
}
vma := vseg.ValuePtr()
return vma.numaPolicy, vma.numaNodemask, nil
@@ -979,13 +980,13 @@ func (mm *MemoryManager) NumaPolicy(addr hostarch.Addr) (linux.NumaPolicy, uint6
// SetNumaPolicy implements the semantics of Linux's mbind().
func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy linux.NumaPolicy, nodemask uint64) error {
if !addr.IsPageAligned() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Linux allows this to overflow.
la, _ := hostarch.Addr(length).RoundUp()
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if ar.Length() == 0 {
return nil
@@ -1003,7 +1004,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
if !vseg.Ok() || lastEnd < vseg.Start() {
// "EFAULT: ... there was an unmapped hole in the specified memory
// range specified [sic] by addr and len." - mbind(2)
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
vseg = mm.vmas.Isolate(vseg, ar)
vma := vseg.ValuePtr()
@@ -1021,7 +1022,7 @@ func (mm *MemoryManager) SetNumaPolicy(addr hostarch.Addr, length uint64, policy
func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork bool) error {
ar, ok := addr.ToRange(length)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.Lock()
@@ -1038,7 +1039,7 @@ func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork
}
if mm.vmas.SpanRange(ar) != ar.Length() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
return nil
}
@@ -1047,7 +1048,7 @@ func (mm *MemoryManager) SetDontFork(addr hostarch.Addr, length uint64, dontfork
func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
ar, ok := addr.ToRange(length)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
mm.mappingMu.RLock()
@@ -1063,7 +1064,7 @@ func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
for vseg := mm.vmas.LowerBoundSegment(ar.Start); vseg.Ok() && vseg.Start() < ar.End; vseg = vseg.NextSegment() {
vma := vseg.ValuePtr()
if vma.mlockMode != memmap.MLockNone {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
vsegAR := vseg.Range().Intersect(ar)
// pseg should already correspond to either this vma or a later one,
@@ -1097,7 +1098,7 @@ func (mm *MemoryManager) Decommit(addr hostarch.Addr, length uint64) error {
// to the rest (but returns ENOMEM from the system call, as it should)." -
// madvise(2)
if mm.vmas.SpanRange(ar) != ar.Length() {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
return nil
}
@@ -1114,18 +1115,18 @@ type MSyncOpts struct {
// MSync implements the semantics of Linux's msync().
func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length uint64, opts MSyncOpts) error {
if addr != addr.RoundDown() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if length == 0 {
return nil
}
la, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
mm.mappingMu.RLock()
@@ -1133,7 +1134,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
vseg := mm.vmas.LowerBoundSegment(ar.Start)
if !vseg.Ok() {
mm.mappingMu.RUnlock()
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
var unmapped bool
lastEnd := ar.Start
@@ -1150,7 +1151,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
vma := vseg.ValuePtr()
if opts.Invalidate && vma.mlockMode != memmap.MLockNone {
mm.mappingMu.RUnlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// It's only possible to have dirtied the Mappable through a shared
// mapping. Don't check if the mapping is writable, because mprotect
@@ -1182,7 +1183,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
}
if unmapped {
- return syserror.ENOMEM
+ return linuxerr.ENOMEM
}
return nil
}
@@ -1191,7 +1192,7 @@ func (mm *MemoryManager) MSync(ctx context.Context, addr hostarch.Addr, length u
func (mm *MemoryManager) GetSharedFutexKey(ctx context.Context, addr hostarch.Addr) (futex.Key, error) {
ar, ok := addr.ToRange(4) // sizeof(int32).
if !ok {
- return futex.Key{}, syserror.EFAULT
+ return futex.Key{}, linuxerr.EFAULT
}
mm.mappingMu.RLock()
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
index 0d019e41d..e34b7a2f7 100644
--- a/pkg/sentry/mm/vma.go
+++ b/pkg/sentry/mm/vma.go
@@ -19,12 +19,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Preconditions:
@@ -58,7 +58,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
newUsageAS -= uint64(mm.vmas.SpanRange(ar))
}
if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
- return vmaIterator{}, hostarch.AddrRange{}, syserror.ENOMEM
+ return vmaIterator{}, hostarch.AddrRange{}, linuxerr.ENOMEM
}
if opts.MLockMode != memmap.MLockNone {
@@ -66,14 +66,14 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp
if creds := auth.CredentialsFromContext(ctx); !creds.HasCapabilityIn(linux.CAP_IPC_LOCK, creds.UserNamespace.Root()) {
mlockLimit := limits.FromContext(ctx).Get(limits.MemoryLocked).Cur
if mlockLimit == 0 {
- return vmaIterator{}, hostarch.AddrRange{}, syserror.EPERM
+ return vmaIterator{}, hostarch.AddrRange{}, linuxerr.EPERM
}
newLockedAS := mm.lockedAS + opts.Length
if opts.Unmap {
newLockedAS -= mm.mlockedBytesRangeLocked(ar)
}
if newLockedAS > mlockLimit {
- return vmaIterator{}, hostarch.AddrRange{}, syserror.EAGAIN
+ return vmaIterator{}, hostarch.AddrRange{}, linuxerr.EAGAIN
}
}
}
@@ -177,7 +177,7 @@ func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOp
// Fixed mappings accept only the requested address.
if opts.Fixed {
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
// Prefer hugepage alignment if a hugepage or more is requested.
@@ -215,7 +215,7 @@ func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bou
return gr.Start, nil
}
}
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
// Preconditions: mm.mappingMu must be locked.
@@ -235,7 +235,7 @@ func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bo
return start, nil
}
}
- return 0, syserror.ENOMEM
+ return 0, linuxerr.ENOMEM
}
// Preconditions: mm.mappingMu must be locked.
@@ -288,7 +288,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRang
vma := vseg.ValuePtr()
if addr < vseg.Start() {
// TODO(jamieliu): Implement vma.growsDown here.
- return vbegin, vgap, syserror.EFAULT
+ return vbegin, vgap, linuxerr.EFAULT
}
perms := vma.effectivePerms
@@ -296,7 +296,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRang
perms = vma.maxPerms
}
if !perms.SupersetOf(at) {
- return vbegin, vgap, syserror.EPERM
+ return vbegin, vgap, linuxerr.EPERM
}
addr = vseg.End()
@@ -308,7 +308,7 @@ func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar hostarch.AddrRang
}
// Ran out of vmas before ar.End.
- return vbegin, vgap, syserror.EFAULT
+ return vbegin, vgap, linuxerr.EFAULT
}
// getVecVMAsLocked ensures that vmas exist for all addresses in ars, and
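Editor's note: the mm and vma changes above are a mechanical migration from the syserror package to linuxerr sentinels, which are returned and compared directly. A minimal sketch of the resulting idiom, reusing only identifiers that already appear in this diff (hostarch.Addr, IsPageAligned, ToRange, linuxerr.EINVAL); the helper name checkRange is hypothetical.

package mmexample

import (
    "gvisor.dev/gvisor/pkg/errors/linuxerr"
    "gvisor.dev/gvisor/pkg/hostarch"
)

// checkRange mirrors the validation pattern used throughout mm: reject
// unaligned addresses and lengths that do not form a valid range, returning
// linuxerr sentinels directly instead of syserror values.
func checkRange(addr hostarch.Addr, length uint64) (hostarch.AddrRange, error) {
    if !addr.IsPageAligned() {
        return hostarch.AddrRange{}, linuxerr.EINVAL
    }
    ar, ok := addr.ToRange(length)
    if !ok {
        return hostarch.AddrRange{}, linuxerr.EINVAL
    }
    return ar, nil
}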
diff --git a/pkg/sentry/pgalloc/BUILD b/pkg/sentry/pgalloc/BUILD
index 57d73d770..496a9fd97 100644
--- a/pkg/sentry/pgalloc/BUILD
+++ b/pkg/sentry/pgalloc/BUILD
@@ -85,6 +85,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/memutil",
@@ -96,7 +97,6 @@ go_library(
"//pkg/state",
"//pkg/state/wire",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"@org_golang_x_sys//unix:go_default_library",
],
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index d1a883da4..68e17d343 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -31,6 +31,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/safemem"
@@ -38,7 +39,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// MemoryFile is a memmap.File whose pages may be allocated to arbitrary
@@ -403,7 +403,7 @@ func (f *MemoryFile) Allocate(length uint64, kind usage.MemoryKind) (memmap.File
// Find a range in the underlying file.
fr, ok := findAvailableRange(&f.usage, f.fileSize, length, alignment)
if !ok {
- return memmap.FileRange{}, syserror.ENOMEM
+ return memmap.FileRange{}, linuxerr.ENOMEM
}
// Expand the file if needed.
@@ -674,7 +674,7 @@ func (f *MemoryFile) MapInternal(fr memmap.FileRange, at hostarch.AccessType) (s
panic(fmt.Sprintf("invalid range: %v", fr))
}
if at.Execute {
- return safemem.BlockSeq{}, syserror.EACCES
+ return safemem.BlockSeq{}, linuxerr.EACCES
}
chunks := ((fr.End + chunkMask) >> chunkShift) - (fr.Start >> chunkShift)
@@ -944,7 +944,7 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
// NOTE(b/165896008): mincore (which is passed as checkCommitted)
// by f.UpdateUsage() might take a really long time. So unlock f.mu
// while checkCommitted runs.
- f.mu.Unlock()
+ f.mu.Unlock() // +checklocksforce
err := checkCommitted(s, buf)
f.mu.Lock()
if err != nil {
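Editor's note: the +checklocksforce annotation above marks an intentional drop of f.mu around the slow checkCommitted (mincore) call. A generic sketch of that pattern using only the standard library; the cache and fetch names are illustrative and not part of this change.

package lockexample

import "sync"

type cache struct {
    mu   sync.Mutex
    data map[string]string
}

// refreshLocked is called with c.mu held. It releases the mutex around the
// potentially slow fetch so other lockers can make progress, then reacquires
// it before touching c.data, mirroring updateUsageLocked above.
func (c *cache) refreshLocked(key string, fetch func(string) (string, error)) error {
    c.mu.Unlock() // +checklocksforce: reacquired below.
    v, err := fetch(key)
    c.mu.Lock()
    if err != nil {
        return err
    }
    c.data[key] = v
    return nil
}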
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.go b/pkg/sentry/platform/kvm/bluepill_amd64.go
index d761bbdee..0567c8d32 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64.go
+++ b/pkg/sentry/platform/kvm/bluepill_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
@@ -74,8 +75,27 @@ func (c *vCPU) KernelSyscall() {
// therefore be guaranteed that there is no floating point state to be
// loaded on resuming from halt. We only worry about saving on exit.
ring0.SaveFloatingPoint(c.floatingPointState.BytePointer()) // escapes: no.
- ring0.Halt()
- ring0.WriteFS(uintptr(regs.Fs_base)) // escapes: no, reload host segment.
+ // N.B. Since KernelSyscall is called when the kernel makes a syscall,
+ // FS_BASE is already set for correct execution of this function.
+ //
+ // Refresher on syscall/exception handling:
+ // 1. When the sentry is in guest mode and makes a syscall, it goes to
+ // sysenter(), which saves the register state (including RIP of SYSCALL
+ // instruction) to vCPU.registers.
+ // 2. It then calls KernelSyscall, which rewinds the IP and executes
+ // HLT.
+ // 3. HLT does a VM-exit to bluepillHandler, which returns from the
+ // signal handler using vCPU.registers, directly to the SYSCALL
+ // instruction.
+ // 4. Later, when we want to re-use the vCPU (perhaps on a different
+ // host thread), we set the new thread's registers in vCPU.registers
+ // (as opposed to setting the KVM registers with KVM_SET_REGS).
+ // 5. KVM_RUN thus enters the guest with the old register state,
+ // immediately following the HLT instruction, returning here.
+ // 6. We then restore FS_BASE and the full registers from vCPU.registers
+ // to return from sysenter() back to the desired bluepill point from
+ // the host.
+ ring0.HaltAndWriteFSBase(regs) // escapes: no, reload host segment.
}
// KernelException handles kernel exceptions.
@@ -93,8 +113,8 @@ func (c *vCPU) KernelException(vector ring0.Vector) {
}
// See above.
ring0.SaveFloatingPoint(c.floatingPointState.BytePointer()) // escapes: no.
- ring0.Halt()
- ring0.WriteFS(uintptr(regs.Fs_base)) // escapes: no; reload host segment.
+ // See above.
+ ring0.HaltAndWriteFSBase(regs) // escapes: no, reload host segment.
}
// bluepillArchExit is called during bluepillEnter.
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.s b/pkg/sentry/platform/kvm/bluepill_amd64.s
index 953024600..c2a1dca11 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64.s
+++ b/pkg/sentry/platform/kvm/bluepill_amd64.s
@@ -37,7 +37,15 @@ TEXT ·bluepill(SB),NOSPLIT,$0
begin:
MOVQ vcpu+0(FP), AX
LEAQ VCPU_CPU(AX), BX
+
+ // The goroutine stack will be changed in the guest, which renders the
+ // frame pointer outdated and misleads perf tools. Disconnect the frame
+ // chain by zeroing the frame pointer before it is saved in the frame in
+ // bluepillHandler().
+ MOVQ BP, CX
+ MOVQ $0, BP
BYTE CLI;
+ MOVQ CX, BP
check_vcpu:
MOVQ ENTRY_CPU_SELF(GS), CX
CMPQ BX, CX
diff --git a/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
index 198bafdea..4ba1d6f9c 100644
--- a/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
diff --git a/pkg/sentry/platform/kvm/bluepill_arm64.go b/pkg/sentry/platform/kvm/bluepill_arm64.go
index 578852c3f..acb0cb05f 100644
--- a/pkg/sentry/platform/kvm/bluepill_arm64.go
+++ b/pkg/sentry/platform/kvm/bluepill_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
@@ -25,29 +26,6 @@ import (
var (
// The action for bluepillSignal is changed by sigaction().
bluepillSignal = unix.SIGILL
-
- // vcpuSErrBounce is the event of system error for bouncing KVM.
- vcpuSErrBounce = kvmVcpuEvents{
- exception: exception{
- sErrPending: 1,
- },
- }
-
- // vcpuSErrNMI is the event of system error to trigger sigbus.
- vcpuSErrNMI = kvmVcpuEvents{
- exception: exception{
- sErrPending: 1,
- sErrHasEsr: 1,
- sErrEsr: _ESR_ELx_SERR_NMI,
- },
- }
-
- // vcpuExtDabt is the event of ext_dabt.
- vcpuExtDabt = kvmVcpuEvents{
- exception: exception{
- extDabtPending: 1,
- },
- }
)
// getTLS returns the value of TPIDR_EL0 register.
diff --git a/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go b/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
index 07fc4f216..ee7dba828 100644
--- a/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
@@ -80,11 +81,18 @@ func getHypercallID(addr uintptr) int {
//
//go:nosplit
func bluepillStopGuest(c *vCPU) {
+ // vcpuSErrBounce is the event of system error for bouncing KVM.
+ vcpuSErrBounce := &kvmVcpuEvents{
+ exception: exception{
+ sErrPending: 1,
+ },
+ }
+
if _, _, errno := unix.RawSyscall( // escapes: no.
unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
- uintptr(unsafe.Pointer(&vcpuSErrBounce))); errno != 0 {
+ uintptr(unsafe.Pointer(vcpuSErrBounce))); errno != 0 {
throw("bounce sErr injection failed")
}
}
@@ -93,12 +101,21 @@ func bluepillStopGuest(c *vCPU) {
//
//go:nosplit
func bluepillSigBus(c *vCPU) {
+ // vcpuSErrNMI is the event of system error to trigger sigbus.
+ vcpuSErrNMI := &kvmVcpuEvents{
+ exception: exception{
+ sErrPending: 1,
+ sErrHasEsr: 1,
+ sErrEsr: _ESR_ELx_SERR_NMI,
+ },
+ }
+
// Host must support ARM64_HAS_RAS_EXTN.
if _, _, errno := unix.RawSyscall( // escapes: no.
unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
- uintptr(unsafe.Pointer(&vcpuSErrNMI))); errno != 0 {
+ uintptr(unsafe.Pointer(vcpuSErrNMI))); errno != 0 {
if errno == unix.EINVAL {
throw("No ARM64_HAS_RAS_EXTN feature in host.")
}
@@ -110,11 +127,18 @@ func bluepillSigBus(c *vCPU) {
//
//go:nosplit
func bluepillExtDabt(c *vCPU) {
+ // vcpuExtDabt is the event of ext_dabt.
+ vcpuExtDabt := &kvmVcpuEvents{
+ exception: exception{
+ extDabtPending: 1,
+ },
+ }
+
if _, _, errno := unix.RawSyscall( // escapes: no.
unix.SYS_IOCTL,
uintptr(c.fd),
_KVM_SET_VCPU_EVENTS,
- uintptr(unsafe.Pointer(&vcpuExtDabt))); errno != 0 {
+ uintptr(unsafe.Pointer(vcpuExtDabt))); errno != 0 {
throw("ext_dabt injection failed")
}
}
diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go
index 28a613a54..7a3c97c5a 100644
--- a/pkg/sentry/platform/kvm/bluepill_fault.go
+++ b/pkg/sentry/platform/kvm/bluepill_fault.go
@@ -55,11 +55,7 @@ func calculateBluepillFault(physical uintptr, phyRegions []physicalRegion) (virt
}
// Adjust the block to match our size.
- physicalStart = alignedPhysical & faultBlockMask
- if physicalStart < pr.physical {
- // Bound the starting point to the start of the region.
- physicalStart = pr.physical
- }
+ physicalStart = pr.physical + (alignedPhysical-pr.physical)&faultBlockMask
virtualStart = pr.virtual + (physicalStart - pr.physical)
physicalEnd := physicalStart + faultBlockSize
if physicalEnd > end {
@@ -101,7 +97,7 @@ func handleBluepillFault(m *machine, physical uintptr, phyRegions []physicalRegi
// Store the physical address in the slot. This is used to
// avoid calls to handleBluepillFault in the future (see
// machine.mapPhysical).
- atomic.StoreUintptr(&m.usedSlots[slot], physical)
+ atomic.StoreUintptr(&m.usedSlots[slot], physicalStart)
// Successfully added region; we can increment nextSlot and
// allow another set to proceed here.
atomic.StoreUint32(&m.nextSlot, slot+1)
diff --git a/pkg/sentry/platform/kvm/bluepill_unsafe.go b/pkg/sentry/platform/kvm/bluepill_unsafe.go
index 6f87236ad..0f0c1e73b 100644
--- a/pkg/sentry/platform/kvm/bluepill_unsafe.go
+++ b/pkg/sentry/platform/kvm/bluepill_unsafe.go
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.12
// +build go1.12
-// +build !go1.18
-// Check go:linkname function signatures when updating Go version.
+// //go:linkname directives are type-checked by checklinkname. Any other
+// non-linkname assumptions outside the Go 1 compatibility guarantee should
+// have an accompanying vet check or version-guard build tag.
package kvm
@@ -28,7 +30,7 @@ import (
)
//go:linkname throw runtime.throw
-func throw(string)
+func throw(s string)
// vCPUPtr returns a CPU for the given address.
//
@@ -85,6 +87,13 @@ func bluepillGuestExit(c *vCPU, context unsafe.Pointer) {
// signal stack. It should only execute raw system calls and functions that are
// explicitly marked go:nosplit.
//
+// Ideally, this function should switch to gsignal, as runtime.sigtramp does,
+// but that is tedious given all the runtime internals. That said, using
+// gsignal inside a signal handler is not _required_, provided we avoid stack
+// splits and allocations. Note that calling any splittable function here will
+// be flaky; if the signal stack is below the G stack then we will trigger a
+// split and crash. If above, we won't trigger a split.
+//
// +checkescape:all
//
//go:nosplit
diff --git a/pkg/sentry/platform/kvm/kvm_amd64.go b/pkg/sentry/platform/kvm/kvm_amd64.go
index b9ed4a706..a5189d9e2 100644
--- a/pkg/sentry/platform/kvm/kvm_amd64.go
+++ b/pkg/sentry/platform/kvm/kvm_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_test.go b/pkg/sentry/platform/kvm/kvm_amd64_test.go
index b1cab89a0..c3fbbdc75 100644
--- a/pkg/sentry/platform/kvm/kvm_amd64_test.go
+++ b/pkg/sentry/platform/kvm/kvm_amd64_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
@@ -28,7 +29,7 @@ import (
)
func TestSegments(t *testing.T) {
- applicationTest(t, true, testutil.TwiddleSegments, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTwiddleSegments(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTestSegments(regs)
for {
var si linux.SignalInfo
@@ -55,7 +56,7 @@ func TestSegments(t *testing.T) {
func stmxcsr(addr *uint32)
func TestMXCSR(t *testing.T) {
- applicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
switchOpts := ring0.SwitchOpts{
Registers: regs,
diff --git a/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
index 0c43d72f4..7fdb6ac64 100644
--- a/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_arm64.go b/pkg/sentry/platform/kvm/kvm_arm64.go
index b73340f0e..159808433 100644
--- a/pkg/sentry/platform/kvm/kvm_arm64.go
+++ b/pkg/sentry/platform/kvm/kvm_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_test.go b/pkg/sentry/platform/kvm/kvm_arm64_test.go
index 0e3d84d95..b53e354da 100644
--- a/pkg/sentry/platform/kvm/kvm_arm64_test.go
+++ b/pkg/sentry/platform/kvm/kvm_arm64_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go b/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
index f07a9f34d..54d579a2b 100644
--- a/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/kvm_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
diff --git a/pkg/sentry/platform/kvm/kvm_test.go b/pkg/sentry/platform/kvm/kvm_test.go
index fe570aff9..3a30286e2 100644
--- a/pkg/sentry/platform/kvm/kvm_test.go
+++ b/pkg/sentry/platform/kvm/kvm_test.go
@@ -120,13 +120,13 @@ func TestKernelFloatingPoint(t *testing.T) {
})
}
-func applicationTest(t testHarness, useHostMappings bool, target func(), fn func(*vCPU, *arch.Registers, *pagetables.PageTables) bool) {
+func applicationTest(t testHarness, useHostMappings bool, targetFn uintptr, fn func(*vCPU, *arch.Registers, *pagetables.PageTables) bool) {
// Initialize registers & page tables.
var (
regs arch.Registers
pt *pagetables.PageTables
)
- testutil.SetTestTarget(&regs, target)
+ testutil.SetTestTarget(&regs, targetFn)
kvmTest(t, func(k *KVM) {
// Create new page tables.
@@ -157,7 +157,7 @@ func applicationTest(t testHarness, useHostMappings bool, target func(), fn func
}
func TestApplicationSyscall(t *testing.T) {
- applicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
@@ -171,7 +171,7 @@ func TestApplicationSyscall(t *testing.T) {
}
return false
})
- applicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
@@ -187,7 +187,7 @@ func TestApplicationSyscall(t *testing.T) {
}
func TestApplicationFault(t *testing.T) {
- applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTouchTarget(regs, nil) // Cause fault.
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
@@ -202,7 +202,7 @@ func TestApplicationFault(t *testing.T) {
}
return false
})
- applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTouchTarget(regs, nil) // Cause fault.
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
@@ -219,7 +219,7 @@ func TestApplicationFault(t *testing.T) {
}
func TestRegistersSyscall(t *testing.T) {
- applicationTest(t, true, testutil.TwiddleRegsSyscall, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTwiddleRegsSyscall(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTestRegs(regs) // Fill values for all registers.
for {
var si linux.SignalInfo
@@ -242,7 +242,7 @@ func TestRegistersSyscall(t *testing.T) {
}
func TestRegistersFault(t *testing.T) {
- applicationTest(t, true, testutil.TwiddleRegsFault, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTwiddleRegsFault(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTestRegs(regs) // Fill values for all registers.
for {
var si linux.SignalInfo
@@ -266,7 +266,7 @@ func TestRegistersFault(t *testing.T) {
}
func TestBounce(t *testing.T) {
- applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
go func() {
time.Sleep(time.Millisecond)
c.BounceToKernel()
@@ -281,7 +281,7 @@ func TestBounce(t *testing.T) {
}
return false
})
- applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
go func() {
time.Sleep(time.Millisecond)
c.BounceToKernel()
@@ -300,7 +300,7 @@ func TestBounce(t *testing.T) {
}
func TestBounceStress(t *testing.T) {
- applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfSpinLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
randomSleep := func() {
// O(hundreds of microseconds) is appropriate to ensure
// different overlaps and different schedules.
@@ -336,7 +336,7 @@ func TestBounceStress(t *testing.T) {
func TestInvalidate(t *testing.T) {
var data uintptr // Used below.
- applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, true, testutil.AddrOfTouch(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
testutil.SetTouchTarget(regs, &data) // Read legitimate value.
for {
var si linux.SignalInfo
@@ -377,7 +377,7 @@ func IsFault(err error, si *linux.SignalInfo) bool {
}
func TestEmptyAddressSpace(t *testing.T) {
- applicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, false, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
@@ -391,7 +391,7 @@ func TestEmptyAddressSpace(t *testing.T) {
}
return false
})
- applicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(t, false, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
@@ -467,7 +467,7 @@ func BenchmarkApplicationSyscall(b *testing.B) {
i int // Iteration includes machine.Get() / machine.Put().
a int // Count for ErrContextInterrupt.
)
- applicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(b, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
@@ -489,7 +489,7 @@ func BenchmarkApplicationSyscall(b *testing.B) {
func BenchmarkKernelSyscall(b *testing.B) {
// Note that the target passed here is irrelevant, we never execute SwitchToUser.
- applicationTest(b, true, testutil.Getpid, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(b, true, testutil.AddrOfGetpid(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
// iteration does not include machine.Get() / machine.Put().
for i := 0; i < b.N; i++ {
testutil.Getpid()
@@ -504,7 +504,7 @@ func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) {
i int
a int
)
- applicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
+ applicationTest(b, true, testutil.AddrOfSyscallLoop(), func(c *vCPU, regs *arch.Registers, pt *pagetables.PageTables) bool {
var si linux.SignalInfo
if _, err := c.SwitchToUser(ring0.SwitchOpts{
Registers: regs,
diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go
index 1b5d5f66e..d67563958 100644
--- a/pkg/sentry/platform/kvm/machine.go
+++ b/pkg/sentry/platform/kvm/machine.go
@@ -70,7 +70,7 @@ type machine struct {
// tscControl checks whether cpu supports TSC scaling
tscControl bool
- // usedSlots is the set of used physical addresses (sorted).
+ // usedSlots is the set of used physical addresses (not sorted).
usedSlots []uintptr
// nextID is the next vCPU ID.
@@ -296,13 +296,20 @@ func newMachine(vm int) (*machine, error) {
return m, nil
}
-// hasSlot returns true iff the given address is mapped.
+// hasSlot returns true if the given address is mapped.
//
// This must be done via a linear scan.
//
//go:nosplit
func (m *machine) hasSlot(physical uintptr) bool {
- for i := 0; i < len(m.usedSlots); i++ {
+ slotLen := int(atomic.LoadUint32(&m.nextSlot))
+ // When slots are being updated, nextSlot is ^uint32(0). As this situation
+ // is unlikely to happen, we simply set slotLen to m.maxSlots and scan the
+ // whole usedSlots array.
+ if slotLen == int(^uint32(0)) {
+ slotLen = m.maxSlots
+ }
+ for i := 0; i < slotLen; i++ {
if p := atomic.LoadUintptr(&m.usedSlots[i]); p == physical {
return true
}
@@ -512,15 +519,21 @@ func (c *vCPU) lock() {
//
//go:nosplit
func (c *vCPU) unlock() {
- if atomic.CompareAndSwapUint32(&c.state, vCPUUser|vCPUGuest, vCPUGuest) {
+ origState := atomicbitops.CompareAndSwapUint32(&c.state, vCPUUser|vCPUGuest, vCPUGuest)
+ if origState == vCPUUser|vCPUGuest {
// Happy path: no exits are forced, and we can continue
// executing on our merry way with a single atomic access.
return
}
// Clear the lock.
- origState := atomic.LoadUint32(&c.state)
- atomicbitops.AndUint32(&c.state, ^vCPUUser)
+ for {
+ state := atomicbitops.CompareAndSwapUint32(&c.state, origState, origState&^vCPUUser)
+ if state == origState {
+ break
+ }
+ origState = state
+ }
switch origState {
case vCPUUser:
// Normal state.
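Editor's note: the rewritten unlock() clears the vCPUUser bit with a compare-and-swap loop so that it also learns the exact state it displaced. A standalone sketch of that loop using plain sync/atomic (the kvm code uses an atomicbitops helper that returns the previous value).

package main

import (
    "fmt"
    "sync/atomic"
)

const flagUser = uint32(1 << 0)

// clearUser atomically clears flagUser in *state and returns the value
// observed just before the successful swap, mirroring how unlock() derives
// origState.
func clearUser(state *uint32) uint32 {
    for {
        old := atomic.LoadUint32(state)
        if atomic.CompareAndSwapUint32(state, old, old&^flagUser) {
            return old
        }
    }
}

func main() {
    s := flagUser | uint32(1<<1)
    prev := clearUser(&s)
    fmt.Printf("previous %#x, now %#x\n", prev, s)
}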
diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go
index b8e1cd72c..a96634381 100644
--- a/pkg/sentry/platform/kvm/machine_amd64.go
+++ b/pkg/sentry/platform/kvm/machine_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
@@ -136,7 +137,7 @@ func (c *vCPU) initArchState() error {
}
// Set the entrypoint for the kernel.
- kernelUserRegs.RIP = uint64(reflect.ValueOf(ring0.Start).Pointer())
+ kernelUserRegs.RIP = uint64(ring0.AddrOfStart())
kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer())
kernelUserRegs.RSP = c.StackTop()
kernelUserRegs.RFLAGS = ring0.KernelFlagsSet
@@ -469,7 +470,7 @@ func availableRegionsForSetMem() (phyRegions []physicalRegion) {
}
func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
- // Map all the executible regions so that all the entry functions
+ // Map all the executable regions so that all the entry functions
// are mapped in the upper half.
applyVirtualRegions(func(vr virtualRegion) {
if excludeVirtualRegion(vr) || vr.filename == "[vsyscall]" {
@@ -485,7 +486,7 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
pageTable.Map(
hostarch.Addr(ring0.KernelStartAddress|r.virtual),
r.length,
- pagetables.MapOpts{AccessType: hostarch.Execute},
+ pagetables.MapOpts{AccessType: hostarch.Execute, Global: true},
physical)
}
})
@@ -498,7 +499,7 @@ func (m *machine) mapUpperHalf(pageTable *pagetables.PageTables) {
pageTable.Map(
hostarch.Addr(ring0.KernelStartAddress|start),
regionLen,
- pagetables.MapOpts{AccessType: hostarch.ReadWrite},
+ pagetables.MapOpts{AccessType: hostarch.ReadWrite, Global: true},
physical)
}
}
diff --git a/pkg/sentry/platform/kvm/machine_amd64_unsafe.go b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
index 83bcc7406..de798bb2c 100644
--- a/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package kvm
diff --git a/pkg/sentry/platform/kvm/machine_arm64.go b/pkg/sentry/platform/kvm/machine_arm64.go
index edaccf9bc..7937a8481 100644
--- a/pkg/sentry/platform/kvm/machine_arm64.go
+++ b/pkg/sentry/platform/kvm/machine_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
diff --git a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
index 1b0a6e0a7..1a4a9ce7d 100644
--- a/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package kvm
@@ -140,22 +141,15 @@ func (c *vCPU) initArchState() error {
// vbar_el1
reg.id = _KVM_ARM64_REGS_VBAR_EL1
-
- fromLocation := reflect.ValueOf(ring0.Vectors).Pointer()
- offset := fromLocation & (1<<11 - 1)
- if offset != 0 {
- offset = 1<<11 - offset
- }
-
- toLocation := fromLocation + offset
- data = uint64(ring0.KernelStartAddress | toLocation)
+ vectorLocation := reflect.ValueOf(ring0.Vectors).Pointer()
+ data = uint64(ring0.KernelStartAddress | vectorLocation)
if err := c.setOneRegister(&reg); err != nil {
return err
}
// Use the address of the exception vector table as
// the MMIO address base.
- arm64HypercallMMIOBase = toLocation
+ arm64HypercallMMIOBase = vectorLocation
// Initialize the PCID database.
if hasGuestPCID {
diff --git a/pkg/sentry/platform/kvm/machine_unsafe.go b/pkg/sentry/platform/kvm/machine_unsafe.go
index 49e1c7136..cc3a1253b 100644
--- a/pkg/sentry/platform/kvm/machine_unsafe.go
+++ b/pkg/sentry/platform/kvm/machine_unsafe.go
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.12
// +build go1.12
-// +build !go1.18
-// Check go:linkname function signatures when updating Go version.
+// //go:linkname directives are type-checked by checklinkname. Any other
+// non-linkname assumptions outside the Go 1 compatibility guarantee should
+// have an accompanying vet check or version-guard build tag.
package kvm
diff --git a/pkg/sentry/platform/kvm/testutil/testutil.go b/pkg/sentry/platform/kvm/testutil/testutil.go
index 5c1efa0fd..d8c273796 100644
--- a/pkg/sentry/platform/kvm/testutil/testutil.go
+++ b/pkg/sentry/platform/kvm/testutil/testutil.go
@@ -23,23 +23,41 @@ import (
// Getpid executes a trivial system call.
func Getpid()
-// Touch touches the value in the first register.
-func Touch()
+// AddrOfGetpid returns the address of Getpid.
+//
+// In Go 1.17+, Go references to assembly functions resolve to an ABIInternal
+// wrapper function rather than the function itself. The ABI0 (i.e., primary)
+// address must therefore be obtained from assembly.
+func AddrOfGetpid() uintptr
+
+// AddrOfTouch returns the address of a function that touches the value in the
+// first register.
+func AddrOfTouch() uintptr
+func touch()
-// SyscallLoop executes a syscall and loops.
-func SyscallLoop()
+// AddrOfSyscallLoop returns the address of a function that executes a syscall
+// and loops.
+func AddrOfSyscallLoop() uintptr
+func syscallLoop()
-// SpinLoop spins on the CPU.
-func SpinLoop()
+// AddrOfSpinLoop returns the address of a function that spins on the CPU.
+func AddrOfSpinLoop() uintptr
+func spinLoop()
-// HaltLoop immediately halts and loops.
-func HaltLoop()
+// AddrOfHaltLoop returns the address of a function that immediately halts and
+// loops.
+func AddrOfHaltLoop() uintptr
+func haltLoop()
-// TwiddleRegsFault twiddles registers then faults.
-func TwiddleRegsFault()
+// AddrOfTwiddleRegsFault returns the address of a function that twiddles
+// registers then faults.
+func AddrOfTwiddleRegsFault() uintptr
+func twiddleRegsFault()
-// TwiddleRegsSyscall twiddles registers then executes a syscall.
-func TwiddleRegsSyscall()
+// AddrOfTwiddleRegsSyscall returns the address of a function that twiddles
+// registers then executes a syscall.
+func AddrOfTwiddleRegsSyscall() uintptr
+func twiddleRegsSyscall()
// FloatingPointWorks is a floating point test.
//
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.go b/pkg/sentry/platform/kvm/testutil/testutil_amd64.go
index 8048eedec..98c52b2f5 100644
--- a/pkg/sentry/platform/kvm/testutil/testutil_amd64.go
+++ b/pkg/sentry/platform/kvm/testutil/testutil_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package testutil
@@ -22,12 +23,14 @@ import (
"gvisor.dev/gvisor/pkg/sentry/arch"
)
-// TwiddleSegments reads segments into known registers.
-func TwiddleSegments()
+// AddrOfTwiddleSegments returns the address of a function that reads segments
+// into known registers.
+func AddrOfTwiddleSegments() uintptr
+func twiddleSegments()
// SetTestTarget sets the rip appropriately.
-func SetTestTarget(regs *arch.Registers, fn func()) {
- regs.Rip = uint64(reflect.ValueOf(fn).Pointer())
+func SetTestTarget(regs *arch.Registers, fn uintptr) {
+ regs.Rip = uint64(fn)
}
// SetTouchTarget sets rax appropriately.
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.s b/pkg/sentry/platform/kvm/testutil/testutil_amd64.s
index 491ec0c2a..65e7c05ea 100644
--- a/pkg/sentry/platform/kvm/testutil/testutil_amd64.s
+++ b/pkg/sentry/platform/kvm/testutil/testutil_amd64.s
@@ -25,27 +25,46 @@ TEXT ·Getpid(SB),NOSPLIT,$0
SYSCALL
RET
-TEXT ·Touch(SB),NOSPLIT,$0
+// func AddrOfGetpid() uintptr
+TEXT ·AddrOfGetpid(SB), $0-8
+ MOVQ $·Getpid(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·touch(SB),NOSPLIT,$0
start:
MOVQ 0(AX), BX // deref AX
MOVQ $39, AX // getpid
SYSCALL
JMP start
-TEXT ·HaltLoop(SB),NOSPLIT,$0
-start:
- HLT
- JMP start
+// func AddrOfTouch() uintptr
+TEXT ·AddrOfTouch(SB), $0-8
+ MOVQ $·touch(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
-TEXT ·SyscallLoop(SB),NOSPLIT,$0
+TEXT ·syscallLoop(SB),NOSPLIT,$0
start:
SYSCALL
JMP start
-TEXT ·SpinLoop(SB),NOSPLIT,$0
+// func AddrOfSyscallLoop() uintptr
+TEXT ·AddrOfSyscallLoop(SB), $0-8
+ MOVQ $·syscallLoop(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·spinLoop(SB),NOSPLIT,$0
start:
JMP start
+// func AddrOfSpinLoop() uintptr
+TEXT ·AddrOfSpinLoop(SB), $0-8
+ MOVQ $·spinLoop(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
+
TEXT ·FloatingPointWorks(SB),NOSPLIT,$0-8
NO_LOCAL_POINTERS
MOVQ $1, AX
@@ -75,20 +94,32 @@ TEXT ·FloatingPointWorks(SB),NOSPLIT,$0-8
NOTQ DI; \
NOTQ SP;
-TEXT ·TwiddleRegsSyscall(SB),NOSPLIT,$0
+TEXT ·twiddleRegsSyscall(SB),NOSPLIT,$0
TWIDDLE_REGS()
SYSCALL
RET // never reached
-TEXT ·TwiddleRegsFault(SB),NOSPLIT,$0
+// func AddrOfTwiddleRegsSyscall() uintptr
+TEXT ·AddrOfTwiddleRegsSyscall(SB), $0-8
+ MOVQ $·twiddleRegsSyscall(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·twiddleRegsFault(SB),NOSPLIT,$0
TWIDDLE_REGS()
JMP AX // must fault
RET // never reached
+// func AddrOfTwiddleRegsFault() uintptr
+TEXT ·AddrOfTwiddleRegsFault(SB), $0-8
+ MOVQ $·twiddleRegsFault(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
+
#define READ_FS() BYTE $0x64; BYTE $0x48; BYTE $0x8b; BYTE $0x00;
#define READ_GS() BYTE $0x65; BYTE $0x48; BYTE $0x8b; BYTE $0x00;
-TEXT ·TwiddleSegments(SB),NOSPLIT,$0
+TEXT ·twiddleSegments(SB),NOSPLIT,$0
MOVQ $0x0, AX
READ_GS()
MOVQ AX, BX
@@ -96,3 +127,9 @@ TEXT ·TwiddleSegments(SB),NOSPLIT,$0
READ_FS()
SYSCALL
RET // never reached
+
+// func AddrOfTwiddleSegments() uintptr
+TEXT ·AddrOfTwiddleSegments(SB), $0-8
+ MOVQ $·twiddleSegments(SB), AX
+ MOVQ AX, ret+0(FP)
+ RET
diff --git a/pkg/sentry/platform/kvm/testutil/testutil_arm64.go b/pkg/sentry/platform/kvm/testutil/testutil_arm64.go
index c5235ca9d..6d0ba8252 100644
--- a/pkg/sentry/platform/kvm/testutil/testutil_arm64.go
+++ b/pkg/sentry/platform/kvm/testutil/testutil_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package testutil
diff --git a/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
index 4f7fe993a..07eda0ef3 100644
--- a/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
+++ b/pkg/sentry/platform/ptrace/ptrace_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package ptrace
diff --git a/pkg/sentry/platform/ptrace/subprocess_amd64.go b/pkg/sentry/platform/ptrace/subprocess_amd64.go
index 90b1ead56..13a55b784 100644
--- a/pkg/sentry/platform/ptrace/subprocess_amd64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package ptrace
@@ -176,6 +177,7 @@ func patchSignalInfo(regs *arch.Registers, signalInfo *linux.SignalInfo) {
//
// This is safe to call in an afterFork context.
//
+//go:norace
//go:nosplit
func enableCpuidFault() {
unix.RawSyscall6(unix.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0, 0, 0, 0)
diff --git a/pkg/sentry/platform/ptrace/subprocess_arm64.go b/pkg/sentry/platform/ptrace/subprocess_arm64.go
index e4257e3bf..8181db659 100644
--- a/pkg/sentry/platform/ptrace/subprocess_arm64.go
+++ b/pkg/sentry/platform/ptrace/subprocess_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package ptrace
diff --git a/pkg/sentry/platform/ptrace/subprocess_linux.go b/pkg/sentry/platform/ptrace/subprocess_linux.go
index 4f0260432..129ca52e2 100644
--- a/pkg/sentry/platform/ptrace/subprocess_linux.go
+++ b/pkg/sentry/platform/ptrace/subprocess_linux.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package ptrace
@@ -120,6 +121,17 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
return nil, err
}
+ return forkStub(flags, instrs)
+}
+
+// In the child, this function must not acquire any locks, because they might
+// have been locked at the time of the fork. This means no rescheduling, no
+// malloc calls, and no new stack segments. For the same reason, the compiler
+// does not race-instrument it.
+//
+//go:norace
+func forkStub(flags uintptr, instrs []linux.BPFInstruction) (*thread, error) {
// Declare all variables up front in order to ensure that there's no
// need for allocations between beforeFork & afterFork.
var (
@@ -181,7 +193,7 @@ func attachedThread(flags uintptr, defaultAction linux.BPFAction) (*thread, erro
// Set an aggressive BPF filter for the stub and all its children. See
// the description of the BPF program built above.
- if errno := seccomp.SetFilter(instrs); errno != 0 {
+ if errno := seccomp.SetFilterInChild(instrs); errno != 0 {
unix.RawSyscall(unix.SYS_EXIT, uintptr(errno), 0, 0)
}
diff --git a/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go b/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
index 9c342c59b..f1e84059d 100644
--- a/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
+++ b/pkg/sentry/platform/ptrace/subprocess_linux_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (amd64 || arm64)
// +build linux
// +build amd64 arm64
@@ -26,6 +27,7 @@ import (
// unmaskAllSignals unmasks all signals on the current thread.
//
+//go:norace
//go:nosplit
func unmaskAllSignals() unix.Errno {
var set linux.SignalSet
diff --git a/pkg/sentry/platform/ptrace/subprocess_unsafe.go b/pkg/sentry/platform/ptrace/subprocess_unsafe.go
index 38b7b1a5e..304722200 100644
--- a/pkg/sentry/platform/ptrace/subprocess_unsafe.go
+++ b/pkg/sentry/platform/ptrace/subprocess_unsafe.go
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.12
// +build go1.12
-// +build !go1.18
-// Check go:linkname function signatures when updating Go version.
+// //go:linkname directives are type-checked by checklinkname. Any other
+// non-linkname assumptions outside the Go 1 compatibility guarantee should
+// have an accompanying vet check or version-guard build tag.
package ptrace
diff --git a/pkg/sentry/seccheck/BUILD b/pkg/sentry/seccheck/BUILD
new file mode 100644
index 000000000..943fa180d
--- /dev/null
+++ b/pkg/sentry/seccheck/BUILD
@@ -0,0 +1,54 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools/go_fieldenum:defs.bzl", "go_fieldenum")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
+
+licenses(["notice"])
+
+go_fieldenum(
+ name = "seccheck_fieldenum",
+ srcs = [
+ "clone.go",
+ "task.go",
+ ],
+ out = "seccheck_fieldenum.go",
+ package = "seccheck",
+)
+
+go_template_instance(
+ name = "seqatomic_checkerslice",
+ out = "seqatomic_checkerslice_unsafe.go",
+ package = "seccheck",
+ suffix = "CheckerSlice",
+ template = "//pkg/sync/seqatomic:generic_seqatomic",
+ types = {
+ "Value": "[]Checker",
+ },
+)
+
+go_library(
+ name = "seccheck",
+ srcs = [
+ "clone.go",
+ "seccheck.go",
+ "seccheck_fieldenum.go",
+ "seqatomic_checkerslice_unsafe.go",
+ "task.go",
+ ],
+ visibility = ["//:sandbox"],
+ deps = [
+ "//pkg/abi/linux",
+ "//pkg/context",
+ "//pkg/gohacks",
+ "//pkg/sentry/kernel/auth",
+ "//pkg/sentry/kernel/time",
+ "//pkg/sync",
+ ],
+)
+
+go_test(
+ name = "seccheck_test",
+ size = "small",
+ srcs = ["seccheck_test.go"],
+ library = ":seccheck",
+ deps = ["//pkg/context"],
+)
diff --git a/pkg/sentry/seccheck/clone.go b/pkg/sentry/seccheck/clone.go
new file mode 100644
index 000000000..7546fa021
--- /dev/null
+++ b/pkg/sentry/seccheck/clone.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seccheck
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+)
+
+// CloneInfo contains information used by the Clone checkpoint.
+//
+// +fieldenum Clone
+type CloneInfo struct {
+ // Invoker identifies the invoking thread.
+ Invoker TaskInfo
+
+ // Credentials are the invoking thread's credentials.
+ Credentials *auth.Credentials
+
+ // Args contains the arguments to kernel.Task.Clone().
+ Args linux.CloneArgs
+
+ // Created identifies the created thread.
+ Created TaskInfo
+}
+
+// CloneReq returns fields required by the Clone checkpoint.
+func (s *state) CloneReq() CloneFieldSet {
+ return s.cloneReq.Load()
+}
+
+// Clone is called at the Clone checkpoint.
+func (s *state) Clone(ctx context.Context, mask CloneFieldSet, info *CloneInfo) error {
+ for _, c := range s.getCheckers() {
+ if err := c.Clone(ctx, mask, *info); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/sentry/seccheck/seccheck.go b/pkg/sentry/seccheck/seccheck.go
new file mode 100644
index 000000000..b6c9d44ce
--- /dev/null
+++ b/pkg/sentry/seccheck/seccheck.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package seccheck defines a structure for dynamically-configured security
+// checks in the sentry.
+package seccheck
+
+import (
+ "sync/atomic"
+
+ "gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// A Point represents a checkpoint, a point at which a security check occurs.
+type Point uint
+
+// PointX represents the checkpoint X.
+const (
+ PointClone Point = iota
+ // Add new Points above this line.
+ pointLength
+
+ numPointBitmaskUint32s = (int(pointLength)-1)/32 + 1
+)
+
+// A Checker performs security checks at checkpoints.
+//
+// Each Checker method X is called at checkpoint X; if the method may return a
+// non-nil error and does so, it causes the checked operation to fail
+// immediately (without calling subsequent Checkers) and return the error. The
+// info argument contains information relevant to the check. The mask argument
+// indicates what fields in info are valid; the mask should usually be a
+// superset of fields requested by the Checker's corresponding CheckerReq, but
+// may be missing requested fields in some cases (e.g. if the Checker is
+// registered concurrently with invocations of checkpoints).
+type Checker interface {
+ Clone(ctx context.Context, mask CloneFieldSet, info CloneInfo) error
+}
+
+// CheckerDefaults may be embedded by implementations of Checker to obtain
+// no-op implementations of Checker methods that may be explicitly overridden.
+type CheckerDefaults struct{}
+
+// Clone implements Checker.Clone.
+func (CheckerDefaults) Clone(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ return nil
+}
+
+// CheckerReq indicates what checkpoints a corresponding Checker runs at, and
+// what information it requires at those checkpoints.
+type CheckerReq struct {
+ // Points are the set of checkpoints for which the corresponding Checker
+ // must be called. Note that methods not specified in Points may still be
+ // called; implementations of Checker may embed CheckerDefaults to obtain
+ // no-op implementations of Checker methods.
+ Points []Point
+
+ // All of the following fields indicate what fields in the corresponding
+ // XInfo struct will be requested at the corresponding checkpoint.
+ Clone CloneFields
+}
+
+// Global is the method receiver of all seccheck functions.
+var Global state
+
+// state is the type of Global, and is separated out for testing.
+type state struct {
+ // registrationMu serializes all changes to the set of registered Checkers
+ // for all checkpoints.
+ registrationMu sync.Mutex
+
+ // enabledPoints is a bitmask of checkpoints for which at least one Checker
+ // is registered.
+ //
+ // enabledPoints is accessed using atomic memory operations. Mutation of
+ // enabledPoints is serialized by registrationMu.
+ enabledPoints [numPointBitmaskUint32s]uint32
+
+ // registrationSeq supports store-free atomic reads of registeredCheckers.
+ registrationSeq sync.SeqCount
+
+ // checkers is the set of all registered Checkers in order of execution.
+ //
+ // checkers is accessed using instantiations of SeqAtomic functions.
+ // Mutation of checkers is serialized by registrationMu.
+ checkers []Checker
+
+ // All of the following xReq variables indicate what fields in the
+ // corresponding XInfo struct have been requested by any registered
+ // checker, are accessed using atomic memory operations, and are mutated
+ // with registrationMu locked.
+ cloneReq CloneFieldSet
+}
+
+// AppendChecker registers the given Checker to execute at checkpoints. The
+// Checker will execute after all previously-registered Checkers, and only if
+// those Checkers return a nil error.
+func (s *state) AppendChecker(c Checker, req *CheckerReq) {
+ s.registrationMu.Lock()
+ defer s.registrationMu.Unlock()
+ s.cloneReq.AddFieldsLoadable(req.Clone)
+ s.appendCheckerLocked(c)
+ for _, p := range req.Points {
+ word, bit := p/32, p%32
+ atomic.StoreUint32(&s.enabledPoints[word], s.enabledPoints[word]|(uint32(1)<<bit))
+ }
+}
+
+// Enabled returns true if any Checker is registered for the given checkpoint.
+func (s *state) Enabled(p Point) bool {
+ word, bit := p/32, p%32
+ return atomic.LoadUint32(&s.enabledPoints[word])&(uint32(1)<<bit) != 0
+}
+
+func (s *state) getCheckers() []Checker {
+ return SeqAtomicLoadCheckerSlice(&s.registrationSeq, &s.checkers)
+}
+
+// Preconditions: s.registrationMu must be locked.
+func (s *state) appendCheckerLocked(c Checker) {
+ s.registrationSeq.BeginWrite()
+ s.checkers = append(s.checkers, c)
+ s.registrationSeq.EndWrite()
+}
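Editor's note: for context, a sketch of how a checkpoint call site might consult this registry; the helper below and the way CloneInfo gets populated are assumptions for illustration, not part of this change.

package example

import (
    "gvisor.dev/gvisor/pkg/context"
    "gvisor.dev/gvisor/pkg/sentry/seccheck"
)

// checkClone is a hypothetical helper a task-clone path could call before
// creating the child thread.
func checkClone(ctx context.Context, info *seccheck.CloneInfo) error {
    if !seccheck.Global.Enabled(seccheck.PointClone) {
        return nil // Fast path: no Checker registered for this point.
    }
    // CloneReq reports which CloneInfo fields registered Checkers asked for;
    // the caller fills in at least those fields before invoking Clone.
    mask := seccheck.Global.CloneReq()
    return seccheck.Global.Clone(ctx, mask, info)
}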
diff --git a/pkg/sentry/seccheck/seccheck_test.go b/pkg/sentry/seccheck/seccheck_test.go
new file mode 100644
index 000000000..687810d18
--- /dev/null
+++ b/pkg/sentry/seccheck/seccheck_test.go
@@ -0,0 +1,157 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seccheck
+
+import (
+ "errors"
+ "testing"
+
+ "gvisor.dev/gvisor/pkg/context"
+)
+
+type testChecker struct {
+ CheckerDefaults
+
+ onClone func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error
+}
+
+// Clone implements Checker.Clone.
+func (c *testChecker) Clone(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ if c.onClone == nil {
+ return nil
+ }
+ return c.onClone(ctx, mask, info)
+}
+
+func TestNoChecker(t *testing.T) {
+ var s state
+ if s.Enabled(PointClone) {
+ t.Errorf("Enabled(PointClone): got true, wanted false")
+ }
+}
+
+func TestCheckerNotRegisteredForPoint(t *testing.T) {
+ var s state
+ s.AppendChecker(&testChecker{}, &CheckerReq{})
+ if s.Enabled(PointClone) {
+ t.Errorf("Enabled(PointClone): got true, wanted false")
+ }
+}
+
+func TestCheckerRegistered(t *testing.T) {
+ var s state
+ checkerCalled := false
+ s.AppendChecker(&testChecker{onClone: func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ checkerCalled = true
+ return nil
+ }}, &CheckerReq{
+ Points: []Point{PointClone},
+ Clone: CloneFields{
+ Credentials: true,
+ },
+ })
+
+ if !s.Enabled(PointClone) {
+ t.Errorf("Enabled(PointClone): got false, wanted true")
+ }
+ if !s.CloneReq().Contains(CloneFieldCredentials) {
+ t.Errorf("CloneReq().Contains(CloneFieldCredentials): got false, wanted true")
+ }
+ if err := s.Clone(context.Background(), CloneFieldSet{}, &CloneInfo{}); err != nil {
+ t.Errorf("Clone(): got %v, wanted nil", err)
+ }
+ if !checkerCalled {
+ t.Errorf("Clone() did not call Checker.Clone()")
+ }
+}
+
+func TestMultipleCheckersRegistered(t *testing.T) {
+ var s state
+ checkersCalled := [2]bool{}
+ s.AppendChecker(&testChecker{onClone: func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ checkersCalled[0] = true
+ return nil
+ }}, &CheckerReq{
+ Points: []Point{PointClone},
+ Clone: CloneFields{
+ Args: true,
+ },
+ })
+ s.AppendChecker(&testChecker{onClone: func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ checkersCalled[1] = true
+ return nil
+ }}, &CheckerReq{
+ Points: []Point{PointClone},
+ Clone: CloneFields{
+ Created: TaskFields{
+ ThreadID: true,
+ },
+ },
+ })
+
+ if !s.Enabled(PointClone) {
+ t.Errorf("Enabled(PointClone): got false, wanted true")
+ }
+ // CloneReq() should return the union of requested fields from all calls to
+ // AppendChecker.
+ req := s.CloneReq()
+ if !req.Contains(CloneFieldArgs) {
+ t.Errorf("req.Contains(CloneFieldArgs): got false, wanted true")
+ }
+ if !req.Created.Contains(TaskFieldThreadID) {
+ t.Errorf("req.Created.Contains(TaskFieldThreadID): got false, wanted true")
+ }
+ if err := s.Clone(context.Background(), CloneFieldSet{}, &CloneInfo{}); err != nil {
+ t.Errorf("Clone(): got %v, wanted nil", err)
+ }
+ for i := range checkersCalled {
+ if !checkersCalled[i] {
+ t.Errorf("Clone() did not call Checker.Clone() index %d", i)
+ }
+ }
+}
+
+func TestCheckpointReturnsFirstCheckerError(t *testing.T) {
+ errFirstChecker := errors.New("first Checker error")
+ errSecondChecker := errors.New("second Checker error")
+
+ var s state
+ checkersCalled := [2]bool{}
+ s.AppendChecker(&testChecker{onClone: func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ checkersCalled[0] = true
+ return errFirstChecker
+ }}, &CheckerReq{
+ Points: []Point{PointClone},
+ })
+ s.AppendChecker(&testChecker{onClone: func(ctx context.Context, mask CloneFieldSet, info CloneInfo) error {
+ checkersCalled[1] = true
+ return errSecondChecker
+ }}, &CheckerReq{
+ Points: []Point{PointClone},
+ })
+
+ if !s.Enabled(PointClone) {
+ t.Errorf("Enabled(PointClone): got false, wanted true")
+ }
+ if err := s.Clone(context.Background(), CloneFieldSet{}, &CloneInfo{}); err != errFirstChecker {
+ t.Errorf("Clone(): got %v, wanted %v", err, errFirstChecker)
+ }
+ if !checkersCalled[0] {
+ t.Errorf("Clone() did not call first Checker")
+ }
+ if checkersCalled[1] {
+ t.Errorf("Clone() called second Checker")
+ }
+}
diff --git a/pkg/sentry/seccheck/task.go b/pkg/sentry/seccheck/task.go
new file mode 100644
index 000000000..1dee33203
--- /dev/null
+++ b/pkg/sentry/seccheck/task.go
@@ -0,0 +1,39 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seccheck
+
+import (
+ ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
+)
+
+// TaskInfo contains information unambiguously identifying a single thread
+// and/or its containing process.
+//
+// +fieldenum Task
+type TaskInfo struct {
+ // ThreadID is the thread's ID in the root PID namespace.
+ ThreadID int32
+
+ // ThreadStartTime is the thread's CLOCK_REALTIME start time.
+ ThreadStartTime ktime.Time
+
+ // ThreadGroupID is the thread's group leader's ID in the root PID
+ // namespace.
+ ThreadGroupID int32
+
+ // ThreadGroupStartTime is the thread's group leader's CLOCK_REALTIME start
+ // time.
+ ThreadGroupStartTime ktime.Time
+}
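The +fieldenum Task annotation feeds a generator that emits a TaskField/TaskFields bitmask pair for the struct above, mirroring the Clone types already used by the tests earlier in this change. A hedged sketch of a registration that asks only for the identifiers of created tasks, assuming the generated names follow that pattern; s stands for the seccheck state being configured and myChecker for any Checker implementation.

    // Request just the thread and thread-group IDs of tasks created at the
    // Clone checkpoint; the other TaskInfo fields stay unpopulated.
    req := &CheckerReq{
        Points: []Point{PointClone},
        Clone: CloneFields{
            Created: TaskFields{
                ThreadID:      true,
                ThreadGroupID: true,
            },
        },
    }
    s.AppendChecker(myChecker, req)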
diff --git a/pkg/sentry/socket/control/BUILD b/pkg/sentry/socket/control/BUILD
index 2029e7cf4..b2fc84181 100644
--- a/pkg/sentry/socket/control/BUILD
+++ b/pkg/sentry/socket/control/BUILD
@@ -16,6 +16,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/bits",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/marshal",
"//pkg/marshal/primitive",
@@ -25,7 +26,6 @@ go_library(
"//pkg/sentry/socket",
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/vfs",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go
index 235b9c306..00a5e729a 100644
--- a/pkg/sentry/socket/control/control.go
+++ b/pkg/sentry/socket/control/control.go
@@ -20,6 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -28,7 +29,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
)
const maxInt = int(^uint(0) >> 1)
@@ -70,7 +70,7 @@ func NewSCMRights(t *kernel.Task, fds []int32) (SCMRights, error) {
file := t.GetFile(fd)
if file == nil {
files.Release(t)
- return nil, syserror.EBADF
+ return nil, linuxerr.EBADF
}
files = append(files, file)
}
@@ -169,7 +169,7 @@ func NewSCMCredentials(t *kernel.Task, cred linux.ControlMessageCredentials) (SC
return nil, err
}
if kernel.ThreadID(cred.PID) != t.ThreadGroup().ID() && !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.PIDNamespace().UserNamespace()) {
- return nil, syserror.EPERM
+ return nil, linuxerr.EPERM
}
return &scmCredentials{t, kuid, kgid}, nil
}
@@ -473,17 +473,17 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
for i := 0; i < len(buf); {
if i+linux.SizeOfControlMessageHeader > len(buf) {
- return cmsgs, syserror.EINVAL
+ return cmsgs, linuxerr.EINVAL
}
var h linux.ControlMessageHeader
h.UnmarshalUnsafe(buf[i : i+linux.SizeOfControlMessageHeader])
if h.Length < uint64(linux.SizeOfControlMessageHeader) {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
if h.Length > uint64(len(buf)-i) {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
i += linux.SizeOfControlMessageHeader
@@ -497,7 +497,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
numRights := rightsSize / linux.SizeOfControlMessageRight
if len(fds)+numRights > linux.SCM_MAX_FD {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
for j := i; j < i+rightsSize; j += linux.SizeOfControlMessageRight {
@@ -508,7 +508,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.SCM_CREDENTIALS:
if length < linux.SizeOfControlMessageCredentials {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
var creds linux.ControlMessageCredentials
@@ -522,7 +522,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.SO_TIMESTAMP:
if length < linux.SizeOfTimeval {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
var ts linux.Timeval
ts.UnmarshalUnsafe(buf[i : i+linux.SizeOfTimeval])
@@ -532,13 +532,13 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
default:
// Unknown message type.
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
case linux.SOL_IP:
switch h.Type {
case linux.IP_TOS:
if length < linux.SizeOfControlMessageTOS {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
cmsgs.IP.HasTOS = true
var tos primitive.Uint8
@@ -548,7 +548,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.IP_PKTINFO:
if length < linux.SizeOfControlMessageIPPacketInfo {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
cmsgs.IP.HasIPPacketInfo = true
@@ -561,7 +561,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.IP_RECVORIGDSTADDR:
var addr linux.SockAddrInet
if length < addr.SizeBytes() {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
addr.UnmarshalUnsafe(buf[i : i+addr.SizeBytes()])
cmsgs.IP.OriginalDstAddress = &addr
@@ -570,7 +570,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.IP_RECVERR:
var errCmsg linux.SockErrCMsgIPv4
if length < errCmsg.SizeBytes() {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()])
@@ -578,13 +578,13 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
i += bits.AlignUp(length, width)
default:
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
case linux.SOL_IPV6:
switch h.Type {
case linux.IPV6_TCLASS:
if length < linux.SizeOfControlMessageTClass {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
cmsgs.IP.HasTClass = true
var tclass primitive.Uint32
@@ -595,7 +595,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.IPV6_RECVORIGDSTADDR:
var addr linux.SockAddrInet6
if length < addr.SizeBytes() {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
addr.UnmarshalUnsafe(buf[i : i+addr.SizeBytes()])
cmsgs.IP.OriginalDstAddress = &addr
@@ -604,7 +604,7 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
case linux.IPV6_RECVERR:
var errCmsg linux.SockErrCMsgIPv6
if length < errCmsg.SizeBytes() {
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
errCmsg.UnmarshalBytes(buf[i : i+errCmsg.SizeBytes()])
@@ -612,10 +612,10 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte, width uint)
i += bits.AlignUp(length, width)
default:
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
default:
- return socket.ControlMessages{}, syserror.EINVAL
+ return socket.ControlMessages{}, linuxerr.EINVAL
}
}
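The pattern throughout this change is mechanical: sentinel errors move from pkg/syserror to pkg/errors/linuxerr, and comparisons against errors that may have been translated elsewhere use linuxerr.Equals (as in the hostinet and netlink hunks below). A minimal sketch of the post-migration idiom; doParse is a stand-in for any callee.

    if err := doParse(buf); err != nil {
        // Direct comparison works for values returned straight from linuxerr;
        // linuxerr.Equals additionally matches legacy or wrapped representations.
        if linuxerr.Equals(linuxerr.EINVAL, err) {
            return linuxerr.EINVAL
        }
        return err
    }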
diff --git a/pkg/sentry/socket/control/control_vfs2.go b/pkg/sentry/socket/control/control_vfs2.go
index 37d02948f..0a989cbeb 100644
--- a/pkg/sentry/socket/control/control_vfs2.go
+++ b/pkg/sentry/socket/control/control_vfs2.go
@@ -17,10 +17,10 @@ package control
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// SCMRightsVFS2 represents a SCM_RIGHTS socket control message.
@@ -51,7 +51,7 @@ func NewSCMRightsVFS2(t *kernel.Task, fds []int32) (SCMRightsVFS2, error) {
file := t.GetFileVFS2(fd)
if file == nil {
files.Release(t)
- return nil, syserror.EBADF
+ return nil, linuxerr.EBADF
}
files = append(files, file)
}
diff --git a/pkg/sentry/socket/hostinet/BUILD b/pkg/sentry/socket/hostinet/BUILD
index 3c6511ead..4ea89f9d0 100644
--- a/pkg/sentry/socket/hostinet/BUILD
+++ b/pkg/sentry/socket/hostinet/BUILD
@@ -18,6 +18,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fdnotifier",
"//pkg/hostarch",
"//pkg/log",
@@ -37,7 +38,6 @@ go_library(
"//pkg/sentry/socket/control",
"//pkg/sentry/vfs",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/tcpip/stack",
"//pkg/usermem",
diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go
index 52ae4bc9c..1c1e501ba 100644
--- a/pkg/sentry/socket/hostinet/socket.go
+++ b/pkg/sentry/socket/hostinet/socket.go
@@ -20,6 +20,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
@@ -34,7 +35,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/control"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -67,23 +67,6 @@ type socketOperations struct {
socketOpsCommon
}
-// socketOpsCommon contains the socket operations common to VFS1 and VFS2.
-//
-// +stateify savable
-type socketOpsCommon struct {
- socket.SendReceiveTimeout
-
- family int // Read-only.
- stype linux.SockType // Read-only.
- protocol int // Read-only.
- queue waiter.Queue
-
- // fd is the host socket fd. It must have O_NONBLOCK, so that operations
- // will return EWOULDBLOCK instead of blocking on the host. This allows us to
- // handle blocking behavior independently in the sentry.
- fd int
-}
-
var _ = socket.Socket(&socketOperations{})
func newSocketFile(ctx context.Context, family int, stype linux.SockType, protocol int, fd int, nonblock bool) (*fs.File, *syserr.Error) {
@@ -103,29 +86,6 @@ func newSocketFile(ctx context.Context, family int, stype linux.SockType, protoc
return fs.NewFile(ctx, dirent, fs.FileFlags{NonBlocking: nonblock, Read: true, Write: true, NonSeekable: true}, s), nil
}
-// Release implements fs.FileOperations.Release.
-func (s *socketOpsCommon) Release(context.Context) {
- fdnotifier.RemoveFD(int32(s.fd))
- unix.Close(s.fd)
-}
-
-// Readiness implements waiter.Waitable.Readiness.
-func (s *socketOpsCommon) Readiness(mask waiter.EventMask) waiter.EventMask {
- return fdnotifier.NonBlockingPoll(int32(s.fd), mask)
-}
-
-// EventRegister implements waiter.Waitable.EventRegister.
-func (s *socketOpsCommon) EventRegister(e *waiter.Entry, mask waiter.EventMask) {
- s.queue.EventRegister(e, mask)
- fdnotifier.UpdateFD(int32(s.fd))
-}
-
-// EventUnregister implements waiter.Waitable.EventUnregister.
-func (s *socketOpsCommon) EventUnregister(e *waiter.Entry) {
- s.queue.EventUnregister(e)
- fdnotifier.UpdateFD(int32(s.fd))
-}
-
// Ioctl implements fs.FileOperations.Ioctl.
func (s *socketOperations) Ioctl(ctx context.Context, _ *fs.File, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
return ioctl(ctx, s.fd, io, args)
@@ -177,6 +137,96 @@ func (s *socketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IO
return int64(n), err
}
+// Socket implements socket.Provider.Socket.
+func (p *socketProvider) Socket(t *kernel.Task, stypeflags linux.SockType, protocol int) (*fs.File, *syserr.Error) {
+ // Check that we are using the host network stack.
+ stack := t.NetworkContext()
+ if stack == nil {
+ return nil, nil
+ }
+ if _, ok := stack.(*Stack); !ok {
+ return nil, nil
+ }
+
+ // Only accept TCP and UDP.
+ stype := stypeflags & linux.SOCK_TYPE_MASK
+ switch stype {
+ case unix.SOCK_STREAM:
+ switch protocol {
+ case 0, unix.IPPROTO_TCP:
+ // ok
+ default:
+ return nil, nil
+ }
+ case unix.SOCK_DGRAM:
+ switch protocol {
+ case 0, unix.IPPROTO_UDP:
+ // ok
+ default:
+ return nil, nil
+ }
+ default:
+ return nil, nil
+ }
+
+ // Conservatively ignore all flags specified by the application and add
+ // SOCK_NONBLOCK since socketOperations requires it. Pass a protocol of 0
+ // to simplify the syscall filters, since 0 and IPPROTO_* are equivalent.
+ fd, err := unix.Socket(p.family, int(stype)|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, syserr.FromError(err)
+ }
+ return newSocketFile(t, p.family, stype, protocol, fd, stypeflags&unix.SOCK_NONBLOCK != 0)
+}
+
+// Pair implements socket.Provider.Pair.
+func (p *socketProvider) Pair(t *kernel.Task, stype linux.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) {
+ // Not supported by AF_INET/AF_INET6.
+ return nil, nil, nil
+}
+
+// LINT.ThenChange(./socket_vfs2.go)
+
+// socketOpsCommon contains the socket operations common to VFS1 and VFS2.
+//
+// +stateify savable
+type socketOpsCommon struct {
+ socket.SendReceiveTimeout
+
+ family int // Read-only.
+ stype linux.SockType // Read-only.
+ protocol int // Read-only.
+ queue waiter.Queue
+
+ // fd is the host socket fd. It must have O_NONBLOCK, so that operations
+ // will return EWOULDBLOCK instead of blocking on the host. This allows us to
+ // handle blocking behavior independently in the sentry.
+ fd int
+}
+
+// Release implements fs.FileOperations.Release.
+func (s *socketOpsCommon) Release(context.Context) {
+ fdnotifier.RemoveFD(int32(s.fd))
+ unix.Close(s.fd)
+}
+
+// Readiness implements waiter.Waitable.Readiness.
+func (s *socketOpsCommon) Readiness(mask waiter.EventMask) waiter.EventMask {
+ return fdnotifier.NonBlockingPoll(int32(s.fd), mask)
+}
+
+// EventRegister implements waiter.Waitable.EventRegister.
+func (s *socketOpsCommon) EventRegister(e *waiter.Entry, mask waiter.EventMask) {
+ s.queue.EventRegister(e, mask)
+ fdnotifier.UpdateFD(int32(s.fd))
+}
+
+// EventUnregister implements waiter.Waitable.EventUnregister.
+func (s *socketOpsCommon) EventUnregister(e *waiter.Entry) {
+ s.queue.EventUnregister(e)
+ fdnotifier.UpdateFD(int32(s.fd))
+}
+
// Connect implements socket.Socket.Connect.
func (s *socketOpsCommon) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error {
if len(sockaddr) > sizeofSockaddr {
@@ -237,7 +287,7 @@ func (s *socketOpsCommon) Accept(t *kernel.Task, peerRequested bool, flags int,
fd, syscallErr := accept4(s.fd, peerAddrPtr, peerAddrlenPtr, unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC)
if blocking {
var ch chan struct{}
- for syscallErr == syserror.ErrWouldBlock {
+ for syscallErr == linuxerr.ErrWouldBlock {
if ch != nil {
if syscallErr = t.Block(ch); syscallErr != nil {
break
@@ -484,7 +534,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
n, err := copyToDst()
// recv*(MSG_ERRQUEUE) never blocks, even without MSG_DONTWAIT.
if flags&(unix.MSG_DONTWAIT|unix.MSG_ERRQUEUE) == 0 {
- for err == syserror.ErrWouldBlock {
+ for err == linuxerr.ErrWouldBlock {
// We only expect blocking to come from the actual syscall, in which
// case it can't have returned any data.
if n != 0 {
@@ -596,6 +646,17 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
return 0, syserr.ErrInvalidArgument
}
+ // If the src is zero-length, call SENDTO directly with a null buffer in
+ // order to generate poll/epoll notifications.
+ if src.NumBytes() == 0 {
+ sysflags := flags | unix.MSG_DONTWAIT
+ n, _, errno := unix.Syscall6(unix.SYS_SENDTO, uintptr(s.fd), 0, 0, uintptr(sysflags), uintptr(firstBytePtr(to)), uintptr(len(to)))
+ if errno != 0 {
+ return 0, syserr.FromError(errno)
+ }
+ return int(n), nil
+ }
+
space := uint64(control.CmsgsSpace(t, controlMessages))
if space > maxControlLen {
space = maxControlLen
@@ -645,7 +706,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
var ch chan struct{}
n, err := src.CopyInTo(t, sendmsgFromBlocks)
if flags&unix.MSG_DONTWAIT == 0 {
- for err == syserror.ErrWouldBlock {
+ for err == linuxerr.ErrWouldBlock {
// We only expect blocking to come from the actual syscall, in which
// case it can't have returned any data.
if n != 0 {
@@ -653,8 +714,8 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
}
if ch != nil {
if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -673,7 +734,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
func translateIOSyscallError(err error) error {
if err == unix.EAGAIN || err == unix.EWOULDBLOCK {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
return err
}
@@ -709,56 +770,6 @@ type socketProvider struct {
family int
}
-// Socket implements socket.Provider.Socket.
-func (p *socketProvider) Socket(t *kernel.Task, stypeflags linux.SockType, protocol int) (*fs.File, *syserr.Error) {
- // Check that we are using the host network stack.
- stack := t.NetworkContext()
- if stack == nil {
- return nil, nil
- }
- if _, ok := stack.(*Stack); !ok {
- return nil, nil
- }
-
- // Only accept TCP and UDP.
- stype := stypeflags & linux.SOCK_TYPE_MASK
- switch stype {
- case unix.SOCK_STREAM:
- switch protocol {
- case 0, unix.IPPROTO_TCP:
- // ok
- default:
- return nil, nil
- }
- case unix.SOCK_DGRAM:
- switch protocol {
- case 0, unix.IPPROTO_UDP:
- // ok
- default:
- return nil, nil
- }
- default:
- return nil, nil
- }
-
- // Conservatively ignore all flags specified by the application and add
- // SOCK_NONBLOCK since socketOperations requires it. Pass a protocol of 0
- // to simplify the syscall filters, since 0 and IPPROTO_* are equivalent.
- fd, err := unix.Socket(p.family, int(stype)|unix.SOCK_NONBLOCK|unix.SOCK_CLOEXEC, 0)
- if err != nil {
- return nil, syserr.FromError(err)
- }
- return newSocketFile(t, p.family, stype, protocol, fd, stypeflags&unix.SOCK_NONBLOCK != 0)
-}
-
-// Pair implements socket.Provider.Pair.
-func (p *socketProvider) Pair(t *kernel.Task, stype linux.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) {
- // Not supported by AF_INET/AF_INET6.
- return nil, nil, nil
-}
-
-// LINT.ThenChange(./socket_vfs2.go)
-
func init() {
for _, family := range []int{unix.AF_INET, unix.AF_INET6} {
socket.RegisterProvider(family, &socketProvider{family})
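Besides the error-package swap, this file mostly reorders code; the functional addition is the zero-length send path, which issues sendto(2) with a null buffer so peers polling the other end still observe readiness edges. A rough host-side equivalent for illustration only; fd and the 127.0.0.1:8080 destination are placeholders, not part of this change.

    // A zero-byte datagram still wakes up epoll waiters on the peer socket;
    // unix.Sendto accepts a nil buffer, mirroring the raw SYS_SENDTO call above.
    dst := &unix.SockaddrInet4{Port: 8080, Addr: [4]byte{127, 0, 0, 1}}
    if err := unix.Sendto(fd, nil, unix.MSG_DONTWAIT, dst); err != nil {
        return err
    }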
diff --git a/pkg/sentry/socket/hostinet/socket_unsafe.go b/pkg/sentry/socket/hostinet/socket_unsafe.go
index d3be2d825..587f479eb 100644
--- a/pkg/sentry/socket/hostinet/socket_unsafe.go
+++ b/pkg/sentry/socket/hostinet/socket_unsafe.go
@@ -20,12 +20,12 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -67,9 +67,25 @@ func ioctl(ctx context.Context, fd int, io usermem.IO, args arch.SyscallArgument
AddressSpaceActive: true,
})
return 0, err
-
+ case unix.SIOCGIFFLAGS, unix.SIOCGIFCONF:
+ cc := &usermem.IOCopyContext{
+ Ctx: ctx,
+ IO: io,
+ Opts: usermem.IOOpts{
+ AddressSpaceActive: true,
+ },
+ }
+ var ifr linux.IFReq
+ if _, err := ifr.CopyIn(cc, args[2].Pointer()); err != nil {
+ return 0, err
+ }
+ if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(&ifr))); errno != 0 {
+ return 0, translateIOSyscallError(errno)
+ }
+ _, err := ifr.CopyOut(cc, args[2].Pointer())
+ return 0, err
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
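The new SIOCGIFFLAGS/SIOCGIFCONF branch round-trips a linux.IFReq between the application and the host ioctl. For comparison, the same query made directly against a host socket using the Ifreq helpers from golang.org/x/sys/unix; those helper names come from x/sys, not from this change, and sockFD is a placeholder descriptor.

    ifr, err := unix.NewIfreq("eth0")
    if err != nil {
        return err
    }
    if err := unix.IoctlIfreq(sockFD, unix.SIOCGIFFLAGS, ifr); err != nil {
        return err
    }
    flags := ifr.Uint16() // IFF_UP, IFF_RUNNING, ... bits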
diff --git a/pkg/sentry/socket/hostinet/socket_vfs2.go b/pkg/sentry/socket/hostinet/socket_vfs2.go
index 5d55cc64d..cd6e34ecc 100644
--- a/pkg/sentry/socket/hostinet/socket_vfs2.go
+++ b/pkg/sentry/socket/hostinet/socket_vfs2.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fdnotifier"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/sockfs"
@@ -26,7 +27,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -105,7 +105,7 @@ func (s *socketVFS2) Ioctl(ctx context.Context, uio usermem.IO, args arch.Syscal
// PRead implements vfs.FileDescriptionImpl.PRead.
func (s *socketVFS2) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Read implements vfs.FileDescriptionImpl.
@@ -113,7 +113,7 @@ func (s *socketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
reader := hostfd.GetReadWriterAt(int32(s.fd), -1, opts.Flags)
@@ -124,7 +124,7 @@ func (s *socketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// PWrite implements vfs.FileDescriptionImpl.
func (s *socketVFS2) PWrite(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Write implements vfs.FileDescriptionImpl.
@@ -132,7 +132,7 @@ func (s *socketVFS2) Write(ctx context.Context, src usermem.IOSequence, opts vfs
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
writer := hostfd.GetReadWriterAt(int32(s.fd), -1, opts.Flags)
diff --git a/pkg/sentry/socket/hostinet/sockopt_impl.go b/pkg/sentry/socket/hostinet/sockopt_impl.go
index 8a783712e..2397e04e7 100644
--- a/pkg/sentry/socket/hostinet/sockopt_impl.go
+++ b/pkg/sentry/socket/hostinet/sockopt_impl.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package hostinet
import (
diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go
index cbb1e905d..61111ac6c 100644
--- a/pkg/sentry/socket/hostinet/stack.go
+++ b/pkg/sentry/socket/hostinet/stack.go
@@ -29,11 +29,11 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/usermem"
@@ -309,6 +309,11 @@ func (s *Stack) Interfaces() map[int32]inet.Interface {
return interfaces
}
+// RemoveInterface implements inet.Stack.RemoveInterface.
+func (*Stack) RemoveInterface(int32) error {
+ return linuxerr.EACCES
+}
+
// InterfaceAddrs implements inet.Stack.InterfaceAddrs.
func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr {
addrs := make(map[int32][]inet.InterfaceAddr)
@@ -319,13 +324,13 @@ func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr {
}
// AddInterfaceAddr implements inet.Stack.AddInterfaceAddr.
-func (s *Stack) AddInterfaceAddr(int32, inet.InterfaceAddr) error {
- return syserror.EACCES
+func (*Stack) AddInterfaceAddr(int32, inet.InterfaceAddr) error {
+ return linuxerr.EACCES
}
// RemoveInterfaceAddr implements inet.Stack.RemoveInterfaceAddr.
-func (s *Stack) RemoveInterfaceAddr(int32, inet.InterfaceAddr) error {
- return syserror.EACCES
+func (*Stack) RemoveInterfaceAddr(int32, inet.InterfaceAddr) error {
+ return linuxerr.EACCES
}
// SupportsIPv6 implements inet.Stack.SupportsIPv6.
@@ -339,8 +344,8 @@ func (s *Stack) TCPReceiveBufferSize() (inet.TCPBufferSize, error) {
}
// SetTCPReceiveBufferSize implements inet.Stack.SetTCPReceiveBufferSize.
-func (s *Stack) SetTCPReceiveBufferSize(size inet.TCPBufferSize) error {
- return syserror.EACCES
+func (*Stack) SetTCPReceiveBufferSize(inet.TCPBufferSize) error {
+ return linuxerr.EACCES
}
// TCPSendBufferSize implements inet.Stack.TCPSendBufferSize.
@@ -349,8 +354,8 @@ func (s *Stack) TCPSendBufferSize() (inet.TCPBufferSize, error) {
}
// SetTCPSendBufferSize implements inet.Stack.SetTCPSendBufferSize.
-func (s *Stack) SetTCPSendBufferSize(size inet.TCPBufferSize) error {
- return syserror.EACCES
+func (*Stack) SetTCPSendBufferSize(inet.TCPBufferSize) error {
+ return linuxerr.EACCES
}
// TCPSACKEnabled implements inet.Stack.TCPSACKEnabled.
@@ -359,8 +364,8 @@ func (s *Stack) TCPSACKEnabled() (bool, error) {
}
// SetTCPSACKEnabled implements inet.Stack.SetTCPSACKEnabled.
-func (s *Stack) SetTCPSACKEnabled(bool) error {
- return syserror.EACCES
+func (*Stack) SetTCPSACKEnabled(bool) error {
+ return linuxerr.EACCES
}
// TCPRecovery implements inet.Stack.TCPRecovery.
@@ -369,8 +374,8 @@ func (s *Stack) TCPRecovery() (inet.TCPLossRecovery, error) {
}
// SetTCPRecovery implements inet.Stack.SetTCPRecovery.
-func (s *Stack) SetTCPRecovery(inet.TCPLossRecovery) error {
- return syserror.EACCES
+func (*Stack) SetTCPRecovery(inet.TCPLossRecovery) error {
+ return linuxerr.EACCES
}
// getLine reads one line from proc file, with specified prefix.
@@ -470,20 +475,20 @@ func (s *Stack) RouteTable() []inet.Route {
}
// Resume implements inet.Stack.Resume.
-func (s *Stack) Resume() {}
+func (*Stack) Resume() {}
// RegisteredEndpoints implements inet.Stack.RegisteredEndpoints.
-func (s *Stack) RegisteredEndpoints() []stack.TransportEndpoint { return nil }
+func (*Stack) RegisteredEndpoints() []stack.TransportEndpoint { return nil }
// CleanupEndpoints implements inet.Stack.CleanupEndpoints.
-func (s *Stack) CleanupEndpoints() []stack.TransportEndpoint { return nil }
+func (*Stack) CleanupEndpoints() []stack.TransportEndpoint { return nil }
// RestoreCleanupEndpoints implements inet.Stack.RestoreCleanupEndpoints.
-func (s *Stack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}
+func (*Stack) RestoreCleanupEndpoints([]stack.TransportEndpoint) {}
// SetForwarding implements inet.Stack.SetForwarding.
-func (s *Stack) SetForwarding(tcpip.NetworkProtocolNumber, bool) error {
- return syserror.EACCES
+func (*Stack) SetForwarding(tcpip.NetworkProtocolNumber, bool) error {
+ return linuxerr.EACCES
}
// PortRange implements inet.Stack.PortRange.
@@ -493,6 +498,6 @@ func (*Stack) PortRange() (uint16, uint16) {
}
// SetPortRange implements inet.Stack.SetPortRange.
-func (*Stack) SetPortRange(start uint16, end uint16) error {
- return syserror.EACCES
+func (*Stack) SetPortRange(uint16, uint16) error {
+ return linuxerr.EACCES
}
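hostinet's Stack gains RemoveInterface, returning EACCES like the other mutating stubs because the sandbox must not modify host interfaces, and unused receivers are dropped. A one-line compile-time assertion is the usual way to keep such a stub set in sync with inet.Stack; whether this file already carries one is not visible in the hunk.

    // Fails to compile if *Stack stops satisfying inet.Stack, for example when
    // the interface grows a method such as RemoveInterface.
    var _ inet.Stack = (*Stack)(nil)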
diff --git a/pkg/sentry/socket/netfilter/BUILD b/pkg/sentry/socket/netfilter/BUILD
index 61b2c9755..608474fa1 100644
--- a/pkg/sentry/socket/netfilter/BUILD
+++ b/pkg/sentry/socket/netfilter/BUILD
@@ -25,6 +25,7 @@ go_library(
"//pkg/log",
"//pkg/marshal",
"//pkg/sentry/kernel",
+ "//pkg/sentry/kernel/auth",
"//pkg/syserr",
"//pkg/tcpip",
"//pkg/tcpip/header",
diff --git a/pkg/sentry/socket/netfilter/extensions.go b/pkg/sentry/socket/netfilter/extensions.go
index 6fc7781ad..3f1b4a17b 100644
--- a/pkg/sentry/socket/netfilter/extensions.go
+++ b/pkg/sentry/socket/netfilter/extensions.go
@@ -19,20 +19,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
-// TODO(gvisor.dev/issue/170): The following per-matcher params should be
-// supported:
-// - Table name
-// - Match size
-// - User size
-// - Hooks
-// - Proto
-// - Family
-
// matchMaker knows how to (un)marshal the matcher named name().
type matchMaker interface {
// name is the matcher name as stored in the xt_entry_match struct.
@@ -43,7 +35,7 @@ type matchMaker interface {
// unmarshal converts from the ABI matcher struct to an
// stack.Matcher.
- unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error)
+ unmarshal(task *kernel.Task, buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error)
}
type matcher interface {
@@ -94,12 +86,12 @@ func marshalEntryMatch(name string, data []byte) []byte {
return buf
}
-func unmarshalMatcher(match linux.XTEntryMatch, filter stack.IPHeaderFilter, buf []byte) (stack.Matcher, error) {
+func unmarshalMatcher(task *kernel.Task, match linux.XTEntryMatch, filter stack.IPHeaderFilter, buf []byte) (stack.Matcher, error) {
matchMaker, ok := matchMakers[match.Name.String()]
if !ok {
return nil, fmt.Errorf("unsupported matcher with name %q", match.Name.String())
}
- return matchMaker.unmarshal(buf, filter)
+ return matchMaker.unmarshal(task, buf, filter)
}
// targetMaker knows how to (un)marshal a target. Once registered,
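matchMaker.unmarshal now receives the calling *kernel.Task so matchers that care about credentials (the owner matcher below) can translate IDs; protocol-only matchers simply ignore it. A hypothetical extension implementation under the new signature, with placeholder type names fooMarshaler/fooMatcher.

    type fooMarshaler struct{}

    // unmarshal implements matchMaker.unmarshal; the task parameter is unused here.
    func (fooMarshaler) unmarshal(_ *kernel.Task, buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
        if len(buf) < linux.SizeOfXTEntryMatch {
            return nil, fmt.Errorf("buf has insufficient size: %d", len(buf))
        }
        return &fooMatcher{}, nil
    }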
diff --git a/pkg/sentry/socket/netfilter/ipv4.go b/pkg/sentry/socket/netfilter/ipv4.go
index cb78ef60b..af31cbc5b 100644
--- a/pkg/sentry/socket/netfilter/ipv4.go
+++ b/pkg/sentry/socket/netfilter/ipv4.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -80,6 +81,8 @@ func getEntries4(table stack.Table, tablename linux.TableName) (linux.KernelIPTG
copy(entry.Entry.IP.SrcMask[:], rule.Filter.SrcMask)
copy(entry.Entry.IP.OutputInterface[:], rule.Filter.OutputInterface)
copy(entry.Entry.IP.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)
+ copy(entry.Entry.IP.InputInterface[:], rule.Filter.InputInterface)
+ copy(entry.Entry.IP.InputInterfaceMask[:], rule.Filter.InputInterfaceMask)
if rule.Filter.DstInvert {
entry.Entry.IP.InverseFlags |= linux.IPT_INV_DSTIP
}
@@ -123,7 +126,7 @@ func getEntries4(table stack.Table, tablename linux.TableName) (linux.KernelIPTG
return entries, info
}
-func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, table *stack.Table) (map[uint32]int, *syserr.Error) {
+func modifyEntries4(task *kernel.Task, stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, table *stack.Table) (map[uint32]int, *syserr.Error) {
nflog("set entries: setting entries in table %q", replace.Name.String())
// Convert input into a list of rules and their offsets.
@@ -148,23 +151,19 @@ func modifyEntries4(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace,
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): We should support more IPTIP
- // filtering fields.
filter, err := filterFromIPTIP(entry.IP)
if err != nil {
nflog("bad iptip: %v", err)
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): Matchers and targets can specify
- // that they only work for certain protocols, hooks, tables.
// Get matchers.
matchersSize := entry.TargetOffset - linux.SizeOfIPTEntry
if len(optVal) < int(matchersSize) {
nflog("entry doesn't have enough room for its matchers (only %d bytes remain)", len(optVal))
return nil, syserr.ErrInvalidArgument
}
- matchers, err := parseMatchers(filter, optVal[:matchersSize])
+ matchers, err := parseMatchers(task, filter, optVal[:matchersSize])
if err != nil {
nflog("failed to parse matchers: %v", err)
return nil, syserr.ErrInvalidArgument
diff --git a/pkg/sentry/socket/netfilter/ipv6.go b/pkg/sentry/socket/netfilter/ipv6.go
index 5cb7fe4aa..6cefe0b9c 100644
--- a/pkg/sentry/socket/netfilter/ipv6.go
+++ b/pkg/sentry/socket/netfilter/ipv6.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -80,6 +81,8 @@ func getEntries6(table stack.Table, tablename linux.TableName) (linux.KernelIP6T
copy(entry.Entry.IPv6.SrcMask[:], rule.Filter.SrcMask)
copy(entry.Entry.IPv6.OutputInterface[:], rule.Filter.OutputInterface)
copy(entry.Entry.IPv6.OutputInterfaceMask[:], rule.Filter.OutputInterfaceMask)
+ copy(entry.Entry.IPv6.InputInterface[:], rule.Filter.InputInterface)
+ copy(entry.Entry.IPv6.InputInterfaceMask[:], rule.Filter.InputInterfaceMask)
if rule.Filter.DstInvert {
entry.Entry.IPv6.InverseFlags |= linux.IP6T_INV_DSTIP
}
@@ -126,7 +129,7 @@ func getEntries6(table stack.Table, tablename linux.TableName) (linux.KernelIP6T
return entries, info
}
-func modifyEntries6(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, table *stack.Table) (map[uint32]int, *syserr.Error) {
+func modifyEntries6(task *kernel.Task, stk *stack.Stack, optVal []byte, replace *linux.IPTReplace, table *stack.Table) (map[uint32]int, *syserr.Error) {
nflog("set entries: setting entries in table %q", replace.Name.String())
// Convert input into a list of rules and their offsets.
@@ -151,23 +154,19 @@ func modifyEntries6(stk *stack.Stack, optVal []byte, replace *linux.IPTReplace,
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): We should support more IPTIP
- // filtering fields.
filter, err := filterFromIP6TIP(entry.IPv6)
if err != nil {
nflog("bad iptip: %v", err)
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): Matchers and targets can specify
- // that they only work for certain protocols, hooks, tables.
// Get matchers.
matchersSize := entry.TargetOffset - linux.SizeOfIP6TEntry
if len(optVal) < int(matchersSize) {
nflog("entry doesn't have enough room for its matchers (only %d bytes remain)", len(optVal))
return nil, syserr.ErrInvalidArgument
}
- matchers, err := parseMatchers(filter, optVal[:matchersSize])
+ matchers, err := parseMatchers(task, filter, optVal[:matchersSize])
if err != nil {
nflog("failed to parse matchers: %v", err)
return nil, syserr.ErrInvalidArgument
diff --git a/pkg/sentry/socket/netfilter/netfilter.go b/pkg/sentry/socket/netfilter/netfilter.go
index e1c4b06fc..e3eade180 100644
--- a/pkg/sentry/socket/netfilter/netfilter.go
+++ b/pkg/sentry/socket/netfilter/netfilter.go
@@ -174,13 +174,12 @@ func setHooksAndUnderflow(info *linux.IPTGetinfo, table stack.Table, offset uint
// SetEntries sets iptables rules for a single table. See
// net/ipv4/netfilter/ip_tables.c:translate_table for reference.
-func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
+func SetEntries(task *kernel.Task, stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
var replace linux.IPTReplace
replaceBuf := optVal[:linux.SizeOfIPTReplace]
optVal = optVal[linux.SizeOfIPTReplace:]
replace.UnmarshalBytes(replaceBuf)
- // TODO(gvisor.dev/issue/170): Support other tables.
var table stack.Table
switch replace.Name.String() {
case filterTable:
@@ -188,16 +187,16 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
case natTable:
table = stack.EmptyNATTable()
default:
- nflog("we don't yet support writing to the %q table (gvisor.dev/issue/170)", replace.Name.String())
+ nflog("unknown iptables table %q", replace.Name.String())
return syserr.ErrInvalidArgument
}
var err *syserr.Error
var offsets map[uint32]int
if ipv6 {
- offsets, err = modifyEntries6(stk, optVal, &replace, &table)
+ offsets, err = modifyEntries6(task, stk, optVal, &replace, &table)
} else {
- offsets, err = modifyEntries4(stk, optVal, &replace, &table)
+ offsets, err = modifyEntries4(task, stk, optVal, &replace, &table)
}
if err != nil {
return err
@@ -272,7 +271,6 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
table.Rules[ruleIdx] = rule
}
- // TODO(gvisor.dev/issue/170): Support other chains.
// Since we don't support FORWARD, yet, make sure all other chains point to
// ACCEPT rules.
for hook, ruleIdx := range table.BuiltinChains {
@@ -287,7 +285,7 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
}
}
- // TODO(gvisor.dev/issue/170): Check the following conditions:
+ // TODO(gvisor.dev/issue/6167): Check the following conditions:
// - There are no loops.
// - There are no chains without an unconditional final rule.
// - There are no chains without an unconditional underflow rule.
@@ -297,7 +295,7 @@ func SetEntries(stk *stack.Stack, optVal []byte, ipv6 bool) *syserr.Error {
// parseMatchers parses 0 or more matchers from optVal. optVal should contain
// only the matchers.
-func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher, error) {
+func parseMatchers(task *kernel.Task, filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher, error) {
nflog("set entries: parsing matchers of size %d", len(optVal))
var matchers []stack.Matcher
for len(optVal) > 0 {
@@ -321,13 +319,13 @@ func parseMatchers(filter stack.IPHeaderFilter, optVal []byte) ([]stack.Matcher,
}
// Parse the specific matcher.
- matcher, err := unmarshalMatcher(match, filter, optVal[linux.SizeOfXTEntryMatch:match.MatchSize])
+ matcher, err := unmarshalMatcher(task, match, filter, optVal[linux.SizeOfXTEntryMatch:match.MatchSize])
if err != nil {
return nil, fmt.Errorf("failed to create matcher: %v", err)
}
matchers = append(matchers, matcher)
- // TODO(gvisor.dev/issue/170): Check the revision field.
+ // TODO(gvisor.dev/issue/6167): Check the revision field.
optVal = optVal[match.MatchSize:]
}
diff --git a/pkg/sentry/socket/netfilter/owner_matcher.go b/pkg/sentry/socket/netfilter/owner_matcher.go
index 60845cab3..6eff2ae65 100644
--- a/pkg/sentry/socket/netfilter/owner_matcher.go
+++ b/pkg/sentry/socket/netfilter/owner_matcher.go
@@ -19,6 +19,8 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/marshal"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
@@ -40,8 +42,8 @@ func (ownerMarshaler) name() string {
func (ownerMarshaler) marshal(mr matcher) []byte {
matcher := mr.(*OwnerMatcher)
iptOwnerInfo := linux.IPTOwnerInfo{
- UID: matcher.uid,
- GID: matcher.gid,
+ UID: uint32(matcher.uid),
+ GID: uint32(matcher.gid),
}
// Support for UID and GID match.
@@ -63,7 +65,7 @@ func (ownerMarshaler) marshal(mr matcher) []byte {
}
// unmarshal implements matchMaker.unmarshal.
-func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
+func (ownerMarshaler) unmarshal(task *kernel.Task, buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
if len(buf) < linux.SizeOfIPTOwnerInfo {
return nil, fmt.Errorf("buf has insufficient size for owner match: %d", len(buf))
}
@@ -72,11 +74,12 @@ func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.
// exceed what's strictly necessary to hold matchData.
var matchData linux.IPTOwnerInfo
matchData.UnmarshalUnsafe(buf[:linux.SizeOfIPTOwnerInfo])
- nflog("parseMatchers: parsed IPTOwnerInfo: %+v", matchData)
+ nflog("parsed IPTOwnerInfo: %+v", matchData)
var owner OwnerMatcher
- owner.uid = matchData.UID
- owner.gid = matchData.GID
+ creds := task.Credentials()
+ owner.uid = creds.UserNamespace.MapToKUID(auth.UID(matchData.UID))
+ owner.gid = creds.UserNamespace.MapToKGID(auth.GID(matchData.GID))
// Check flags.
if matchData.Match&linux.XT_OWNER_UID != 0 {
@@ -97,8 +100,8 @@ func (ownerMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.
// OwnerMatcher matches against a UID and/or GID.
type OwnerMatcher struct {
- uid uint32
- gid uint32
+ uid auth.KUID
+ gid auth.KGID
matchUID bool
matchGID bool
invertUID bool
@@ -113,7 +116,6 @@ func (*OwnerMatcher) name() string {
// Match implements Matcher.Match.
func (om *OwnerMatcher) Match(hook stack.Hook, pkt *stack.PacketBuffer, _, _ string) (bool, bool) {
// Support only for OUTPUT chain.
- // TODO(gvisor.dev/issue/170): Need to support for POSTROUTING chain also.
if hook != stack.Output {
return false, true
}
@@ -126,7 +128,7 @@ func (om *OwnerMatcher) Match(hook stack.Hook, pkt *stack.PacketBuffer, _, _ str
var matches bool
// Check for UID match.
if om.matchUID {
- if pkt.Owner.UID() == om.uid {
+ if auth.KUID(pkt.Owner.KUID()) == om.uid {
matches = true
}
if matches == om.invertUID {
@@ -137,7 +139,7 @@ func (om *OwnerMatcher) Match(hook stack.Hook, pkt *stack.PacketBuffer, _, _ str
// Check for GID match.
if om.matchGID {
matches = false
- if pkt.Owner.GID() == om.gid {
+ if auth.KGID(pkt.Owner.KGID()) == om.gid {
matches = true
}
if matches == om.invertGID {
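Storing auth.KUID/KGID instead of raw uint32s means a rule written inside a user namespace is compared in kernel-wide ID space, the same space pkt.Owner now reports. A minimal sketch of the mapping step done at unmarshal time, assuming a task t and the rule-supplied matchData as above.

    creds := t.Credentials()
    kuid := creds.UserNamespace.MapToKUID(auth.UID(matchData.UID))
    if !kuid.Ok() {
        // The rule's UID has no mapping in this namespace, so it can never
        // match a kernel-wide packet owner.
    }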
diff --git a/pkg/sentry/socket/netfilter/targets.go b/pkg/sentry/socket/netfilter/targets.go
index fa5456eee..ea56f39c1 100644
--- a/pkg/sentry/socket/netfilter/targets.go
+++ b/pkg/sentry/socket/netfilter/targets.go
@@ -331,7 +331,6 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): Check if the flags are valid.
// Also check if we need to map ports or IP.
// For now, redirect target only supports destination port change.
// Port range and IP range are not supported yet.
@@ -340,7 +339,6 @@ func (*redirectTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): Port range is not supported yet.
if nfRange.RangeIPV4.MinPort != nfRange.RangeIPV4.MaxPort {
nflog("redirectTargetMaker: MinPort != MaxPort (%d, %d)", nfRange.RangeIPV4.MinPort, nfRange.RangeIPV4.MaxPort)
return nil, syserr.ErrInvalidArgument
@@ -420,7 +418,6 @@ func (*nfNATTargetMaker) unmarshal(buf []byte, filter stack.IPHeaderFilter) (tar
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/3549): Check for other flags.
// For now, redirect target only supports destination change.
if natRange.Flags != linux.NF_NAT_RANGE_PROTO_SPECIFIED {
nflog("nfNATTargetMaker: invalid range flags %d", natRange.Flags)
@@ -502,7 +499,6 @@ func (*snatTargetMakerV4) unmarshal(buf []byte, filter stack.IPHeaderFilter) (ta
return nil, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/170): Port range is not supported yet.
if nfRange.RangeIPV4.MinPort != nfRange.RangeIPV4.MaxPort {
nflog("snatTargetMakerV4: MinPort != MaxPort (%d, %d)", nfRange.RangeIPV4.MinPort, nfRange.RangeIPV4.MaxPort)
return nil, syserr.ErrInvalidArgument
@@ -594,7 +590,6 @@ func (*snatTargetMakerV6) unmarshal(buf []byte, filter stack.IPHeaderFilter) (ta
// translateToStandardTarget translates from the value in a
// linux.XTStandardTarget to an stack.Verdict.
func translateToStandardTarget(val int32, netProto tcpip.NetworkProtocolNumber) (target, *syserr.Error) {
- // TODO(gvisor.dev/issue/170): Support other verdicts.
switch val {
case -linux.NF_ACCEPT - 1:
return &acceptTarget{stack.AcceptTarget{
diff --git a/pkg/sentry/socket/netfilter/tcp_matcher.go b/pkg/sentry/socket/netfilter/tcp_matcher.go
index 95bb9826e..e5b73a976 100644
--- a/pkg/sentry/socket/netfilter/tcp_matcher.go
+++ b/pkg/sentry/socket/netfilter/tcp_matcher.go
@@ -19,6 +19,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/marshal"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
@@ -50,7 +51,7 @@ func (tcpMarshaler) marshal(mr matcher) []byte {
}
// unmarshal implements matchMaker.unmarshal.
-func (tcpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
+func (tcpMarshaler) unmarshal(_ *kernel.Task, buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
if len(buf) < linux.SizeOfXTTCP {
return nil, fmt.Errorf("buf has insufficient size for TCP match: %d", len(buf))
}
@@ -95,8 +96,6 @@ func (*TCPMatcher) name() string {
// Match implements Matcher.Match.
func (tm *TCPMatcher) Match(hook stack.Hook, pkt *stack.PacketBuffer, _, _ string) (bool, bool) {
- // TODO(gvisor.dev/issue/170): Proto checks should ultimately be moved
- // into the stack.Check codepath as matchers are added.
switch pkt.NetworkProtocolNumber {
case header.IPv4ProtocolNumber:
netHeader := header.IPv4(pkt.NetworkHeader().View())
diff --git a/pkg/sentry/socket/netfilter/udp_matcher.go b/pkg/sentry/socket/netfilter/udp_matcher.go
index fb8be27e6..aa72ee70c 100644
--- a/pkg/sentry/socket/netfilter/udp_matcher.go
+++ b/pkg/sentry/socket/netfilter/udp_matcher.go
@@ -19,6 +19,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/marshal"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
@@ -50,7 +51,7 @@ func (udpMarshaler) marshal(mr matcher) []byte {
}
// unmarshal implements matchMaker.unmarshal.
-func (udpMarshaler) unmarshal(buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
+func (udpMarshaler) unmarshal(_ *kernel.Task, buf []byte, filter stack.IPHeaderFilter) (stack.Matcher, error) {
if len(buf) < linux.SizeOfXTUDP {
return nil, fmt.Errorf("buf has insufficient size for UDP match: %d", len(buf))
}
@@ -92,8 +93,6 @@ func (*UDPMatcher) name() string {
// Match implements Matcher.Match.
func (um *UDPMatcher) Match(hook stack.Hook, pkt *stack.PacketBuffer, _, _ string) (bool, bool) {
- // TODO(gvisor.dev/issue/170): Proto checks should ultimately be moved
- // into the stack.Check codepath as matchers are added.
switch pkt.NetworkProtocolNumber {
case header.IPv4ProtocolNumber:
netHeader := header.IPv4(pkt.NetworkHeader().View())
diff --git a/pkg/sentry/socket/netlink/BUILD b/pkg/sentry/socket/netlink/BUILD
index 64cd263da..9710a15ee 100644
--- a/pkg/sentry/socket/netlink/BUILD
+++ b/pkg/sentry/socket/netlink/BUILD
@@ -14,8 +14,10 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/abi/linux",
+ "//pkg/abi/linux/errno",
"//pkg/bits",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/marshal",
"//pkg/marshal/primitive",
@@ -34,7 +36,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/usermem",
"//pkg/waiter",
diff --git a/pkg/sentry/socket/netlink/route/protocol.go b/pkg/sentry/socket/netlink/route/protocol.go
index 86f6419dc..d526acb73 100644
--- a/pkg/sentry/socket/netlink/route/protocol.go
+++ b/pkg/sentry/socket/netlink/route/protocol.go
@@ -161,6 +161,47 @@ func (p *Protocol) getLink(ctx context.Context, msg *netlink.Message, ms *netlin
return nil
}
+// delLink handles RTM_DELLINK requests.
+func (p *Protocol) delLink(ctx context.Context, msg *netlink.Message, ms *netlink.MessageSet) *syserr.Error {
+ stack := inet.StackFromContext(ctx)
+ if stack == nil {
+ // No network stack.
+ return syserr.ErrProtocolNotSupported
+ }
+
+ var ifinfomsg linux.InterfaceInfoMessage
+ attrs, ok := msg.GetData(&ifinfomsg)
+ if !ok {
+ return syserr.ErrInvalidArgument
+ }
+ if ifinfomsg.Index == 0 {
+ // The index is unspecified, search by the interface name.
+ ahdr, value, _, ok := attrs.ParseFirst()
+ if !ok {
+ return syserr.ErrInvalidArgument
+ }
+ switch ahdr.Type {
+ case linux.IFLA_IFNAME:
+ if len(value) < 1 {
+ return syserr.ErrInvalidArgument
+ }
+ ifname := string(value[:len(value)-1])
+ for idx, ifa := range stack.Interfaces() {
+ if ifname == ifa.Name {
+ ifinfomsg.Index = idx
+ break
+ }
+ }
+ default:
+ return syserr.ErrInvalidArgument
+ }
+ if ifinfomsg.Index == 0 {
+ return syserr.ErrNoDevice
+ }
+ }
+ return syserr.FromError(stack.RemoveInterface(ifinfomsg.Index))
+}
+
// addNewLinkMessage appends RTM_NEWLINK message for the given interface into
// the message set.
func addNewLinkMessage(ms *netlink.MessageSet, idx int32, i inet.Interface) {
@@ -537,6 +578,8 @@ func (p *Protocol) ProcessMessage(ctx context.Context, msg *netlink.Message, ms
switch hdr.Type {
case linux.RTM_GETLINK:
return p.getLink(ctx, msg, ms)
+ case linux.RTM_DELLINK:
+ return p.delLink(ctx, msg, ms)
case linux.RTM_GETROUTE:
return p.dumpRoutes(ctx, msg, ms)
case linux.RTM_NEWADDR:
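delLink accepts either an explicit interface index or an IFLA_IFNAME attribute; netlink string attributes carry a trailing NUL, hence the value[:len(value)-1] trim above. Illustrated with stand-in bytes:

    // What the handler sees for an `ip link delete eth0`-style request:
    // a NUL-terminated name attribute.
    value := []byte("eth0\x00")
    ifname := string(value[:len(value)-1]) // "eth0"
    _ = ifname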
diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go
index 280563d09..ed5fa9c38 100644
--- a/pkg/sentry/socket/netlink/socket.go
+++ b/pkg/sentry/socket/netlink/socket.go
@@ -20,7 +20,9 @@ import (
"math"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/abi/linux/errno"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -37,7 +39,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -56,7 +57,7 @@ const (
maxSendBufferSize = 4 << 20 // 4MB
)
-var errNoFilter = syserr.New("no filter attached", linux.ENOENT)
+var errNoFilter = syserr.New("no filter attached", errno.ENOENT)
// netlinkSocketDevice is the netlink socket virtual device.
var netlinkSocketDevice = device.NewAnonDevice()
@@ -212,7 +213,7 @@ func (s *socketOpsCommon) ConnectedPasscred() bool {
// Ioctl implements fs.FileOperations.Ioctl.
func (*Socket) Ioctl(context.Context, *fs.File, usermem.IO, arch.SyscallArguments) (uintptr, error) {
// TODO(b/68878065): no ioctls supported.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// ExtractSockAddr extracts the SockAddrNetlink from b.
@@ -528,7 +529,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
}
}
- if n, err := doRead(); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
+ if n, err := doRead(); err != linuxerr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
var mflags int
if n < int64(r.MsgSize) {
mflags |= linux.MSG_TRUNC
@@ -546,7 +547,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
defer s.EventUnregister(&e)
for {
- if n, err := doRead(); err != syserror.ErrWouldBlock {
+ if n, err := doRead(); err != linuxerr.ErrWouldBlock {
var mflags int
if n < int64(r.MsgSize) {
mflags |= linux.MSG_TRUNC
@@ -558,7 +559,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
}
if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain
}
return 0, 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)
diff --git a/pkg/sentry/socket/netlink/socket_vfs2.go b/pkg/sentry/socket/netlink/socket_vfs2.go
index 842036764..4d3cdea62 100644
--- a/pkg/sentry/socket/netlink/socket_vfs2.go
+++ b/pkg/sentry/socket/netlink/socket_vfs2.go
@@ -17,6 +17,7 @@ package netlink
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/socket"
@@ -24,7 +25,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -108,12 +108,12 @@ func (s *SocketVFS2) EventUnregister(e *waiter.Entry) {
// Ioctl implements vfs.FileDescriptionImpl.
func (*SocketVFS2) Ioctl(context.Context, usermem.IO, arch.SyscallArguments) (uintptr, error) {
// TODO(b/68878065): no ioctls supported.
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// PRead implements vfs.FileDescriptionImpl.
func (s *SocketVFS2) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Read implements vfs.FileDescriptionImpl.
@@ -121,7 +121,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
if dst.NumBytes() == 0 {
@@ -134,7 +134,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// PWrite implements vfs.FileDescriptionImpl.
func (s *SocketVFS2) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Write implements vfs.FileDescriptionImpl.
@@ -142,7 +142,7 @@ func (s *SocketVFS2) Write(ctx context.Context, src usermem.IOSequence, opts vfs
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
n, err := s.sendMsg(ctx, src, nil, 0, socket.ControlMessages{})
diff --git a/pkg/sentry/socket/netstack/BUILD b/pkg/sentry/socket/netstack/BUILD
index 9561b7c25..bf5ec4558 100644
--- a/pkg/sentry/socket/netstack/BUILD
+++ b/pkg/sentry/socket/netstack/BUILD
@@ -19,7 +19,9 @@ go_library(
],
deps = [
"//pkg/abi/linux",
+ "//pkg/abi/linux/errno",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/marshal",
@@ -40,13 +42,13 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/tcpip/header",
"//pkg/tcpip/link/tun",
"//pkg/tcpip/network/ipv4",
"//pkg/tcpip/network/ipv6",
"//pkg/tcpip/stack",
+ "//pkg/tcpip/transport",
"//pkg/tcpip/transport/tcp",
"//pkg/tcpip/transport/udp",
"//pkg/usermem",
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index d4b1bad67..aa081e90d 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -36,7 +36,9 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/abi/linux/errno"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal"
@@ -47,18 +49,18 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/fsutil"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/netfilter"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
+ "gvisor.dev/gvisor/pkg/tcpip/transport"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -271,6 +273,7 @@ var Metrics = tcpip.Stats{
Timeouts: mustCreateMetric("/netstack/tcp/timeouts", "Number of times RTO expired."),
ChecksumErrors: mustCreateMetric("/netstack/tcp/checksum_errors", "Number of segments dropped due to bad checksums."),
 FailedPortReservations: mustCreateMetric("/netstack/tcp/failed_port_reservations", "Number of times TCP failed to reserve a port."),
+ SegmentsAckedWithDSACK: mustCreateMetric("/netstack/tcp/segments_acked_with_dsack", "Number of segments for which DSACK was received."),
},
UDP: tcpip.UDPStats{
PacketsReceived: mustCreateMetric("/netstack/udp/packets_received", "Number of UDP datagrams received via HandlePacket."),
@@ -289,7 +292,7 @@ const DefaultTTL = 64
const sizeOfInt32 int = 4
-var errStackType = syserr.New("expected but did not receive a netstack.Stack", linux.EINVAL)
+var errStackType = syserr.New("expected but did not receive a netstack.Stack", errno.EINVAL)
// commonEndpoint represents the intersection of a tcpip.Endpoint and a
// transport.Endpoint.
@@ -416,6 +419,27 @@ func bytesToIPAddress(addr []byte) tcpip.Address {
return tcpip.Address(addr)
}
+// minSockAddrLen returns the minimum length in bytes of a socket address for
+// the socket's family.
+func (s *socketOpsCommon) minSockAddrLen() int {
+ const addressFamilySize = 2
+
+ switch s.family {
+ case linux.AF_UNIX:
+ return addressFamilySize
+ case linux.AF_INET:
+ return sockAddrInetSize
+ case linux.AF_INET6:
+ return sockAddrInet6Size
+ case linux.AF_PACKET:
+ return sockAddrLinkSize
+ case linux.AF_UNSPEC:
+ return addressFamilySize
+ default:
+ panic(fmt.Sprintf("s.family unrecognized = %d", s.family))
+ }
+}
+
func (s *socketOpsCommon) isPacketBased() bool {
return s.skType == linux.SOCK_DGRAM || s.skType == linux.SOCK_SEQPACKET || s.skType == linux.SOCK_RDM || s.skType == linux.SOCK_RAW
}
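
A minimal, self-contained sketch of the length check that minSockAddrLen enables in Bind below: reject a raw sockaddr buffer shorter than its family's minimum before unmarshalling it. The family constants and struct sizes are written out here for illustration only; the sentry takes them from the linux ABI package.

package main

import (
	"encoding/binary"
	"fmt"
)

// Illustrative minimum sockaddr sizes per family (Linux values: AF_UNIX=1,
// AF_INET=2, AF_INET6=10; sizeof sockaddr_in=16, sockaddr_in6=28).
var minAddrLen = map[uint16]int{
	1:  2,  // AF_UNIX: only the family field is required.
	2:  16, // AF_INET
	10: 28, // AF_INET6
}

// validateSockAddr mirrors the Bind-side check: the buffer must hold at least
// the 2-byte family field and then that family's minimum struct size.
func validateSockAddr(buf []byte) error {
	if len(buf) < 2 {
		return fmt.Errorf("sockaddr too short to hold a family field")
	}
	// sa_family is stored in host byte order; a little-endian host is assumed here.
	family := binary.LittleEndian.Uint16(buf[:2])
	min, ok := minAddrLen[family]
	if !ok {
		return fmt.Errorf("unsupported family %d", family)
	}
	if len(buf) < min {
		return fmt.Errorf("sockaddr length %d below minimum %d for family %d", len(buf), min, family)
	}
	return nil
}

func main() {
	// An AF_INET address truncated to 4 bytes is rejected, matching the
	// EINVAL that Bind now returns for short addresses.
	fmt.Println(validateSockAddr([]byte{2, 0, 0x1f, 0x90}))
}
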
@@ -455,7 +479,7 @@ func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOS
}
n, _, _, _, _, err := s.nonBlockingRead(ctx, dst, false, false, false)
if err == syserr.ErrWouldBlock {
- return int64(n), syserror.ErrWouldBlock
+ return int64(n), linuxerr.ErrWouldBlock
}
if err != nil {
return 0, err.ToError()
@@ -488,14 +512,14 @@ func (s *SocketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IO
r := src.Reader(ctx)
n, err := s.Endpoint.Write(r, tcpip.WriteOptions{})
if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
if err != nil {
return 0, syserr.TranslateNetstackError(err).ToError()
}
if n < src.NumBytes() {
- return n, syserror.ErrWouldBlock
+ return n, linuxerr.ErrWouldBlock
}
return n, nil
@@ -542,16 +566,21 @@ func (s *socketOpsCommon) Readiness(mask waiter.EventMask) waiter.EventMask {
return s.Endpoint.Readiness(mask)
}
-func (s *socketOpsCommon) checkFamily(family uint16, exact bool) *syserr.Error {
+// checkFamily returns true iff the specified address family may be used with
+// the socket.
+//
+// If exact is true, then the specified address family must be an exact match
+// with the socket's family.
+func (s *socketOpsCommon) checkFamily(family uint16, exact bool) bool {
if family == uint16(s.family) {
- return nil
+ return true
}
if !exact && family == linux.AF_INET && s.family == linux.AF_INET6 {
if !s.Endpoint.SocketOptions().GetV6Only() {
- return nil
+ return true
}
}
- return syserr.ErrInvalidArgument
+ return false
}
// mapFamily maps the AF_INET ANY address to the IPv4-mapped IPv6 ANY if the
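
checkFamily now returns a bool so each caller can pick its own error (ErrInvalidArgument for connect/sendmsg, ErrAddressFamilyNotSupported for bind, as in the hunks that follow). A standalone sketch of the same dual-stack rule, with the Linux family values written in for illustration:

// checkFamilySketch: an address family is acceptable if it matches the
// socket's family exactly, or if it is AF_INET on an AF_INET6 socket, exact
// matching was not requested, and the socket is not restricted to IPv6-only.
func checkFamilySketch(sockFamily, addrFamily uint16, exact, v6Only bool) bool {
	const afInet, afInet6 = 2, 10 // Linux AF_INET and AF_INET6.
	if addrFamily == sockFamily {
		return true
	}
	return !exact && addrFamily == afInet && sockFamily == afInet6 && !v6Only
}
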
@@ -584,8 +613,8 @@ func (s *socketOpsCommon) Connect(t *kernel.Task, sockaddr []byte, blocking bool
return syserr.TranslateNetstackError(err)
}
- if err := s.checkFamily(family, false /* exact */); err != nil {
- return err
+ if !s.checkFamily(family, false /* exact */) {
+ return syserr.ErrInvalidArgument
}
addr = s.mapFamily(addr, family)
@@ -643,23 +672,24 @@ func (s *socketOpsCommon) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {
}
a.UnmarshalBytes(sockaddr[:sockAddrLinkSize])
- if a.Protocol != uint16(s.protocol) {
- return syserr.ErrInvalidArgument
- }
-
addr = tcpip.FullAddress{
NIC: tcpip.NICID(a.InterfaceIndex),
Addr: tcpip.Address(a.HardwareAddr[:header.EthernetAddressSize]),
+ Port: socket.Ntohs(a.Protocol),
}
} else {
+ if s.minSockAddrLen() > len(sockaddr) {
+ return syserr.ErrInvalidArgument
+ }
+
var err *syserr.Error
addr, family, err = socket.AddressAndFamily(sockaddr)
if err != nil {
return err
}
- if err = s.checkFamily(family, true /* exact */); err != nil {
- return err
+ if !s.checkFamily(family, true /* exact */) {
+ return syserr.ErrAddressFamilyNotSupported
}
addr = s.mapFamily(addr, family)
@@ -1680,6 +1710,26 @@ func SetSockOpt(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, level int
return nil
}
+func clampBufSize(newSz, min, max int64, ignoreMax bool) int64 {
+ // packetOverheadFactor is used to multiply the value provided by the user on
+ // a setsockopt(2) call when setting the send/receive buffer sizes of sockets.
+ const packetOverheadFactor = 2
+
+ if !ignoreMax && newSz > max {
+ newSz = max
+ }
+
+ if newSz < math.MaxInt32/packetOverheadFactor {
+ newSz *= packetOverheadFactor
+ if newSz < min {
+ newSz = min
+ }
+ } else {
+ newSz = math.MaxInt32
+ }
+ return newSz
+}
+
// setSockOptSocket implements SetSockOpt when level is SOL_SOCKET.
func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name int, optVal []byte) *syserr.Error {
switch name {
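
A worked example of the clampBufSize arithmetic under assumed limits of min=4096 and max=4 MiB: a 128 KiB SO_SNDBUF request is not above max, so it is only doubled by packetOverheadFactor to 256 KiB; a 1 KiB request doubles to 2 KiB and is then raised to min; a request above max is capped first unless ignoreMax is set. The copy below exists only so the numbers can be checked in isolation.

package main

import (
	"fmt"
	"math"
)

// Copy of the clamping logic from the hunk above, for checking the arithmetic.
func clampBufSizeSketch(newSz, min, max int64, ignoreMax bool) int64 {
	const packetOverheadFactor = 2
	if !ignoreMax && newSz > max {
		newSz = max
	}
	if newSz < math.MaxInt32/packetOverheadFactor {
		newSz *= packetOverheadFactor
		if newSz < min {
			newSz = min
		}
	} else {
		newSz = math.MaxInt32
	}
	return newSz
}

func main() {
	const min, max = 4096, 4 << 20 // assumed limits for the example
	fmt.Println(clampBufSizeSketch(128<<10, min, max, false)) // 262144: doubled
	fmt.Println(clampBufSizeSketch(1<<10, min, max, false))   // 4096: raised to min after doubling
	fmt.Println(clampBufSizeSketch(8<<20, min, max, false))   // 8388608: capped to max, then doubled
	fmt.Println(clampBufSizeSketch(8<<20, min, max, true))    // 16777216: cap skipped, doubled
}
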
@@ -1689,7 +1739,9 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
}
v := hostarch.ByteOrder.Uint32(optVal)
- ep.SocketOptions().SetSendBufferSize(int64(v), true /* notify */)
+ min, max := ep.SocketOptions().SendBufferLimits()
+ clamped := clampBufSize(int64(v), min, max, false /* ignoreMax */)
+ ep.SocketOptions().SetSendBufferSize(clamped, true /* notify */)
return nil
case linux.SO_RCVBUF:
@@ -1698,7 +1750,24 @@ func setSockOptSocket(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, nam
}
v := hostarch.ByteOrder.Uint32(optVal)
- ep.SocketOptions().SetReceiveBufferSize(int64(v), true /* notify */)
+ min, max := ep.SocketOptions().ReceiveBufferLimits()
+ clamped := clampBufSize(int64(v), min, max, false /* ignoreMax */)
+ ep.SocketOptions().SetReceiveBufferSize(clamped, true /* notify */)
+ return nil
+
+ case linux.SO_RCVBUFFORCE:
+ if len(optVal) < sizeOfInt32 {
+ return syserr.ErrInvalidArgument
+ }
+
+ if creds := auth.CredentialsFromContext(t); !creds.HasCapability(linux.CAP_NET_ADMIN) {
+ return syserr.ErrNotPermitted
+ }
+
+ v := hostarch.ByteOrder.Uint32(optVal)
+ min, max := ep.SocketOptions().ReceiveBufferLimits()
+ clamped := clampBufSize(int64(v), min, max, true /* ignoreMax */)
+ ep.SocketOptions().SetReceiveBufferSize(clamped, true /* notify */)
return nil
case linux.SO_REUSEADDR:
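
A sketch of how SO_RCVBUFFORCE differs from SO_RCVBUF in the branches above: both go through the same clamping, but the force variant is gated on CAP_NET_ADMIN and passes ignoreMax=true so the configured maximum is skipped. The helper reuses clampBufSizeSketch from the previous sketch; the capability flag is a stand-in for the sentry's credential check, not its API.

// setRcvBufSketch: the force path requires privilege and skips the max clamp;
// the plain option silently clamps to the [min, max] range. It returns false
// when the caller lacks the capability the force path requires.
func setRcvBufSketch(requested, min, max int64, force, hasCapNetAdmin bool) (int64, bool) {
	if force && !hasCapNetAdmin {
		return 0, false // EPERM in the hunk above (syserr.ErrNotPermitted).
	}
	return clampBufSizeSketch(requested, min, max, force /* ignoreMax */), true
}
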
@@ -2003,7 +2072,7 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
if isTCPSocket(skType, skProto) && tcp.EndpointState(ep.State()) != tcp.StateInitial {
return syserr.ErrInvalidEndpointState
- } else if isUDPSocket(skType, skProto) && udp.EndpointState(ep.State()) != udp.StateInitial {
+ } else if isUDPSocket(skType, skProto) && transport.DatagramEndpointState(ep.State()) != transport.DatagramEndpointStateInitial {
return syserr.ErrInvalidEndpointState
}
@@ -2105,10 +2174,10 @@ func setSockOptIPv6(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name
return syserr.ErrNoDevice
}
// Stack must be a netstack stack.
- return netfilter.SetEntries(stack.(*Stack).Stack, optVal, true)
+ return netfilter.SetEntries(t, stack.(*Stack).Stack, optVal, true)
case linux.IP6T_SO_SET_ADD_COUNTERS:
- // TODO(gvisor.dev/issue/170): Counter support.
+ log.Infof("IP6T_SO_SET_ADD_COUNTERS is not supported")
return nil
default:
@@ -2348,10 +2417,10 @@ func setSockOptIP(t *kernel.Task, s socket.SocketOps, ep commonEndpoint, name in
return syserr.ErrNoDevice
}
// Stack must be a netstack stack.
- return netfilter.SetEntries(stack.(*Stack).Stack, optVal, false)
+ return netfilter.SetEntries(t, stack.(*Stack).Stack, optVal, false)
case linux.IPT_SO_SET_ADD_COUNTERS:
- // TODO(gvisor.dev/issue/170): Counter support.
+ log.Infof("IPT_SO_SET_ADD_COUNTERS is not supported")
return nil
case linux.IP_ADD_SOURCE_MEMBERSHIP,
@@ -2808,7 +2877,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
if n > 0 {
return n, msgFlags, senderAddr, senderAddrLen, controlMessages, nil
}
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return 0, 0, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain
}
return 0, 0, nil, 0, socket.ControlMessages{}, syserr.FromError(err)
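
This hunk shows the comparison pattern used throughout this change: sentinel equality against syserror values is replaced by linuxerr.Equals, which matches an error against a given errno rather than relying on pointer identity with a particular sentinel. A hedged sketch of the call shape, using the same import path as the files above:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

func main() {
	// An error produced somewhere along the blocking path.
	var err error = linuxerr.ETIMEDOUT

	// Equals matches by errno, so the check still holds if the concrete
	// error value is not the sentinel itself.
	if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
		fmt.Println("translate to ErrTryAgain, as RecvMsg does above")
	}
}
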
@@ -2830,8 +2899,8 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
if err != nil {
return 0, err
}
- if err := s.checkFamily(family, false /* exact */); err != nil {
- return 0, err
+ if !s.checkFamily(family, false /* exact */) {
+ return 0, syserr.ErrInvalidArgument
}
addrBuf = s.mapFamily(addrBuf, family)
@@ -2876,7 +2945,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
// became available between when we last checked and when we setup
// the notification.
if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return int(total), syserr.ErrTryAgain
}
// handleIOError will consume errors from t.Block if needed.
@@ -2908,7 +2977,7 @@ func (s *socketOpsCommon) ioctl(ctx context.Context, io usermem.IO, args arch.Sy
s.readMu.Lock()
defer s.readMu.Unlock()
if !s.timestampValid {
- return 0, syserror.ENOENT
+ return 0, linuxerr.ENOENT
}
tv := linux.NsecToTimeval(s.timestampNS)
@@ -3014,7 +3083,7 @@ func Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.Sysc
unimpl.EmitUnimplementedEvent(ctx)
}
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// interfaceIoctl implements interface requests.
@@ -3289,10 +3358,10 @@ func (s *socketOpsCommon) State() uint32 {
}
case isUDPSocket(s.skType, s.protocol):
// UDP socket.
- switch udp.EndpointState(s.Endpoint.State()) {
- case udp.StateInitial, udp.StateBound, udp.StateClosed:
+ switch transport.DatagramEndpointState(s.Endpoint.State()) {
+ case transport.DatagramEndpointStateInitial, transport.DatagramEndpointStateBound, transport.DatagramEndpointStateClosed:
return linux.TCP_CLOSE
- case udp.StateConnected:
+ case transport.DatagramEndpointStateConnected:
return linux.TCP_ESTABLISHED
default:
return 0
diff --git a/pkg/sentry/socket/netstack/netstack_vfs2.go b/pkg/sentry/socket/netstack/netstack_vfs2.go
index 30f3ad153..3cdf29b80 100644
--- a/pkg/sentry/socket/netstack/netstack_vfs2.go
+++ b/pkg/sentry/socket/netstack/netstack_vfs2.go
@@ -17,6 +17,7 @@ package netstack
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -26,7 +27,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -104,7 +104,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
if dst.NumBytes() == 0 {
@@ -112,7 +112,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
}
n, _, _, _, _, err := s.nonBlockingRead(ctx, dst, false, false, false)
if err == syserr.ErrWouldBlock {
- return int64(n), syserror.ErrWouldBlock
+ return int64(n), linuxerr.ErrWouldBlock
}
if err != nil {
return 0, err.ToError()
@@ -125,20 +125,20 @@ func (s *SocketVFS2) Write(ctx context.Context, src usermem.IOSequence, opts vfs
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
r := src.Reader(ctx)
n, err := s.Endpoint.Write(r, tcpip.WriteOptions{})
if _, ok := err.(*tcpip.ErrWouldBlock); ok {
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
if err != nil {
return 0, syserr.TranslateNetstackError(err).ToError()
}
if n < src.NumBytes() {
- return n, syserror.ErrWouldBlock
+ return n, linuxerr.ErrWouldBlock
}
return n, nil
diff --git a/pkg/sentry/socket/netstack/stack.go b/pkg/sentry/socket/netstack/stack.go
index eef5e6519..ea199f223 100644
--- a/pkg/sentry/socket/netstack/stack.go
+++ b/pkg/sentry/socket/netstack/stack.go
@@ -18,10 +18,10 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
@@ -71,6 +71,12 @@ func (s *Stack) Interfaces() map[int32]inet.Interface {
return is
}
+// RemoveInterface implements inet.Stack.RemoveInterface.
+func (s *Stack) RemoveInterface(idx int32) error {
+ nic := tcpip.NICID(idx)
+ return syserr.TranslateNetstackError(s.Stack.RemoveNIC(nic)).ToError()
+}
+
// InterfaceAddrs implements inet.Stack.InterfaceAddrs.
func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr {
nicAddrs := make(map[int32][]inet.InterfaceAddr)
@@ -110,24 +116,24 @@ func convertAddr(addr inet.InterfaceAddr) (tcpip.ProtocolAddress, error) {
switch addr.Family {
case linux.AF_INET:
if len(addr.Addr) != header.IPv4AddressSize {
- return protocolAddress, syserror.EINVAL
+ return protocolAddress, linuxerr.EINVAL
}
if addr.PrefixLen > header.IPv4AddressSize*8 {
- return protocolAddress, syserror.EINVAL
+ return protocolAddress, linuxerr.EINVAL
}
protocol = ipv4.ProtocolNumber
address = tcpip.Address(addr.Addr)
case linux.AF_INET6:
if len(addr.Addr) != header.IPv6AddressSize {
- return protocolAddress, syserror.EINVAL
+ return protocolAddress, linuxerr.EINVAL
}
if addr.PrefixLen > header.IPv6AddressSize*8 {
- return protocolAddress, syserror.EINVAL
+ return protocolAddress, linuxerr.EINVAL
}
protocol = ipv6.ProtocolNumber
address = tcpip.Address(addr.Addr)
default:
- return protocolAddress, syserror.ENOTSUP
+ return protocolAddress, linuxerr.ENOTSUP
}
protocolAddress = tcpip.ProtocolAddress{
@@ -149,7 +155,7 @@ func (s *Stack) AddInterfaceAddr(idx int32, addr inet.InterfaceAddr) error {
// Attach address to interface.
nicID := tcpip.NICID(idx)
- if err := s.Stack.AddProtocolAddressWithOptions(nicID, protocolAddress, stack.CanBePrimaryEndpoint); err != nil {
+ if err := s.Stack.AddProtocolAddress(nicID, protocolAddress, stack.AddressProperties{}); err != nil {
return syserr.TranslateNetstackError(err).ToError()
}
diff --git a/pkg/sentry/socket/netstack/tun.go b/pkg/sentry/socket/netstack/tun.go
index c7ed52702..e67fe9700 100644
--- a/pkg/sentry/socket/netstack/tun.go
+++ b/pkg/sentry/socket/netstack/tun.go
@@ -16,7 +16,7 @@ package netstack
import (
"gvisor.dev/gvisor/pkg/abi/linux"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/tcpip/link/tun"
)
@@ -41,7 +41,7 @@ func LinuxToTUNFlags(flags uint16) (tun.Flags, error) {
// when there is no sk_filter. See __tun_chr_ioctl() in
// drivers/net/tun.c.
if flags&^uint16(linux.IFF_TUN|linux.IFF_TAP|linux.IFF_NO_PI|linux.IFF_ONE_QUEUE) != 0 {
- return tun.Flags{}, syserror.EINVAL
+ return tun.Flags{}, linuxerr.EINVAL
}
return tun.Flags{
TUN: flags&linux.IFF_TUN != 0,
diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go
index 353f4ade0..841d5bd55 100644
--- a/pkg/sentry/socket/socket.go
+++ b/pkg/sentry/socket/socket.go
@@ -509,7 +509,6 @@ func SetSockOptEmitUnimplementedEvent(t *kernel.Task, name int) {
linux.SO_ATTACH_REUSEPORT_EBPF,
linux.SO_CNX_ADVICE,
linux.SO_DETACH_FILTER,
- linux.SO_RCVBUFFORCE,
linux.SO_SNDBUFFORCE:
t.Kernel().EmitUnimplementedEvent(t)
@@ -659,7 +658,6 @@ func ConvertAddress(family int, addr tcpip.FullAddress) (linux.SockAddr, uint32)
return &out, uint32(sockAddrInet6Size)
case linux.AF_PACKET:
- // TODO(gvisor.dev/issue/173): Return protocol too.
var out linux.SockAddrLink
out.Family = linux.AF_PACKET
out.InterfaceIndex = int32(addr.NIC)
@@ -745,14 +743,16 @@ func AddressAndFamily(addr []byte) (tcpip.FullAddress, uint16, *syserr.Error) {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
a.UnmarshalUnsafe(addr[:sockAddrLinkSize])
+ // TODO(https://gvisor.dev/issue/6530): Do not assume all interfaces have
+ // an ethernet address.
if a.Family != linux.AF_PACKET || a.HardwareAddrLen != header.EthernetAddressSize {
return tcpip.FullAddress{}, family, syserr.ErrInvalidArgument
}
- // TODO(gvisor.dev/issue/173): Return protocol too.
return tcpip.FullAddress{
NIC: tcpip.NICID(a.InterfaceIndex),
Addr: tcpip.Address(a.HardwareAddr[:header.EthernetAddressSize]),
+ Port: Ntohs(a.Protocol),
}, family, nil
case linux.AF_UNSPEC:
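
With this change the AF_PACKET protocol (sll_protocol) is propagated as the Port of the returned FullAddress via Ntohs. A standalone sketch of the byte-order conversion involved, assuming a little-endian host (a generic ntohs is a no-op on big-endian machines); this is not the sentry helper itself:

// ntohsSketch converts a 16-bit value from network byte order (big-endian) to
// host order on a little-endian machine. sll_protocol is stored in network
// order, so ETH_P_IP (0x0800) reads back as 0x0008 until swapped.
func ntohsSketch(v uint16) uint16 {
	return (v << 8) | (v >> 8)
}

// Example: ntohsSketch(0x0008) == 0x0800 (ETH_P_IP).
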
diff --git a/pkg/sentry/socket/unix/BUILD b/pkg/sentry/socket/unix/BUILD
index c9cbefb3a..7b546c04d 100644
--- a/pkg/sentry/socket/unix/BUILD
+++ b/pkg/sentry/socket/unix/BUILD
@@ -39,6 +39,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/hostarch",
"//pkg/log",
@@ -61,7 +62,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/vfs",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/usermem",
"//pkg/waiter",
diff --git a/pkg/sentry/socket/unix/transport/connectioned.go b/pkg/sentry/socket/unix/transport/connectioned.go
index 33f9aeb06..b3f0cf563 100644
--- a/pkg/sentry/socket/unix/transport/connectioned.go
+++ b/pkg/sentry/socket/unix/transport/connectioned.go
@@ -129,9 +129,9 @@ func newConnectioned(ctx context.Context, stype linux.SockType, uid UniqueIDProv
stype: stype,
}
+ ep.ops.InitHandler(ep, &stackHandler{}, getSendBufferLimits, getReceiveBufferLimits)
ep.ops.SetSendBufferSize(defaultBufferSize, false /* notify */)
ep.ops.SetReceiveBufferSize(defaultBufferSize, false /* notify */)
- ep.ops.InitHandler(ep, &stackHandler{}, getSendBufferLimits, getReceiveBufferLimits)
return ep
}
@@ -406,14 +406,15 @@ func (e *connectionedEndpoint) Listen(backlog int) *syserr.Error {
// Accept accepts a new connection.
func (e *connectionedEndpoint) Accept(peerAddr *tcpip.FullAddress) (Endpoint, *syserr.Error) {
e.Lock()
- defer e.Unlock()
if !e.Listening() {
+ e.Unlock()
return nil, syserr.ErrInvalidEndpointState
}
select {
case ne := <-e.acceptedChan:
+ e.Unlock()
if peerAddr != nil {
ne.Lock()
c := ne.connected
@@ -429,6 +430,7 @@ func (e *connectionedEndpoint) Accept(peerAddr *tcpip.FullAddress) (Endpoint, *s
return ne, nil
default:
+ e.Unlock()
// Nothing left.
return nil, syserr.ErrWouldBlock
}
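
The Accept rewrite replaces the deferred unlock with explicit unlocks so the listener's mutex is released before the accepted endpoint's mutex is taken; the two locks are never held at once. A generic sketch of that shape with hypothetical types (listenerSketch, connSketch), not the transport package's own:

package main

import (
	"errors"
	"sync"
)

type connSketch struct {
	mu       sync.Mutex
	peerAddr string
}

type listenerSketch struct {
	mu        sync.Mutex
	listening bool
	accepted  chan *connSketch
}

var (
	errInvalidState = errors.New("invalid endpoint state")
	errWouldBlock   = errors.New("operation would block")
)

// accept checks state and dequeues under the listener's lock, then drops that
// lock before locking the accepted endpoint to read its peer address.
func (l *listenerSketch) accept() (*connSketch, string, error) {
	l.mu.Lock()
	if !l.listening {
		l.mu.Unlock()
		return nil, "", errInvalidState
	}
	select {
	case c := <-l.accepted:
		l.mu.Unlock() // Never hold l.mu while taking c.mu.
		c.mu.Lock()
		addr := c.peerAddr
		c.mu.Unlock()
		return c, addr, nil
	default:
		l.mu.Unlock()
		return nil, "", errWouldBlock
	}
}

func main() {}
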
@@ -517,3 +519,6 @@ func (e *connectionedEndpoint) OnSetSendBufferSize(v int64) (newSz int64) {
}
return v
}
+
+// WakeupWriters implements tcpip.SocketOptionsHandler.WakeupWriters.
+func (e *connectionedEndpoint) WakeupWriters() {}
diff --git a/pkg/sentry/socket/unix/transport/connectionless.go b/pkg/sentry/socket/unix/transport/connectionless.go
index 61338728a..61311718e 100644
--- a/pkg/sentry/socket/unix/transport/connectionless.go
+++ b/pkg/sentry/socket/unix/transport/connectionless.go
@@ -44,9 +44,9 @@ func NewConnectionless(ctx context.Context) Endpoint {
q := queue{ReaderQueue: ep.Queue, WriterQueue: &waiter.Queue{}, limit: defaultBufferSize}
q.InitRefs()
ep.receiver = &queueReceiver{readQueue: &q}
+ ep.ops.InitHandler(ep, &stackHandler{}, getSendBufferLimits, getReceiveBufferLimits)
ep.ops.SetSendBufferSize(defaultBufferSize, false /* notify */)
ep.ops.SetReceiveBufferSize(defaultBufferSize, false /* notify */)
- ep.ops.InitHandler(ep, &stackHandler{}, getSendBufferLimits, getReceiveBufferLimits)
return ep
}
@@ -227,3 +227,6 @@ func (e *connectionlessEndpoint) OnSetSendBufferSize(v int64) (newSz int64) {
}
return v
}
+
+// WakeupWriters implements tcpip.SocketOptionsHandler.WakeupWriters.
+func (e *connectionlessEndpoint) WakeupWriters() {}
diff --git a/pkg/sentry/socket/unix/transport/queue.go b/pkg/sentry/socket/unix/transport/queue.go
index e4de44498..a9cedcf5f 100644
--- a/pkg/sentry/socket/unix/transport/queue.go
+++ b/pkg/sentry/socket/unix/transport/queue.go
@@ -133,7 +133,7 @@ func (q *queue) Enqueue(ctx context.Context, data [][]byte, c ControlMessages, f
free := q.limit - q.used
if l > free && truncate {
- if free == 0 {
+ if free <= 0 {
// Message can't fit right now.
q.mu.Unlock()
return 0, false, syserr.ErrWouldBlock
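
free == 0 becomes free <= 0 because q.limit can be lowered below q.used after data has already been queued (for instance by a later buffer-size change), leaving free negative; a negative free must be treated the same as a full queue. A tiny hedged sketch of the guard, simplified from the truncate path above:

// canEnqueueSketch: free space may be negative when the limit has been
// reduced below current usage, so only a strictly positive free counts.
func canEnqueueSketch(used, limit, msgLen int64) bool {
	free := limit - used
	return free > 0 && msgLen <= free
}

// Example: canEnqueueSketch(8192, 4096, 1) == false even though free != 0.
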
diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go
index db7b1affe..e9e482017 100644
--- a/pkg/sentry/socket/unix/unix.go
+++ b/pkg/sentry/socket/unix/unix.go
@@ -23,6 +23,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
@@ -37,7 +38,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -493,7 +493,7 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
}
n, err := src.CopyInTo(t, &w)
- if err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
+ if err != linuxerr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
return int(n), syserr.FromError(err)
}
@@ -513,13 +513,13 @@ func (s *socketOpsCommon) SendMsg(t *kernel.Task, src usermem.IOSequence, to []b
n, err = src.CopyInTo(t, &w)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -647,7 +647,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
}
var total int64
- if n, err := doRead(); err != syserror.ErrWouldBlock || dontWait {
+ if n, err := doRead(); err != linuxerr.ErrWouldBlock || dontWait {
var from linux.SockAddr
var fromLen uint32
if r.From != nil && len([]byte(r.From.Addr)) != 0 {
@@ -682,7 +682,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
defer s.EventUnregister(&e)
for {
- if n, err := doRead(); err != syserror.ErrWouldBlock {
+ if n, err := doRead(); err != linuxerr.ErrWouldBlock {
var from linux.SockAddr
var fromLen uint32
if r.From != nil {
@@ -719,7 +719,7 @@ func (s *socketOpsCommon) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags
if total > 0 {
err = nil
}
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return int(total), msgFlags, nil, 0, socket.ControlMessages{}, syserr.ErrTryAgain
}
return int(total), msgFlags, nil, 0, socket.ControlMessages{}, syserr.FromError(err)
diff --git a/pkg/sentry/socket/unix/unix_vfs2.go b/pkg/sentry/socket/unix/unix_vfs2.go
index c39e317ff..8c5075a1c 100644
--- a/pkg/sentry/socket/unix/unix_vfs2.go
+++ b/pkg/sentry/socket/unix/unix_vfs2.go
@@ -17,6 +17,7 @@ package unix
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
@@ -29,7 +30,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -236,7 +236,7 @@ func (s *SocketVFS2) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error {
Mode: linux.FileMode(linux.S_IFSOCK | uint(stat.Mode)&^t.FSContext().Umask()),
Endpoint: bep,
})
- if err == syserror.EEXIST {
+ if linuxerr.Equals(linuxerr.EEXIST, err) {
return syserr.ErrAddressInUse
}
return syserr.FromError(err)
@@ -253,7 +253,7 @@ func (s *SocketVFS2) Ioctl(ctx context.Context, uio usermem.IO, args arch.Syscal
// PRead implements vfs.FileDescriptionImpl.
func (s *SocketVFS2) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Read implements vfs.FileDescriptionImpl.
@@ -261,7 +261,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
if dst.NumBytes() == 0 {
@@ -282,7 +282,7 @@ func (s *SocketVFS2) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.
// PWrite implements vfs.FileDescriptionImpl.
func (s *SocketVFS2) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Write implements vfs.FileDescriptionImpl.
@@ -290,7 +290,7 @@ func (s *SocketVFS2) Write(ctx context.Context, src usermem.IOSequence, opts vfs
// All flags other than RWF_NOWAIT should be ignored.
// TODO(gvisor.dev/issue/2601): Support RWF_NOWAIT.
if opts.Flags != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
t := kernel.TaskFromContext(ctx)
diff --git a/pkg/sentry/state/BUILD b/pkg/sentry/state/BUILD
index 3e801182c..7f02807c5 100644
--- a/pkg/sentry/state/BUILD
+++ b/pkg/sentry/state/BUILD
@@ -13,6 +13,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/log",
"//pkg/sentry/inet",
"//pkg/sentry/kernel",
@@ -20,7 +21,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sentry/watchdog",
"//pkg/state/statefile",
- "//pkg/syserror",
"@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/state/state.go b/pkg/sentry/state/state.go
index 167754537..e9d544f3d 100644
--- a/pkg/sentry/state/state.go
+++ b/pkg/sentry/state/state.go
@@ -20,6 +20,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -27,7 +28,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sentry/watchdog"
"gvisor.dev/gvisor/pkg/state/statefile"
- "gvisor.dev/gvisor/pkg/syserror"
)
var previousMetadata map[string]string
@@ -88,7 +88,7 @@ func (opts SaveOpts) Save(ctx context.Context, k *kernel.Kernel, w *watchdog.Wat
// ENOSPC is a state file error. This error can only come from
// writing the state file, and not from fs.FileOperations.Fsync
// because we wrap those in kernel.TaskSet.flushWritesToFiles.
- if err == syserror.ENOSPC {
+ if linuxerr.Equals(linuxerr.ENOSPC, err) {
err = ErrStateFile{err}
}
@@ -110,7 +110,7 @@ type LoadOpts struct {
}
// Load loads the given kernel, setting the provided platform and stack.
-func (opts LoadOpts) Load(ctx context.Context, k *kernel.Kernel, n inet.Stack, clocks time.Clocks, vfsOpts *vfs.CompleteRestoreOptions) error {
+func (opts LoadOpts) Load(ctx context.Context, k *kernel.Kernel, timeReady chan struct{}, n inet.Stack, clocks time.Clocks, vfsOpts *vfs.CompleteRestoreOptions) error {
// Open the file.
r, m, err := statefile.NewReader(opts.Source, opts.Key)
if err != nil {
@@ -120,5 +120,5 @@ func (opts LoadOpts) Load(ctx context.Context, k *kernel.Kernel, n inet.Stack, c
previousMetadata = m
// Restore the Kernel object graph.
- return k.LoadFrom(ctx, r, n, clocks, vfsOpts)
+ return k.LoadFrom(ctx, r, timeReady, n, clocks, vfsOpts)
}
diff --git a/pkg/sentry/state/state_metadata.go b/pkg/sentry/state/state_metadata.go
index cefd20b9b..c42297c80 100644
--- a/pkg/sentry/state/state_metadata.go
+++ b/pkg/sentry/state/state_metadata.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package state
import (
diff --git a/pkg/sentry/strace/linux64_amd64.go b/pkg/sentry/strace/linux64_amd64.go
index 6ce1bb592..317c3c31c 100644
--- a/pkg/sentry/strace/linux64_amd64.go
+++ b/pkg/sentry/strace/linux64_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package strace
diff --git a/pkg/sentry/strace/linux64_arm64.go b/pkg/sentry/strace/linux64_arm64.go
index ce5594301..65f27c810 100644
--- a/pkg/sentry/strace/linux64_arm64.go
+++ b/pkg/sentry/strace/linux64_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package strace
diff --git a/pkg/sentry/strace/strace.go b/pkg/sentry/strace/strace.go
index af7088847..757ff2a40 100644
--- a/pkg/sentry/strace/strace.go
+++ b/pkg/sentry/strace/strace.go
@@ -133,6 +133,9 @@ func dump(t *kernel.Task, addr hostarch.Addr, size uint, maximumBlobSize uint) s
}
func path(t *kernel.Task, addr hostarch.Addr) string {
+ if addr == 0 {
+ return "<null>"
+ }
path, err := t.CopyInString(addr, linux.PATH_MAX)
if err != nil {
return fmt.Sprintf("%#x (error decoding path: %s)", addr, err)
@@ -816,10 +819,10 @@ func convertToSyscallFlag(sinks SinkType) uint32 {
return ret
}
-// Enable enables the syscalls in whitelist in all syscall tables.
+// Enable enables the syscalls in allowlist in all syscall tables.
//
// Preconditions: Initialize has been called.
-func Enable(whitelist []string, sinks SinkType) error {
+func Enable(allowlist []string, sinks SinkType) error {
flags := convertToSyscallFlag(sinks)
for _, table := range kernel.SyscallTables() {
// Is this known?
@@ -829,7 +832,7 @@ func Enable(whitelist []string, sinks SinkType) error {
}
// Convert to a set of system calls numbers.
- wl, err := sys.ConvertToSysnoMap(whitelist)
+ wl, err := sys.ConvertToSysnoMap(allowlist)
if err != nil {
return err
}
diff --git a/pkg/sentry/syscalls/BUILD b/pkg/sentry/syscalls/BUILD
index b8d1bd415..7a7c80ac6 100644
--- a/pkg/sentry/syscalls/BUILD
+++ b/pkg/sentry/syscalls/BUILD
@@ -11,11 +11,11 @@ go_library(
visibility = ["//:sandbox"],
deps = [
"//pkg/abi/linux",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/arch",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel/epoll",
"//pkg/sentry/kernel/time",
- "//pkg/syserror",
"//pkg/waiter",
],
)
diff --git a/pkg/sentry/syscalls/epoll.go b/pkg/sentry/syscalls/epoll.go
index 3b4d79889..a69ed0746 100644
--- a/pkg/sentry/syscalls/epoll.go
+++ b/pkg/sentry/syscalls/epoll.go
@@ -18,10 +18,10 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/epoll"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -45,21 +45,21 @@ func AddEpoll(t *kernel.Task, epfd int32, fd int32, flags epoll.EntryFlags, mask
// Get epoll from the file descriptor.
epollfile := t.GetFile(epfd)
if epollfile == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer epollfile.DecRef(t)
// Get the target file id.
file := t.GetFile(fd)
if file == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the epollPoll operations.
e, ok := epollfile.FileOperations.(*epoll.EventPoll)
if !ok {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Try to add the entry.
@@ -71,21 +71,21 @@ func UpdateEpoll(t *kernel.Task, epfd int32, fd int32, flags epoll.EntryFlags, m
// Get epoll from the file descriptor.
epollfile := t.GetFile(epfd)
if epollfile == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer epollfile.DecRef(t)
// Get the target file id.
file := t.GetFile(fd)
if file == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the epollPoll operations.
e, ok := epollfile.FileOperations.(*epoll.EventPoll)
if !ok {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Try to update the entry.
@@ -97,21 +97,21 @@ func RemoveEpoll(t *kernel.Task, epfd int32, fd int32) error {
// Get epoll from the file descriptor.
epollfile := t.GetFile(epfd)
if epollfile == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer epollfile.DecRef(t)
// Get the target file id.
file := t.GetFile(fd)
if file == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the epollPoll operations.
e, ok := epollfile.FileOperations.(*epoll.EventPoll)
if !ok {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Try to remove the entry.
@@ -123,14 +123,14 @@ func WaitEpoll(t *kernel.Task, fd int32, max int, timeoutInNanos int64) ([]linux
// Get epoll from the file descriptor.
epollfile := t.GetFile(fd)
if epollfile == nil {
- return nil, syserror.EBADF
+ return nil, linuxerr.EBADF
}
defer epollfile.DecRef(t)
// Extract the epollPoll operations.
e, ok := epollfile.FileOperations.(*epoll.EventPoll)
if !ok {
- return nil, syserror.EBADF
+ return nil, linuxerr.EBADF
}
// Try to read events and return right away if we got them or if the
@@ -163,7 +163,7 @@ func WaitEpoll(t *kernel.Task, fd int32, max int, timeoutInNanos int64) ([]linux
}
if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return nil, nil
}
diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD
index 408a6c422..394396cde 100644
--- a/pkg/sentry/syscalls/linux/BUILD
+++ b/pkg/sentry/syscalls/linux/BUILD
@@ -25,6 +25,7 @@ go_library(
"sys_mempolicy.go",
"sys_mmap.go",
"sys_mount.go",
+ "sys_msgqueue.go",
"sys_pipe.go",
"sys_poll.go",
"sys_prctl.go",
@@ -64,6 +65,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/bpf",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/hostarch",
"//pkg/log",
"//pkg/marshal",
@@ -83,6 +85,8 @@ go_library(
"//pkg/sentry/kernel/epoll",
"//pkg/sentry/kernel/eventfd",
"//pkg/sentry/kernel/fasync",
+ "//pkg/sentry/kernel/ipc",
+ "//pkg/sentry/kernel/msgqueue",
"//pkg/sentry/kernel/pipe",
"//pkg/sentry/kernel/sched",
"//pkg/sentry/kernel/shm",
@@ -100,7 +104,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
diff --git a/pkg/sentry/syscalls/linux/error.go b/pkg/sentry/syscalls/linux/error.go
index 6eabfd219..f4d549a3f 100644
--- a/pkg/sentry/syscalls/linux/error.go
+++ b/pkg/sentry/syscalls/linux/error.go
@@ -19,13 +19,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
var (
@@ -89,18 +89,18 @@ func handleIOErrorImpl(ctx context.Context, partialResult bool, errOrig, intr er
}
// Translate error, if possible, to consolidate errors from other packages
- // into a smaller set of errors from syserror package.
+ // into a smaller set of errors from the linuxerr package.
translatedErr := errOrig
- if errno, ok := syserror.TranslateError(errOrig); ok {
+ if errno, ok := linuxerr.TranslateError(errOrig); ok {
translatedErr = errno
}
- switch translatedErr {
- case io.EOF:
+ switch {
+ case translatedErr == io.EOF:
// EOF is always consumed. If this is a partial read/write
// (result != 0), the application will see that, otherwise
// they will see 0.
return true, nil
- case syserror.EFBIG:
+ case linuxerr.Equals(linuxerr.EFBIG, translatedErr):
t := kernel.TaskFromContext(ctx)
if t == nil {
panic("I/O error should only occur from a context associated with a Task")
@@ -112,8 +112,8 @@ func handleIOErrorImpl(ctx context.Context, partialResult bool, errOrig, intr er
// Do not consume the error and return it as EFBIG.
// Simultaneously send a SIGXFSZ per setrlimit(2).
t.SendSignal(kernel.SignalInfoNoInfo(linux.SIGXFSZ, t, t))
- return true, syserror.EFBIG
- case syserror.EINTR:
+ return true, linuxerr.EFBIG
+ case linuxerr.Equals(linuxerr.EINTR, translatedErr):
// The syscall was interrupted. Return nil if it completed
// partially, otherwise return the error code that the syscall
// needs (to indicate to the kernel what it should do).
@@ -128,21 +128,21 @@ func handleIOErrorImpl(ctx context.Context, partialResult bool, errOrig, intr er
return true, errOrig
}
- switch translatedErr {
- case syserror.EINTR:
+ switch {
+ case linuxerr.Equals(linuxerr.EINTR, translatedErr):
// Syscall interrupted, but completed a partial
// read/write. Like ErrWouldBlock, since we have a
// partial read/write, we consume the error and return
// the partial result.
return true, nil
- case syserror.EFAULT:
+ case linuxerr.Equals(linuxerr.EFAULT, translatedErr):
// EFAULT is only shown the user if nothing was
// read/written. If we read something (this case), they see
// a partial read/write. They will then presumably try again
// with an incremented buffer, which will EFAULT with
// result == 0.
return true, nil
- case syserror.EPIPE:
+ case linuxerr.Equals(linuxerr.EPIPE, translatedErr):
// Writes to a pipe or socket will return EPIPE if the other
// side is gone. The partial write is returned. EPIPE will be
// returned on the next call.
@@ -150,24 +150,23 @@ func handleIOErrorImpl(ctx context.Context, partialResult bool, errOrig, intr er
// TODO(gvisor.dev/issue/161): In some cases SIGPIPE should
// also be sent to the application.
return true, nil
- case syserror.ENOSPC:
+ case linuxerr.Equals(linuxerr.ENOSPC, translatedErr):
// Similar to EPIPE. Return what we wrote this time, and let
// ENOSPC be returned on the next call.
return true, nil
- case syserror.ECONNRESET, syserror.ETIMEDOUT:
+ case linuxerr.Equals(linuxerr.ECONNRESET, translatedErr):
+ fallthrough
+ case linuxerr.Equals(linuxerr.ETIMEDOUT, translatedErr):
// For TCP sendfile connections, we may have a reset or timeout. But we
// should just return n as the result.
return true, nil
- case syserror.EWOULDBLOCK:
+ case linuxerr.Equals(linuxerr.EWOULDBLOCK, translatedErr):
// Syscall would block, but completed a partial read/write.
// This case should only be returned by IssueIO for nonblocking
// files. Since we have a partial read/write, we consume
// ErrWouldBlock, returning the partial result.
return true, nil
- }
-
- switch errOrig.(type) {
- case syserror.SyscallRestartErrno:
+ case linuxerr.IsRestartError(translatedErr):
// Identical to the EINTR case.
return true, nil
}
diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go
index 090c5ffcb..2046a48b9 100644
--- a/pkg/sentry/syscalls/linux/linux64.go
+++ b/pkg/sentry/syscalls/linux/linux64.go
@@ -18,11 +18,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/syscalls"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -120,10 +120,10 @@ var AMD64 = &kernel.SyscallTable{
65: syscalls.PartiallySupported("semop", Semop, "Option SEM_UNDO not supported.", nil),
66: syscalls.Supported("semctl", Semctl),
67: syscalls.Supported("shmdt", Shmdt),
- 68: syscalls.ErrorWithEvent("msgget", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 69: syscalls.ErrorWithEvent("msgsnd", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 70: syscalls.ErrorWithEvent("msgrcv", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 71: syscalls.ErrorWithEvent("msgctl", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
+ 68: syscalls.Supported("msgget", Msgget),
+ 69: syscalls.Supported("msgsnd", Msgsnd),
+ 70: syscalls.Supported("msgrcv", Msgrcv),
+ 71: syscalls.Supported("msgctl", Msgctl),
72: syscalls.PartiallySupported("fcntl", Fcntl, "Not all options are supported.", nil),
73: syscalls.PartiallySupported("flock", Flock, "Locks are held within the sandbox only.", nil),
74: syscalls.PartiallySupported("fsync", Fsync, "Full data flush is not guaranteed at this time.", nil),
@@ -174,8 +174,8 @@ var AMD64 = &kernel.SyscallTable{
119: syscalls.Supported("setresgid", Setresgid),
120: syscalls.Supported("getresgid", Getresgid),
121: syscalls.Supported("getpgid", Getpgid),
- 122: syscalls.ErrorWithEvent("setfsuid", syserror.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
- 123: syscalls.ErrorWithEvent("setfsgid", syserror.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
+ 122: syscalls.ErrorWithEvent("setfsuid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
+ 123: syscalls.ErrorWithEvent("setfsgid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
124: syscalls.Supported("getsid", Getsid),
125: syscalls.Supported("capget", Capget),
126: syscalls.Supported("capset", Capset),
@@ -186,12 +186,12 @@ var AMD64 = &kernel.SyscallTable{
131: syscalls.Supported("sigaltstack", Sigaltstack),
132: syscalls.Supported("utime", Utime),
133: syscalls.PartiallySupported("mknod", Mknod, "Device creation is not generally supported. Only regular file and FIFO creation are supported.", nil),
- 134: syscalls.Error("uselib", syserror.ENOSYS, "Obsolete", nil),
- 135: syscalls.ErrorWithEvent("personality", syserror.EINVAL, "Unable to change personality.", nil),
- 136: syscalls.ErrorWithEvent("ustat", syserror.ENOSYS, "Needs filesystem support.", nil),
+ 134: syscalls.Error("uselib", linuxerr.ENOSYS, "Obsolete", nil),
+ 135: syscalls.ErrorWithEvent("personality", linuxerr.EINVAL, "Unable to change personality.", nil),
+ 136: syscalls.ErrorWithEvent("ustat", linuxerr.ENOSYS, "Needs filesystem support.", nil),
137: syscalls.PartiallySupported("statfs", Statfs, "Depends on the backing file system implementation.", nil),
138: syscalls.PartiallySupported("fstatfs", Fstatfs, "Depends on the backing file system implementation.", nil),
- 139: syscalls.ErrorWithEvent("sysfs", syserror.ENOSYS, "", []string{"gvisor.dev/issue/165"}),
+ 139: syscalls.ErrorWithEvent("sysfs", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/165"}),
140: syscalls.PartiallySupported("getpriority", Getpriority, "Stub implementation.", nil),
141: syscalls.PartiallySupported("setpriority", Setpriority, "Stub implementation.", nil),
142: syscalls.CapError("sched_setparam", linux.CAP_SYS_NICE, "", nil),
@@ -200,15 +200,15 @@ var AMD64 = &kernel.SyscallTable{
145: syscalls.PartiallySupported("sched_getscheduler", SchedGetscheduler, "Stub implementation.", nil),
146: syscalls.PartiallySupported("sched_get_priority_max", SchedGetPriorityMax, "Stub implementation.", nil),
147: syscalls.PartiallySupported("sched_get_priority_min", SchedGetPriorityMin, "Stub implementation.", nil),
- 148: syscalls.ErrorWithEvent("sched_rr_get_interval", syserror.EPERM, "", nil),
+ 148: syscalls.ErrorWithEvent("sched_rr_get_interval", linuxerr.EPERM, "", nil),
149: syscalls.PartiallySupported("mlock", Mlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
150: syscalls.PartiallySupported("munlock", Munlock, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
151: syscalls.PartiallySupported("mlockall", Mlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
152: syscalls.PartiallySupported("munlockall", Munlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
153: syscalls.CapError("vhangup", linux.CAP_SYS_TTY_CONFIG, "", nil),
- 154: syscalls.Error("modify_ldt", syserror.EPERM, "", nil),
- 155: syscalls.Error("pivot_root", syserror.EPERM, "", nil),
- 156: syscalls.Error("sysctl", syserror.EPERM, "Deprecated. Use /proc/sys instead.", nil),
+ 154: syscalls.Error("modify_ldt", linuxerr.EPERM, "", nil),
+ 155: syscalls.Error("pivot_root", linuxerr.EPERM, "", nil),
+ 156: syscalls.Error("sysctl", linuxerr.EPERM, "Deprecated. Use /proc/sys instead.", nil),
157: syscalls.PartiallySupported("prctl", Prctl, "Not all options are supported.", nil),
158: syscalls.PartiallySupported("arch_prctl", ArchPrctl, "Options ARCH_GET_GS, ARCH_SET_GS not supported.", nil),
159: syscalls.CapError("adjtimex", linux.CAP_SYS_TIME, "", nil),
@@ -229,15 +229,15 @@ var AMD64 = &kernel.SyscallTable{
174: syscalls.CapError("create_module", linux.CAP_SYS_MODULE, "", nil),
175: syscalls.CapError("init_module", linux.CAP_SYS_MODULE, "", nil),
176: syscalls.CapError("delete_module", linux.CAP_SYS_MODULE, "", nil),
- 177: syscalls.Error("get_kernel_syms", syserror.ENOSYS, "Not supported in Linux > 2.6.", nil),
- 178: syscalls.Error("query_module", syserror.ENOSYS, "Not supported in Linux > 2.6.", nil),
+ 177: syscalls.Error("get_kernel_syms", linuxerr.ENOSYS, "Not supported in Linux > 2.6.", nil),
+ 178: syscalls.Error("query_module", linuxerr.ENOSYS, "Not supported in Linux > 2.6.", nil),
179: syscalls.CapError("quotactl", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_admin for most operations
- 180: syscalls.Error("nfsservctl", syserror.ENOSYS, "Removed after Linux 3.1.", nil),
- 181: syscalls.Error("getpmsg", syserror.ENOSYS, "Not implemented in Linux.", nil),
- 182: syscalls.Error("putpmsg", syserror.ENOSYS, "Not implemented in Linux.", nil),
- 183: syscalls.Error("afs_syscall", syserror.ENOSYS, "Not implemented in Linux.", nil),
- 184: syscalls.Error("tuxcall", syserror.ENOSYS, "Not implemented in Linux.", nil),
- 185: syscalls.Error("security", syserror.ENOSYS, "Not implemented in Linux.", nil),
+ 180: syscalls.Error("nfsservctl", linuxerr.ENOSYS, "Removed after Linux 3.1.", nil),
+ 181: syscalls.Error("getpmsg", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
+ 182: syscalls.Error("putpmsg", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
+ 183: syscalls.Error("afs_syscall", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
+ 184: syscalls.Error("tuxcall", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
+ 185: syscalls.Error("security", linuxerr.ENOSYS, "Not implemented in Linux.", nil),
186: syscalls.Supported("gettid", Gettid),
187: syscalls.Supported("readahead", Readahead),
188: syscalls.PartiallySupported("setxattr", SetXattr, "Only supported for tmpfs.", nil),
@@ -257,18 +257,18 @@ var AMD64 = &kernel.SyscallTable{
202: syscalls.PartiallySupported("futex", Futex, "Robust futexes not supported.", nil),
203: syscalls.PartiallySupported("sched_setaffinity", SchedSetaffinity, "Stub implementation.", nil),
204: syscalls.PartiallySupported("sched_getaffinity", SchedGetaffinity, "Stub implementation.", nil),
- 205: syscalls.Error("set_thread_area", syserror.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
+ 205: syscalls.Error("set_thread_area", linuxerr.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
206: syscalls.PartiallySupported("io_setup", IoSetup, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
207: syscalls.PartiallySupported("io_destroy", IoDestroy, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
208: syscalls.PartiallySupported("io_getevents", IoGetevents, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
209: syscalls.PartiallySupported("io_submit", IoSubmit, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
210: syscalls.PartiallySupported("io_cancel", IoCancel, "Generally supported with exceptions. User ring optimizations are not implemented.", []string{"gvisor.dev/issue/204"}),
- 211: syscalls.Error("get_thread_area", syserror.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
+ 211: syscalls.Error("get_thread_area", linuxerr.ENOSYS, "Expected to return ENOSYS on 64-bit", nil),
212: syscalls.CapError("lookup_dcookie", linux.CAP_SYS_ADMIN, "", nil),
213: syscalls.Supported("epoll_create", EpollCreate),
- 214: syscalls.ErrorWithEvent("epoll_ctl_old", syserror.ENOSYS, "Deprecated.", nil),
- 215: syscalls.ErrorWithEvent("epoll_wait_old", syserror.ENOSYS, "Deprecated.", nil),
- 216: syscalls.ErrorWithEvent("remap_file_pages", syserror.ENOSYS, "Deprecated since Linux 3.16.", nil),
+ 214: syscalls.ErrorWithEvent("epoll_ctl_old", linuxerr.ENOSYS, "Deprecated.", nil),
+ 215: syscalls.ErrorWithEvent("epoll_wait_old", linuxerr.ENOSYS, "Deprecated.", nil),
+ 216: syscalls.ErrorWithEvent("remap_file_pages", linuxerr.ENOSYS, "Deprecated since Linux 3.16.", nil),
217: syscalls.Supported("getdents64", Getdents64),
218: syscalls.Supported("set_tid_address", SetTidAddress),
219: syscalls.Supported("restart_syscall", RestartSyscall),
@@ -288,21 +288,21 @@ var AMD64 = &kernel.SyscallTable{
233: syscalls.Supported("epoll_ctl", EpollCtl),
234: syscalls.Supported("tgkill", Tgkill),
235: syscalls.Supported("utimes", Utimes),
- 236: syscalls.Error("vserver", syserror.ENOSYS, "Not implemented by Linux", nil),
+ 236: syscalls.Error("vserver", linuxerr.ENOSYS, "Not implemented by Linux", nil),
237: syscalls.PartiallySupported("mbind", Mbind, "Stub implementation. Only a single NUMA node is advertised, and mempolicy is ignored accordingly, but mbind() will succeed and has effects reflected by get_mempolicy.", []string{"gvisor.dev/issue/262"}),
238: syscalls.PartiallySupported("set_mempolicy", SetMempolicy, "Stub implementation.", nil),
239: syscalls.PartiallySupported("get_mempolicy", GetMempolicy, "Stub implementation.", nil),
- 240: syscalls.ErrorWithEvent("mq_open", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 241: syscalls.ErrorWithEvent("mq_unlink", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 242: syscalls.ErrorWithEvent("mq_timedsend", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 243: syscalls.ErrorWithEvent("mq_timedreceive", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 244: syscalls.ErrorWithEvent("mq_notify", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 245: syscalls.ErrorWithEvent("mq_getsetattr", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 240: syscalls.ErrorWithEvent("mq_open", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 241: syscalls.ErrorWithEvent("mq_unlink", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 242: syscalls.ErrorWithEvent("mq_timedsend", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 243: syscalls.ErrorWithEvent("mq_timedreceive", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 244: syscalls.ErrorWithEvent("mq_notify", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 245: syscalls.ErrorWithEvent("mq_getsetattr", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
246: syscalls.CapError("kexec_load", linux.CAP_SYS_BOOT, "", nil),
247: syscalls.Supported("waitid", Waitid),
- 248: syscalls.Error("add_key", syserror.EACCES, "Not available to user.", nil),
- 249: syscalls.Error("request_key", syserror.EACCES, "Not available to user.", nil),
- 250: syscalls.Error("keyctl", syserror.EACCES, "Not available to user.", nil),
+ 248: syscalls.Error("add_key", linuxerr.EACCES, "Not available to user.", nil),
+ 249: syscalls.Error("request_key", linuxerr.EACCES, "Not available to user.", nil),
+ 250: syscalls.Error("keyctl", linuxerr.EACCES, "Not available to user.", nil),
251: syscalls.CapError("ioprio_set", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
252: syscalls.CapError("ioprio_get", linux.CAP_SYS_ADMIN, "", nil), // requires cap_sys_nice or cap_sys_admin (depending)
253: syscalls.PartiallySupported("inotify_init", InotifyInit, "Inotify events are only available inside the sandbox. Hard links are treated as different watch targets in gofer fs.", nil),
@@ -330,7 +330,7 @@ var AMD64 = &kernel.SyscallTable{
275: syscalls.Supported("splice", Splice),
276: syscalls.Supported("tee", Tee),
277: syscalls.PartiallySupported("sync_file_range", SyncFileRange, "Full data flush is not guaranteed at this time.", nil),
- 278: syscalls.ErrorWithEvent("vmsplice", syserror.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
+ 278: syscalls.ErrorWithEvent("vmsplice", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
279: syscalls.CapError("move_pages", linux.CAP_SYS_NICE, "", nil), // requires cap_sys_nice (mostly)
280: syscalls.Supported("utimensat", Utimensat),
281: syscalls.Supported("epoll_pwait", EpollPwait),
@@ -350,60 +350,60 @@ var AMD64 = &kernel.SyscallTable{
295: syscalls.Supported("preadv", Preadv),
296: syscalls.Supported("pwritev", Pwritev),
297: syscalls.Supported("rt_tgsigqueueinfo", RtTgsigqueueinfo),
- 298: syscalls.ErrorWithEvent("perf_event_open", syserror.ENODEV, "No support for perf counters", nil),
+ 298: syscalls.ErrorWithEvent("perf_event_open", linuxerr.ENODEV, "No support for perf counters", nil),
299: syscalls.PartiallySupported("recvmmsg", RecvMMsg, "Not all flags and control messages are supported.", nil),
- 300: syscalls.ErrorWithEvent("fanotify_init", syserror.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
- 301: syscalls.ErrorWithEvent("fanotify_mark", syserror.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
+ 300: syscalls.ErrorWithEvent("fanotify_init", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
+ 301: syscalls.ErrorWithEvent("fanotify_mark", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
302: syscalls.Supported("prlimit64", Prlimit64),
- 303: syscalls.Error("name_to_handle_at", syserror.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
- 304: syscalls.Error("open_by_handle_at", syserror.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
+ 303: syscalls.Error("name_to_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
+ 304: syscalls.Error("open_by_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
305: syscalls.CapError("clock_adjtime", linux.CAP_SYS_TIME, "", nil),
306: syscalls.PartiallySupported("syncfs", Syncfs, "Depends on backing file system.", nil),
307: syscalls.PartiallySupported("sendmmsg", SendMMsg, "Not all flags and control messages are supported.", nil),
- 308: syscalls.ErrorWithEvent("setns", syserror.EOPNOTSUPP, "Needs filesystem support", []string{"gvisor.dev/issue/140"}), // TODO(b/29354995)
+ 308: syscalls.ErrorWithEvent("setns", linuxerr.EOPNOTSUPP, "Needs filesystem support", []string{"gvisor.dev/issue/140"}), // TODO(b/29354995)
309: syscalls.Supported("getcpu", Getcpu),
- 310: syscalls.ErrorWithEvent("process_vm_readv", syserror.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
- 311: syscalls.ErrorWithEvent("process_vm_writev", syserror.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
+ 310: syscalls.ErrorWithEvent("process_vm_readv", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
+ 311: syscalls.ErrorWithEvent("process_vm_writev", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
312: syscalls.CapError("kcmp", linux.CAP_SYS_PTRACE, "", nil),
313: syscalls.CapError("finit_module", linux.CAP_SYS_MODULE, "", nil),
- 314: syscalls.ErrorWithEvent("sched_setattr", syserror.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
- 315: syscalls.ErrorWithEvent("sched_getattr", syserror.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
- 316: syscalls.ErrorWithEvent("renameat2", syserror.ENOSYS, "", []string{"gvisor.dev/issue/263"}), // TODO(b/118902772)
+ 314: syscalls.ErrorWithEvent("sched_setattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
+ 315: syscalls.ErrorWithEvent("sched_getattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
+ 316: syscalls.ErrorWithEvent("renameat2", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/263"}), // TODO(b/118902772)
317: syscalls.Supported("seccomp", Seccomp),
318: syscalls.Supported("getrandom", GetRandom),
319: syscalls.Supported("memfd_create", MemfdCreate),
320: syscalls.CapError("kexec_file_load", linux.CAP_SYS_BOOT, "", nil),
321: syscalls.CapError("bpf", linux.CAP_SYS_ADMIN, "", nil),
322: syscalls.Supported("execveat", Execveat),
- 323: syscalls.ErrorWithEvent("userfaultfd", syserror.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
+ 323: syscalls.ErrorWithEvent("userfaultfd", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
324: syscalls.PartiallySupported("membarrier", Membarrier, "Not supported on all platforms.", nil),
325: syscalls.PartiallySupported("mlock2", Mlock2, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
// Syscalls implemented after 325 are "backports" from versions
// of Linux after 4.4.
- 326: syscalls.ErrorWithEvent("copy_file_range", syserror.ENOSYS, "", nil),
+ 326: syscalls.ErrorWithEvent("copy_file_range", linuxerr.ENOSYS, "", nil),
327: syscalls.Supported("preadv2", Preadv2),
328: syscalls.PartiallySupported("pwritev2", Pwritev2, "Flag RWF_HIPRI is not supported.", nil),
- 329: syscalls.ErrorWithEvent("pkey_mprotect", syserror.ENOSYS, "", nil),
- 330: syscalls.ErrorWithEvent("pkey_alloc", syserror.ENOSYS, "", nil),
- 331: syscalls.ErrorWithEvent("pkey_free", syserror.ENOSYS, "", nil),
+ 329: syscalls.ErrorWithEvent("pkey_mprotect", linuxerr.ENOSYS, "", nil),
+ 330: syscalls.ErrorWithEvent("pkey_alloc", linuxerr.ENOSYS, "", nil),
+ 331: syscalls.ErrorWithEvent("pkey_free", linuxerr.ENOSYS, "", nil),
332: syscalls.Supported("statx", Statx),
- 333: syscalls.ErrorWithEvent("io_pgetevents", syserror.ENOSYS, "", nil),
+ 333: syscalls.ErrorWithEvent("io_pgetevents", linuxerr.ENOSYS, "", nil),
334: syscalls.PartiallySupported("rseq", RSeq, "Not supported on all platforms.", nil),
// Linux skips ahead to syscall 424 to sync numbers between arches.
- 424: syscalls.ErrorWithEvent("pidfd_send_signal", syserror.ENOSYS, "", nil),
- 425: syscalls.ErrorWithEvent("io_uring_setup", syserror.ENOSYS, "", nil),
- 426: syscalls.ErrorWithEvent("io_uring_enter", syserror.ENOSYS, "", nil),
- 427: syscalls.ErrorWithEvent("io_uring_register", syserror.ENOSYS, "", nil),
- 428: syscalls.ErrorWithEvent("open_tree", syserror.ENOSYS, "", nil),
- 429: syscalls.ErrorWithEvent("move_mount", syserror.ENOSYS, "", nil),
- 430: syscalls.ErrorWithEvent("fsopen", syserror.ENOSYS, "", nil),
- 431: syscalls.ErrorWithEvent("fsconfig", syserror.ENOSYS, "", nil),
- 432: syscalls.ErrorWithEvent("fsmount", syserror.ENOSYS, "", nil),
- 433: syscalls.ErrorWithEvent("fspick", syserror.ENOSYS, "", nil),
- 434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
- 435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
+ 424: syscalls.ErrorWithEvent("pidfd_send_signal", linuxerr.ENOSYS, "", nil),
+ 425: syscalls.ErrorWithEvent("io_uring_setup", linuxerr.ENOSYS, "", nil),
+ 426: syscalls.ErrorWithEvent("io_uring_enter", linuxerr.ENOSYS, "", nil),
+ 427: syscalls.ErrorWithEvent("io_uring_register", linuxerr.ENOSYS, "", nil),
+ 428: syscalls.ErrorWithEvent("open_tree", linuxerr.ENOSYS, "", nil),
+ 429: syscalls.ErrorWithEvent("move_mount", linuxerr.ENOSYS, "", nil),
+ 430: syscalls.ErrorWithEvent("fsopen", linuxerr.ENOSYS, "", nil),
+ 431: syscalls.ErrorWithEvent("fsconfig", linuxerr.ENOSYS, "", nil),
+ 432: syscalls.ErrorWithEvent("fsmount", linuxerr.ENOSYS, "", nil),
+ 433: syscalls.ErrorWithEvent("fspick", linuxerr.ENOSYS, "", nil),
+ 434: syscalls.ErrorWithEvent("pidfd_open", linuxerr.ENOSYS, "", nil),
+ 435: syscalls.ErrorWithEvent("clone3", linuxerr.ENOSYS, "", nil),
441: syscalls.Supported("epoll_pwait2", EpollPwait2),
},
Emulate: map[hostarch.Addr]uintptr{
@@ -413,7 +413,7 @@ var AMD64 = &kernel.SyscallTable{
},
Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
t.Kernel().EmitUnimplementedEvent(t)
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
},
}
@@ -470,8 +470,8 @@ var ARM64 = &kernel.SyscallTable{
38: syscalls.Supported("renameat", Renameat),
39: syscalls.PartiallySupported("umount2", Umount2, "Not all options or file systems are supported.", nil),
40: syscalls.PartiallySupported("mount", Mount, "Not all options or file systems are supported.", nil),
- 41: syscalls.Error("pivot_root", syserror.EPERM, "", nil),
- 42: syscalls.Error("nfsservctl", syserror.ENOSYS, "Removed after Linux 3.1.", nil),
+ 41: syscalls.Error("pivot_root", linuxerr.EPERM, "", nil),
+ 42: syscalls.Error("nfsservctl", linuxerr.ENOSYS, "Removed after Linux 3.1.", nil),
43: syscalls.PartiallySupported("statfs", Statfs, "Depends on the backing file system implementation.", nil),
44: syscalls.PartiallySupported("fstatfs", Fstatfs, "Depends on the backing file system implementation.", nil),
45: syscalls.Supported("truncate", Truncate),
@@ -504,7 +504,7 @@ var ARM64 = &kernel.SyscallTable{
72: syscalls.Supported("pselect", Pselect),
73: syscalls.Supported("ppoll", Ppoll),
74: syscalls.PartiallySupported("signalfd4", Signalfd4, "Semantics are slightly different.", []string{"gvisor.dev/issue/139"}),
- 75: syscalls.ErrorWithEvent("vmsplice", syserror.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
+ 75: syscalls.ErrorWithEvent("vmsplice", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/138"}), // TODO(b/29354098)
76: syscalls.Supported("splice", Splice),
77: syscalls.Supported("tee", Tee),
78: syscalls.Supported("readlinkat", Readlinkat),
@@ -521,7 +521,7 @@ var ARM64 = &kernel.SyscallTable{
89: syscalls.CapError("acct", linux.CAP_SYS_PACCT, "", nil),
90: syscalls.Supported("capget", Capget),
91: syscalls.Supported("capset", Capset),
- 92: syscalls.ErrorWithEvent("personality", syserror.EINVAL, "Unable to change personality.", nil),
+ 92: syscalls.ErrorWithEvent("personality", linuxerr.EINVAL, "Unable to change personality.", nil),
93: syscalls.Supported("exit", Exit),
94: syscalls.Supported("exit_group", ExitGroup),
95: syscalls.Supported("waitid", Waitid),
@@ -556,7 +556,7 @@ var ARM64 = &kernel.SyscallTable{
124: syscalls.Supported("sched_yield", SchedYield),
125: syscalls.PartiallySupported("sched_get_priority_max", SchedGetPriorityMax, "Stub implementation.", nil),
126: syscalls.PartiallySupported("sched_get_priority_min", SchedGetPriorityMin, "Stub implementation.", nil),
- 127: syscalls.ErrorWithEvent("sched_rr_get_interval", syserror.EPERM, "", nil),
+ 127: syscalls.ErrorWithEvent("sched_rr_get_interval", linuxerr.EPERM, "", nil),
128: syscalls.Supported("restart_syscall", RestartSyscall),
129: syscalls.Supported("kill", Kill),
130: syscalls.Supported("tkill", Tkill),
@@ -580,8 +580,8 @@ var ARM64 = &kernel.SyscallTable{
148: syscalls.Supported("getresuid", Getresuid),
149: syscalls.Supported("setresgid", Setresgid),
150: syscalls.Supported("getresgid", Getresgid),
- 151: syscalls.ErrorWithEvent("setfsuid", syserror.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
- 152: syscalls.ErrorWithEvent("setfsgid", syserror.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
+ 151: syscalls.ErrorWithEvent("setfsuid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
+ 152: syscalls.ErrorWithEvent("setfsgid", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/260"}), // TODO(b/112851702)
153: syscalls.Supported("times", Times),
154: syscalls.Supported("setpgid", Setpgid),
155: syscalls.Supported("getpgid", Getpgid),
@@ -609,16 +609,16 @@ var ARM64 = &kernel.SyscallTable{
177: syscalls.Supported("getegid", Getegid),
178: syscalls.Supported("gettid", Gettid),
179: syscalls.PartiallySupported("sysinfo", Sysinfo, "Fields loads, sharedram, bufferram, totalswap, freeswap, totalhigh, freehigh not supported.", nil),
- 180: syscalls.ErrorWithEvent("mq_open", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 181: syscalls.ErrorWithEvent("mq_unlink", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 182: syscalls.ErrorWithEvent("mq_timedsend", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 183: syscalls.ErrorWithEvent("mq_timedreceive", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 184: syscalls.ErrorWithEvent("mq_notify", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 185: syscalls.ErrorWithEvent("mq_getsetattr", syserror.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
- 186: syscalls.ErrorWithEvent("msgget", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 187: syscalls.ErrorWithEvent("msgctl", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 188: syscalls.ErrorWithEvent("msgrcv", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
- 189: syscalls.ErrorWithEvent("msgsnd", syserror.ENOSYS, "", []string{"gvisor.dev/issue/135"}), // TODO(b/29354921)
+ 180: syscalls.ErrorWithEvent("mq_open", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 181: syscalls.ErrorWithEvent("mq_unlink", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 182: syscalls.ErrorWithEvent("mq_timedsend", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 183: syscalls.ErrorWithEvent("mq_timedreceive", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 184: syscalls.ErrorWithEvent("mq_notify", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 185: syscalls.ErrorWithEvent("mq_getsetattr", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/136"}), // TODO(b/29354921)
+ 186: syscalls.Supported("msgget", Msgget),
+ 187: syscalls.Supported("msgctl", Msgctl),
+ 188: syscalls.Supported("msgrcv", Msgrcv),
+ 189: syscalls.Supported("msgsnd", Msgsnd),
190: syscalls.Supported("semget", Semget),
191: syscalls.Supported("semctl", Semctl),
192: syscalls.Supported("semtimedop", Semtimedop),
@@ -646,9 +646,9 @@ var ARM64 = &kernel.SyscallTable{
214: syscalls.Supported("brk", Brk),
215: syscalls.Supported("munmap", Munmap),
216: syscalls.Supported("mremap", Mremap),
- 217: syscalls.Error("add_key", syserror.EACCES, "Not available to user.", nil),
- 218: syscalls.Error("request_key", syserror.EACCES, "Not available to user.", nil),
- 219: syscalls.Error("keyctl", syserror.EACCES, "Not available to user.", nil),
+ 217: syscalls.Error("add_key", linuxerr.EACCES, "Not available to user.", nil),
+ 218: syscalls.Error("request_key", linuxerr.EACCES, "Not available to user.", nil),
+ 219: syscalls.Error("keyctl", linuxerr.EACCES, "Not available to user.", nil),
220: syscalls.PartiallySupported("clone", Clone, "Mount namespace (CLONE_NEWNS) not supported. Options CLONE_PARENT, CLONE_SYSVSEM not supported.", nil),
221: syscalls.Supported("execve", Execve),
222: syscalls.PartiallySupported("mmap", Mmap, "Generally supported with exceptions. Options MAP_FIXED_NOREPLACE, MAP_SHARED_VALIDATE, MAP_SYNC MAP_GROWSDOWN, MAP_HUGETLB are not supported.", nil),
@@ -663,72 +663,72 @@ var ARM64 = &kernel.SyscallTable{
231: syscalls.PartiallySupported("munlockall", Munlockall, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
232: syscalls.PartiallySupported("mincore", Mincore, "Stub implementation. The sandbox does not have access to this information. Reports all mapped pages are resident.", nil),
233: syscalls.PartiallySupported("madvise", Madvise, "Options MADV_DONTNEED, MADV_DONTFORK are supported. Other advice is ignored.", nil),
- 234: syscalls.ErrorWithEvent("remap_file_pages", syserror.ENOSYS, "Deprecated since Linux 3.16.", nil),
+ 234: syscalls.ErrorWithEvent("remap_file_pages", linuxerr.ENOSYS, "Deprecated since Linux 3.16.", nil),
235: syscalls.PartiallySupported("mbind", Mbind, "Stub implementation. Only a single NUMA node is advertised, and mempolicy is ignored accordingly, but mbind() will succeed and has effects reflected by get_mempolicy.", []string{"gvisor.dev/issue/262"}),
236: syscalls.PartiallySupported("get_mempolicy", GetMempolicy, "Stub implementation.", nil),
237: syscalls.PartiallySupported("set_mempolicy", SetMempolicy, "Stub implementation.", nil),
238: syscalls.CapError("migrate_pages", linux.CAP_SYS_NICE, "", nil),
239: syscalls.CapError("move_pages", linux.CAP_SYS_NICE, "", nil), // requires cap_sys_nice (mostly)
240: syscalls.Supported("rt_tgsigqueueinfo", RtTgsigqueueinfo),
- 241: syscalls.ErrorWithEvent("perf_event_open", syserror.ENODEV, "No support for perf counters", nil),
+ 241: syscalls.ErrorWithEvent("perf_event_open", linuxerr.ENODEV, "No support for perf counters", nil),
242: syscalls.Supported("accept4", Accept4),
243: syscalls.PartiallySupported("recvmmsg", RecvMMsg, "Not all flags and control messages are supported.", nil),
260: syscalls.Supported("wait4", Wait4),
261: syscalls.Supported("prlimit64", Prlimit64),
- 262: syscalls.ErrorWithEvent("fanotify_init", syserror.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
- 263: syscalls.ErrorWithEvent("fanotify_mark", syserror.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
- 264: syscalls.Error("name_to_handle_at", syserror.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
- 265: syscalls.Error("open_by_handle_at", syserror.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
+ 262: syscalls.ErrorWithEvent("fanotify_init", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
+ 263: syscalls.ErrorWithEvent("fanotify_mark", linuxerr.ENOSYS, "Needs CONFIG_FANOTIFY", nil),
+ 264: syscalls.Error("name_to_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
+ 265: syscalls.Error("open_by_handle_at", linuxerr.EOPNOTSUPP, "Not supported by gVisor filesystems", nil),
266: syscalls.CapError("clock_adjtime", linux.CAP_SYS_TIME, "", nil),
267: syscalls.PartiallySupported("syncfs", Syncfs, "Depends on backing file system.", nil),
- 268: syscalls.ErrorWithEvent("setns", syserror.EOPNOTSUPP, "Needs filesystem support", []string{"gvisor.dev/issue/140"}), // TODO(b/29354995)
+ 268: syscalls.ErrorWithEvent("setns", linuxerr.EOPNOTSUPP, "Needs filesystem support", []string{"gvisor.dev/issue/140"}), // TODO(b/29354995)
269: syscalls.PartiallySupported("sendmmsg", SendMMsg, "Not all flags and control messages are supported.", nil),
- 270: syscalls.ErrorWithEvent("process_vm_readv", syserror.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
- 271: syscalls.ErrorWithEvent("process_vm_writev", syserror.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
+ 270: syscalls.ErrorWithEvent("process_vm_readv", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
+ 271: syscalls.ErrorWithEvent("process_vm_writev", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/158"}),
272: syscalls.CapError("kcmp", linux.CAP_SYS_PTRACE, "", nil),
273: syscalls.CapError("finit_module", linux.CAP_SYS_MODULE, "", nil),
- 274: syscalls.ErrorWithEvent("sched_setattr", syserror.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
- 275: syscalls.ErrorWithEvent("sched_getattr", syserror.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
- 276: syscalls.ErrorWithEvent("renameat2", syserror.ENOSYS, "", []string{"gvisor.dev/issue/263"}), // TODO(b/118902772)
+ 274: syscalls.ErrorWithEvent("sched_setattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
+ 275: syscalls.ErrorWithEvent("sched_getattr", linuxerr.ENOSYS, "gVisor does not implement a scheduler.", []string{"gvisor.dev/issue/264"}), // TODO(b/118902272)
+ 276: syscalls.ErrorWithEvent("renameat2", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/263"}), // TODO(b/118902772)
277: syscalls.Supported("seccomp", Seccomp),
278: syscalls.Supported("getrandom", GetRandom),
279: syscalls.Supported("memfd_create", MemfdCreate),
280: syscalls.CapError("bpf", linux.CAP_SYS_ADMIN, "", nil),
281: syscalls.Supported("execveat", Execveat),
- 282: syscalls.ErrorWithEvent("userfaultfd", syserror.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
+ 282: syscalls.ErrorWithEvent("userfaultfd", linuxerr.ENOSYS, "", []string{"gvisor.dev/issue/266"}), // TODO(b/118906345)
283: syscalls.PartiallySupported("membarrier", Membarrier, "Not supported on all platforms.", nil),
284: syscalls.PartiallySupported("mlock2", Mlock2, "Stub implementation. The sandbox lacks appropriate permissions.", nil),
// Syscalls after 284 are "backports" from versions of Linux after 4.4.
- 285: syscalls.ErrorWithEvent("copy_file_range", syserror.ENOSYS, "", nil),
+ 285: syscalls.ErrorWithEvent("copy_file_range", linuxerr.ENOSYS, "", nil),
286: syscalls.Supported("preadv2", Preadv2),
287: syscalls.PartiallySupported("pwritev2", Pwritev2, "Flag RWF_HIPRI is not supported.", nil),
- 288: syscalls.ErrorWithEvent("pkey_mprotect", syserror.ENOSYS, "", nil),
- 289: syscalls.ErrorWithEvent("pkey_alloc", syserror.ENOSYS, "", nil),
- 290: syscalls.ErrorWithEvent("pkey_free", syserror.ENOSYS, "", nil),
+ 288: syscalls.ErrorWithEvent("pkey_mprotect", linuxerr.ENOSYS, "", nil),
+ 289: syscalls.ErrorWithEvent("pkey_alloc", linuxerr.ENOSYS, "", nil),
+ 290: syscalls.ErrorWithEvent("pkey_free", linuxerr.ENOSYS, "", nil),
291: syscalls.Supported("statx", Statx),
- 292: syscalls.ErrorWithEvent("io_pgetevents", syserror.ENOSYS, "", nil),
+ 292: syscalls.ErrorWithEvent("io_pgetevents", linuxerr.ENOSYS, "", nil),
293: syscalls.PartiallySupported("rseq", RSeq, "Not supported on all platforms.", nil),
// Linux skips ahead to syscall 424 to sync numbers between arches.
- 424: syscalls.ErrorWithEvent("pidfd_send_signal", syserror.ENOSYS, "", nil),
- 425: syscalls.ErrorWithEvent("io_uring_setup", syserror.ENOSYS, "", nil),
- 426: syscalls.ErrorWithEvent("io_uring_enter", syserror.ENOSYS, "", nil),
- 427: syscalls.ErrorWithEvent("io_uring_register", syserror.ENOSYS, "", nil),
- 428: syscalls.ErrorWithEvent("open_tree", syserror.ENOSYS, "", nil),
- 429: syscalls.ErrorWithEvent("move_mount", syserror.ENOSYS, "", nil),
- 430: syscalls.ErrorWithEvent("fsopen", syserror.ENOSYS, "", nil),
- 431: syscalls.ErrorWithEvent("fsconfig", syserror.ENOSYS, "", nil),
- 432: syscalls.ErrorWithEvent("fsmount", syserror.ENOSYS, "", nil),
- 433: syscalls.ErrorWithEvent("fspick", syserror.ENOSYS, "", nil),
- 434: syscalls.ErrorWithEvent("pidfd_open", syserror.ENOSYS, "", nil),
- 435: syscalls.ErrorWithEvent("clone3", syserror.ENOSYS, "", nil),
+ 424: syscalls.ErrorWithEvent("pidfd_send_signal", linuxerr.ENOSYS, "", nil),
+ 425: syscalls.ErrorWithEvent("io_uring_setup", linuxerr.ENOSYS, "", nil),
+ 426: syscalls.ErrorWithEvent("io_uring_enter", linuxerr.ENOSYS, "", nil),
+ 427: syscalls.ErrorWithEvent("io_uring_register", linuxerr.ENOSYS, "", nil),
+ 428: syscalls.ErrorWithEvent("open_tree", linuxerr.ENOSYS, "", nil),
+ 429: syscalls.ErrorWithEvent("move_mount", linuxerr.ENOSYS, "", nil),
+ 430: syscalls.ErrorWithEvent("fsopen", linuxerr.ENOSYS, "", nil),
+ 431: syscalls.ErrorWithEvent("fsconfig", linuxerr.ENOSYS, "", nil),
+ 432: syscalls.ErrorWithEvent("fsmount", linuxerr.ENOSYS, "", nil),
+ 433: syscalls.ErrorWithEvent("fspick", linuxerr.ENOSYS, "", nil),
+ 434: syscalls.ErrorWithEvent("pidfd_open", linuxerr.ENOSYS, "", nil),
+ 435: syscalls.ErrorWithEvent("clone3", linuxerr.ENOSYS, "", nil),
441: syscalls.Supported("epoll_pwait2", EpollPwait2),
},
Emulate: map[hostarch.Addr]uintptr{},
Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
t.Kernel().EmitUnimplementedEvent(t)
- return 0, syserror.ENOSYS
+ return 0, linuxerr.ENOSYS
},
}
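
Editorial note: a minimal sketch (not part of the change) of the fallback path the tables above rely on. Any syscall number without an entry reaches the Missing handler, which emits an unimplemented-syscall event and reports ENOSYS through linuxerr. The helper name emitENOSYS is hypothetical.

package linux

import (
    "gvisor.dev/gvisor/pkg/errors/linuxerr"
    "gvisor.dev/gvisor/pkg/sentry/kernel"
)

// emitENOSYS mirrors the Missing handler bodies above: record the
// unimplemented-syscall event, then fail with ENOSYS.
func emitENOSYS(t *kernel.Task) (uintptr, error) {
    t.Kernel().EmitUnimplementedEvent(t)
    return 0, linuxerr.ENOSYS
}
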
diff --git a/pkg/sentry/syscalls/linux/sigset.go b/pkg/sentry/syscalls/linux/sigset.go
index e8c2d8f9e..373948991 100644
--- a/pkg/sentry/syscalls/linux/sigset.go
+++ b/pkg/sentry/syscalls/linux/sigset.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// CopyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and
@@ -29,7 +29,7 @@ import (
// syscalls are moved into this package, then they can be unexported.
func CopyInSigSet(t *kernel.Task, sigSetAddr hostarch.Addr, size uint) (linux.SignalSet, error) {
if size != linux.SignalSetSize {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
b := t.CopyScratchBuffer(8)
if _, err := t.CopyInBytes(sigSetAddr, b); err != nil {
@@ -66,6 +66,6 @@ func copyInSigSetWithSize(t *kernel.Task, addr hostarch.Addr) (hostarch.Addr, ui
maskSize := uint(hostarch.ByteOrder.Uint64(in[8:]))
return maskAddr, maskSize, nil
default:
- return 0, 0, syserror.ENOSYS
+ return 0, 0, linuxerr.ENOSYS
}
}
diff --git a/pkg/sentry/syscalls/linux/sys_aio.go b/pkg/sentry/syscalls/linux/sys_aio.go
index 70e8569a8..2f00c3783 100644
--- a/pkg/sentry/syscalls/linux/sys_aio.go
+++ b/pkg/sentry/syscalls/linux/sys_aio.go
@@ -17,6 +17,7 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -25,7 +26,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/eventfd"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -42,7 +43,7 @@ func IoSetup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
return 0, nil, err
}
if idIn != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
id, err := t.MemoryManager().NewAIOContext(t, uint32(nrEvents))
@@ -66,7 +67,7 @@ func IoDestroy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
ctx := t.MemoryManager().DestroyAIOContext(t, id)
if ctx == nil {
// Does not exist.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Drain completed requests and wait for pending requests until there are no
@@ -97,12 +98,12 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Sanity check arguments.
if minEvents < 0 || minEvents > events {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ctx, ok := t.MemoryManager().LookupAIOContext(t, id)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Setup the timeout.
@@ -114,7 +115,7 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
return 0, nil, err
}
if !d.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
deadline = t.Kernel().MonotonicClock().Now().Add(d.ToDuration())
haveDeadline = true
@@ -134,10 +135,10 @@ func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
var err error
v, err = waitForRequest(ctx, t, haveDeadline, deadline)
if err != nil {
- if count > 0 || err == syserror.ETIMEDOUT {
+ if count > 0 || linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
return uintptr(count), nil, nil
}
- return 0, nil, syserror.ConvertIntr(err, syserror.EINTR)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.EINTR)
}
}
@@ -171,7 +172,7 @@ func waitForRequest(ctx *mm.AIOContext, t *kernel.Task, haveDeadline bool, deadl
done := ctx.WaitChannel()
if done == nil {
// Context has been destroyed.
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
if err := t.BlockWithDeadline(done, haveDeadline, deadline); err != nil {
return nil, err
@@ -184,7 +185,7 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
bytes := int(cb.Bytes)
if bytes < 0 {
// Linux also requires that this field fit in ssize_t.
- return usermem.IOSequence{}, syserror.EINVAL
+ return usermem.IOSequence{}, linuxerr.EINVAL
}
// Since this I/O will be asynchronous with respect to t's task goroutine,
@@ -206,7 +207,7 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
default:
// Not a supported command.
- return usermem.IOSequence{}, syserror.EINVAL
+ return usermem.IOSequence{}, linuxerr.EINVAL
}
}
@@ -215,7 +216,7 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
// It is not presently supported (ENOSYS indicates no support on this
// architecture).
func IoCancel(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
// LINT.IfChange
@@ -269,7 +270,7 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr host
file := t.GetFile(cb.FD)
if file == nil {
// File not found.
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer file.DecRef(t)
@@ -279,14 +280,14 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr host
eventFile = t.GetFile(cb.ResFD)
if eventFile == nil {
// Bad FD.
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer eventFile.DecRef(t)
// Check that it is an eventfd.
if _, ok := eventFile.FileOperations.(*eventfd.EventOperations); !ok {
// Not an event FD.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -299,14 +300,14 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr host
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITE, linux.IOCB_CMD_PWRITEV:
if cb.Offset < 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
// Prepare the request.
ctx, ok := t.MemoryManager().LookupAIOContext(t, id)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if err := ctx.Prepare(); err != nil {
return err
@@ -335,7 +336,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
addr := args[2].Pointer()
if nrEvents < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
for i := int32(0); i < nrEvents; i++ {
@@ -354,7 +355,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
cbAddr = hostarch.Addr(cbAddrP)
default:
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
// Copy in this callback.
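
Editorial note: the IoGetevents hunk above also changes how sentinel errors are matched. Direct equality against syserror values becomes linuxerr.Equals, and interrupted waits are mapped to EINTR via syserr.ConvertIntr. A hedged sketch of the new idiom follows; checkWaitErr is a hypothetical wrapper, not part of the change.

package linux

import (
    "gvisor.dev/gvisor/pkg/errors/linuxerr"
    "gvisor.dev/gvisor/pkg/syserr"
)

// checkWaitErr mirrors the error handling in IoGetevents: a timeout simply
// ends the wait without an error, anything else is converted so that an
// interrupted sleep surfaces to userspace as EINTR.
func checkWaitErr(err error) error {
    if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
        return nil
    }
    return syserr.ConvertIntr(err, linuxerr.EINTR)
}
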
diff --git a/pkg/sentry/syscalls/linux/sys_capability.go b/pkg/sentry/syscalls/linux/sys_capability.go
index d3b85e11b..1e714503c 100644
--- a/pkg/sentry/syscalls/linux/sys_capability.go
+++ b/pkg/sentry/syscalls/linux/sys_capability.go
@@ -16,22 +16,22 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
)
func lookupCaps(t *kernel.Task, tid kernel.ThreadID) (permitted, inheritable, effective auth.CapabilitySet, err error) {
if tid < 0 {
- err = syserror.EINVAL
+ err = linuxerr.EINVAL
return
}
if tid > 0 {
t = t.PIDNamespace().TaskWithID(tid)
}
if t == nil {
- err = syserror.ESRCH
+ err = linuxerr.ESRCH
return
}
creds := t.Credentials()
@@ -97,7 +97,7 @@ func Capget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, err
}
if dataAddr != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
}
@@ -115,7 +115,7 @@ func Capset(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
switch hdr.Version {
case linux.LINUX_CAPABILITY_VERSION_1:
if tid := kernel.ThreadID(hdr.Pid); tid != 0 && tid != t.ThreadID() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
var data linux.CapUserData
if _, err := data.CopyIn(t, dataAddr); err != nil {
@@ -128,7 +128,7 @@ func Capset(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.LINUX_CAPABILITY_VERSION_2, linux.LINUX_CAPABILITY_VERSION_3:
if tid := kernel.ThreadID(hdr.Pid); tid != 0 && tid != t.ThreadID() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
var data [2]linux.CapUserData
if _, err := linux.CopyCapUserDataSliceIn(t, dataAddr, data[:]); err != nil {
@@ -144,6 +144,6 @@ func Capset(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
if _, err := hdr.CopyOut(t, hdrAddr); err != nil {
return 0, nil, err
}
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/syscalls/linux/sys_clone_amd64.go b/pkg/sentry/syscalls/linux/sys_clone_amd64.go
index dd43cf18d..2b2dbd9f9 100644
--- a/pkg/sentry/syscalls/linux/sys_clone_amd64.go
+++ b/pkg/sentry/syscalls/linux/sys_clone_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package linux
diff --git a/pkg/sentry/syscalls/linux/sys_clone_arm64.go b/pkg/sentry/syscalls/linux/sys_clone_arm64.go
index cf68a8949..877c86e6a 100644
--- a/pkg/sentry/syscalls/linux/sys_clone_arm64.go
+++ b/pkg/sentry/syscalls/linux/sys_clone_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package linux
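
Editorial note: the //go:build lines added to both clone files are the Go 1.17 build-constraint syntax; the legacy // +build lines are kept so older toolchains still honor the constraint. The two forms must agree, for example:

//go:build arm64
// +build arm64

package linux
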
diff --git a/pkg/sentry/syscalls/linux/sys_epoll.go b/pkg/sentry/syscalls/linux/sys_epoll.go
index 69cbc98d0..6c807124c 100644
--- a/pkg/sentry/syscalls/linux/sys_epoll.go
+++ b/pkg/sentry/syscalls/linux/sys_epoll.go
@@ -16,12 +16,13 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/epoll"
"gvisor.dev/gvisor/pkg/sentry/syscalls"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -31,7 +32,7 @@ import (
func EpollCreate1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
flags := args[0].Int()
if flags & ^linux.EPOLL_CLOEXEC != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
closeOnExec := flags&linux.EPOLL_CLOEXEC != 0
@@ -48,7 +49,7 @@ func EpollCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
size := args[0].Int()
if size <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
fd, err := syscalls.CreateEpoll(t, false)
@@ -101,14 +102,14 @@ func EpollCtl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
mask |= waiter.EventHUp | waiter.EventErr
return 0, nil, syscalls.UpdateEpoll(t, epfd, fd, flags, mask, data)
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
func waitEpoll(t *kernel.Task, fd int32, eventsAddr hostarch.Addr, max int, timeoutInNanos int64) (uintptr, *kernel.SyscallControl, error) {
r, err := syscalls.WaitEpoll(t, fd, max, timeoutInNanos)
if err != nil {
- return 0, nil, syserror.ConvertIntr(err, syserror.EINTR)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.EINTR)
}
if len(r) != 0 {
diff --git a/pkg/sentry/syscalls/linux/sys_eventfd.go b/pkg/sentry/syscalls/linux/sys_eventfd.go
index 3b4f879e4..7ba9a755e 100644
--- a/pkg/sentry/syscalls/linux/sys_eventfd.go
+++ b/pkg/sentry/syscalls/linux/sys_eventfd.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/eventfd"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Eventfd2 implements linux syscall eventfd2(2).
@@ -30,7 +30,7 @@ func Eventfd2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
allOps := uint(linux.EFD_SEMAPHORE | linux.EFD_NONBLOCK | linux.EFD_CLOEXEC)
if flags & ^allOps != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
event := eventfd.New(t, uint64(initVal), flags&linux.EFD_SEMAPHORE != 0)
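
Editorial note: the Eventfd2 hunk above keeps the usual flag-mask idiom, now returning linuxerr.EINVAL for any bit outside the accepted set. A minimal sketch follows; checkEventfdFlags is a hypothetical helper, not part of the change.

package linux

import (
    "gvisor.dev/gvisor/pkg/abi/linux"
    "gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkEventfdFlags rejects any flag bit outside the set accepted above.
func checkEventfdFlags(flags uint) error {
    allOps := uint(linux.EFD_SEMAPHORE | linux.EFD_NONBLOCK | linux.EFD_CLOEXEC)
    if flags&^allOps != 0 {
        return linuxerr.EINVAL
    }
    return nil
}
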
diff --git a/pkg/sentry/syscalls/linux/sys_file.go b/pkg/sentry/syscalls/linux/sys_file.go
index 90a719ba2..e79b92fb6 100644
--- a/pkg/sentry/syscalls/linux/sys_file.go
+++ b/pkg/sentry/syscalls/linux/sys_file.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -29,7 +30,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/fasync"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// fileOpAt performs an operation on the second last component in the path.
@@ -79,12 +80,12 @@ func fileOpOn(t *kernel.Task, dirFD int32, path string, resolve bool, fn func(ro
// Need to extract the given FD.
f = t.GetFile(dirFD)
if f == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
rel = f.Dirent
if !fs.IsDir(rel.Inode.StableAttr) {
f.DecRef(t)
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
@@ -121,7 +122,7 @@ func copyInPath(t *kernel.Task, addr hostarch.Addr, allowEmpty bool) (path strin
return "", false, err
}
if path == "" && !allowEmpty {
- return "", false, syserror.ENOENT
+ return "", false, linuxerr.ENOENT
}
// If the path ends with a /, then checks must be enforced in various
@@ -152,7 +153,7 @@ func openAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint) (fd uin
}
if fs.IsSymlink(d.Inode.StableAttr) && !resolve {
- return syserror.ELOOP
+ return linuxerr.ELOOP
}
fileFlags := linuxToFlags(flags)
@@ -161,22 +162,22 @@ func openAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint) (fd uin
if fs.IsDir(d.Inode.StableAttr) {
// Don't allow directories to be opened writable.
if fileFlags.Write {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
} else {
// If O_DIRECTORY is set, but the file is not a directory, then fail.
if fileFlags.Directory {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// If it's a directory, then make sure.
if dirPath {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
}
file, err := d.Inode.GetFile(t, d, fileFlags)
if err != nil {
- return syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
defer file.DecRef(t)
@@ -214,12 +215,12 @@ func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMod
return err
}
if dirPath {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string, _ uint) error {
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Do we have the appropriate permissions on the parent?
@@ -260,7 +261,7 @@ func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMod
// Instead of emulating this seemingly useless behaviour, we'll
// indicate that the filesystem doesn't support the creation of
// sockets.
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
case linux.ModeCharacterDevice:
fallthrough
@@ -270,12 +271,12 @@ func mknodAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMod
//
// When we start supporting block and character devices, we'll
// need to check for CAP_MKNOD here.
- return syserror.EPERM
+ return linuxerr.EPERM
default:
// "EINVAL - mode requested creation of something other than a
// regular file, device special file, FIFO or socket." - mknod(2)
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
})
}
@@ -307,7 +308,7 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
return 0, err
}
if dirPath {
- return 0, syserror.ENOENT
+ return 0, linuxerr.ENOENT
}
fileFlags := linuxToFlags(flags)
@@ -325,7 +326,7 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
)
for {
if !fs.IsDir(parent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Start by looking up the dirent at 'name'.
@@ -339,7 +340,7 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
// O_EXCL flag was passed, then we can immediately
// return EEXIST.
if flags&linux.O_EXCL != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
// If we have a non-symlink, then we can proceed.
@@ -350,7 +351,7 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
// If O_NOFOLLOW was passed, then don't try to resolve
// anything.
if flags&linux.O_NOFOLLOW != 0 {
- return syserror.ELOOP
+ return linuxerr.ELOOP
}
// Try to resolve the symlink directly to a Dirent.
@@ -394,8 +395,8 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
}
var newFile *fs.File
- switch err {
- case nil:
+ switch {
+ case err == nil:
// Like sys_open, check for a few things about the
// filesystem before trying to get a reference to the
// fs.File. The same constraints on Check apply.
@@ -415,10 +416,10 @@ func createAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, flags uint, mode
// Create a new fs.File.
newFile, err = found.Inode.GetFile(t, found, fileFlags)
if err != nil {
- return syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
defer newFile.DecRef(t)
- case syserror.ENOENT:
+ case linuxerr.Equals(linuxerr.ENOENT, err):
// File does not exist. Proceed with creation.
// Do we have write permissions on the parent?
@@ -527,7 +528,7 @@ func accessAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode uint) error
// Sanity check the mode.
if mode&^(rOK|wOK|xOK) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
return fileOpOn(t, dirFD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
@@ -595,7 +596,7 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -684,7 +685,7 @@ func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Note this is >= because we need a terminator.
if uint(len(s)) >= size {
- return 0, nil, syserror.ERANGE
+ return 0, nil, linuxerr.ERANGE
}
// Copy out the path name for the node.
@@ -703,7 +704,7 @@ func Chroot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
addr := args[0].Pointer()
if !t.HasCapability(linux.CAP_SYS_CHROOT) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
@@ -714,7 +715,7 @@ func Chroot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
// Is it a directory?
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Does it have execute permissions?
@@ -739,7 +740,7 @@ func Chdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
// Is it a directory?
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Does it have execute permissions?
@@ -758,13 +759,13 @@ func Fchdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Is it a directory?
if !fs.IsDir(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.ENOTDIR
+ return 0, nil, linuxerr.ENOTDIR
}
// Does it have execute permissions?
@@ -789,12 +790,12 @@ func Close(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// (and other reference-holding operations complete).
file, _ := t.FDTable().Remove(t, fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
err := file.Flush(t)
- return 0, nil, handleIOError(t, false /* partial */, err, syserror.EINTR, "close", file)
+ return 0, nil, handleIOError(t, false /* partial */, err, linuxerr.EINTR, "close", file)
}
// Dup implements linux syscall dup(2).
@@ -803,13 +804,13 @@ func Dup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
newFD, err := t.NewFDFrom(0, file, kernel.FDFlags{})
if err != nil {
- return 0, nil, syserror.EMFILE
+ return 0, nil, linuxerr.EMFILE
}
return uintptr(newFD), nil, nil
}
@@ -824,7 +825,7 @@ func Dup2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
if oldfd == newfd {
oldFile := t.GetFile(oldfd)
if oldFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer oldFile.DecRef(t)
@@ -843,12 +844,12 @@ func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
flags := args[2].Uint()
if oldfd == newfd {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
oldFile := t.GetFile(oldfd)
if oldFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer oldFile.DecRef(t)
@@ -905,7 +906,7 @@ func fSetOwn(t *kernel.Task, fd int, file *fs.File, who int32) error {
if who < 0 {
// Check for overflow before flipping the sign.
if who-1 > who {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
pg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(-who))
a.SetOwnerProcessGroup(t, pg)
@@ -923,7 +924,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file, flags := t.FDTable().Get(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -956,7 +957,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// Normally pipe and socket types lack lock operations. We diverge and use a heavy
// hammer by only allowing locks on files and directories.
if !fs.IsFile(file.Dirent.Inode.StableAttr) && !fs.IsDir(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Copy in the lock request.
@@ -976,7 +977,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case 2:
sw = fs.SeekEnd
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Compute the lock offset.
@@ -995,7 +996,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
}
off = uattr.Size
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Compute the lock range.
@@ -1009,33 +1010,33 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
switch flock.Type {
case linux.F_RDLCK:
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
if cmd == linux.F_SETLK {
// Non-blocking lock, provide a nil lock.Blocker.
if !file.Dirent.Inode.LockCtx.Posix.LockRegionVFS1(t.FDTable(), lock.ReadLock, rng, nil) {
- return 0, nil, syserror.EAGAIN
+ return 0, nil, linuxerr.EAGAIN
}
} else {
// Blocking lock, pass in the task to satisfy the lock.Blocker interface.
if !file.Dirent.Inode.LockCtx.Posix.LockRegionVFS1(t.FDTable(), lock.ReadLock, rng, t) {
- return 0, nil, syserror.EINTR
+ return 0, nil, linuxerr.EINTR
}
}
return 0, nil, nil
case linux.F_WRLCK:
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
if cmd == linux.F_SETLK {
// Non-blocking lock, provide a nil lock.Blocker.
if !file.Dirent.Inode.LockCtx.Posix.LockRegionVFS1(t.FDTable(), lock.WriteLock, rng, nil) {
- return 0, nil, syserror.EAGAIN
+ return 0, nil, linuxerr.EAGAIN
}
} else {
// Blocking lock, pass in the task to satisfy the lock.Blocker interface.
if !file.Dirent.Inode.LockCtx.Posix.LockRegionVFS1(t.FDTable(), lock.WriteLock, rng, t) {
- return 0, nil, syserror.EINTR
+ return 0, nil, linuxerr.EINTR
}
}
return 0, nil, nil
@@ -1043,7 +1044,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file.Dirent.Inode.LockCtx.Posix.UnlockRegion(t.FDTable(), rng)
return 0, nil, nil
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
case linux.F_GETOWN:
return uintptr(fGetOwn(t, file)), nil, nil
@@ -1066,47 +1067,47 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.F_OWNER_TID:
task := t.PIDNamespace().TaskWithID(kernel.ThreadID(owner.PID))
if task == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
a.SetOwnerTask(t, task)
return 0, nil, nil
case linux.F_OWNER_PID:
tg := t.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(owner.PID))
if tg == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
a.SetOwnerThreadGroup(t, tg)
return 0, nil, nil
case linux.F_OWNER_PGRP:
pg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(owner.PID))
if pg == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
a.SetOwnerProcessGroup(t, pg)
return 0, nil, nil
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
case linux.F_GET_SEALS:
val, err := tmpfs.GetSeals(file.Dirent.Inode)
return uintptr(val), nil, err
case linux.F_ADD_SEALS:
if !file.Flags().Write {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
err := tmpfs.AddSeals(file.Dirent.Inode, args[2].Uint())
return 0, nil, err
case linux.F_GETPIPE_SZ:
sz, ok := file.FileOperations.(fs.FifoSizer)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
size, err := sz.FifoSize(t, file)
return uintptr(size), nil, err
case linux.F_SETPIPE_SZ:
sz, ok := file.FileOperations.(fs.FifoSizer)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
n, err := sz.SetFifoSize(int64(args[2].Int()))
return uintptr(n), nil, err
@@ -1118,7 +1119,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, a.SetSignal(linux.Signal(args[2].Int()))
default:
// Everything else is not yet supported.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
@@ -1131,18 +1132,18 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// Note: offset is allowed to be negative.
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// If the FD refers to a pipe or FIFO, return error.
if fs.IsPipe(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
switch advice {
@@ -1153,7 +1154,7 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
case linux.POSIX_FADV_DONTNEED:
case linux.POSIX_FADV_NOREUSE:
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Sure, whatever.
@@ -1172,18 +1173,18 @@ func mkdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, mode linux.FileMod
return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string, _ uint) error {
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Does this directory exist already?
remainingTraversals := uint(linux.MaxSymlinkTraversals)
f, err := t.MountNamespace().FindInode(t, root, d, name, &remainingTraversals)
- switch err {
- case nil:
+ switch {
+ case err == nil:
// The directory existed.
defer f.DecRef(t)
- return syserror.EEXIST
- case syserror.EACCES:
+ return linuxerr.EEXIST
+ case linuxerr.Equals(linuxerr.EACCES, err):
// Permission denied while walking to the directory.
return err
default:
@@ -1224,21 +1225,21 @@ func rmdirAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
// Special case: removing the root always returns EBUSY.
if path == "/" {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string, _ uint) error {
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Linux returns different errnos when the path ends in single
// dot vs. double dots.
switch name {
case ".":
- return syserror.EINVAL
+ return linuxerr.EINVAL
case "..":
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
if err := d.MayDelete(t, root, name); err != nil {
@@ -1262,7 +1263,7 @@ func symlinkAt(t *kernel.Task, dirFD int32, newAddr hostarch.Addr, oldAddr hosta
return err
}
if dirPath {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// The oldPath is copied in verbatim. This is because the symlink
@@ -1272,12 +1273,12 @@ func symlinkAt(t *kernel.Task, dirFD int32, newAddr hostarch.Addr, oldAddr hosta
return err
}
if oldPath == "" {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
return fileOpAt(t, dirFD, newPath, func(root *fs.Dirent, d *fs.Dirent, name string, _ uint) error {
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Make sure we have write permissions on the parent directory.
@@ -1329,10 +1330,10 @@ func mayLinkAt(t *kernel.Task, target *fs.Inode) error {
// If we are not the owner, then the file must be regular and have
// Read+Write permissions.
if !fs.IsRegular(target.StableAttr) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if target.CheckPermission(t, fs.PermMask{Read: true, Write: true}) != nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
return nil
@@ -1351,13 +1352,13 @@ func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int3
return err
}
if dirPath {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if allowEmpty && oldPath == "" {
target := t.GetFile(oldDirFD)
if target == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer target.DecRef(t)
if err := mayLinkAt(t, target.Dirent.Inode); err != nil {
@@ -1367,7 +1368,7 @@ func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int3
// Resolve the target directory.
return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string, _ uint) error {
if !fs.IsDir(newParent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Make sure we have write permissions on the parent directory.
@@ -1388,7 +1389,7 @@ func linkAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD int3
// Next resolve newDirFD and newAddr to the parent dirent and name.
return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string, _ uint) error {
if !fs.IsDir(newParent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Make sure we have write permissions on the parent directory.
@@ -1431,14 +1432,14 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Sanity check flags.
if flags&^(linux.AT_SYMLINK_FOLLOW|linux.AT_EMPTY_PATH) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
resolve := flags&linux.AT_SYMLINK_FOLLOW == linux.AT_SYMLINK_FOLLOW
allowEmpty := flags&linux.AT_EMPTY_PATH == linux.AT_EMPTY_PATH
if allowEmpty && !t.HasCapabilityIn(linux.CAP_DAC_READ_SEARCH, t.UserNamespace().Root()) {
- return 0, nil, syserror.ENOENT
+ return 0, nil, linuxerr.ENOENT
}
return 0, nil, linkAt(t, oldDirFD, oldAddr, newDirFD, newAddr, resolve, allowEmpty)
@@ -1454,7 +1455,7 @@ func readlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, bufAddr hostarc
return 0, err
}
if dirPath {
- return 0, syserror.ENOENT
+ return 0, linuxerr.ENOENT
}
err = fileOpOn(t, dirFD, path, false /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
@@ -1464,8 +1465,8 @@ func readlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr, bufAddr hostarc
}
s, err := d.Inode.Readlink(t)
- if err == syserror.ENOLINK {
- return syserror.EINVAL
+ if linuxerr.Equals(linuxerr.ENOLINK, err) {
+ return linuxerr.EINVAL
}
if err != nil {
return err
@@ -1519,7 +1520,7 @@ func unlinkAt(t *kernel.Task, dirFD int32, addr hostarch.Addr) error {
return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string, _ uint) error {
if !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
if err := d.MayDelete(t, root, name); err != nil {
@@ -1557,7 +1558,7 @@ func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
length := args[1].Int64()
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
@@ -1565,7 +1566,7 @@ func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, err
}
if dirPath {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if uint64(length) >= t.ThreadGroup().Limits().Get(limits.FileSize).Cur {
@@ -1573,17 +1574,17 @@ func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
Signo: int32(linux.SIGXFSZ),
Code: linux.SI_USER,
})
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
if fs.IsDir(d.Inode.StableAttr) {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
// In contrast to open(O_TRUNC), truncate(2) is only valid for file
// types.
if !fs.IsFile(d.Inode.StableAttr) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Reject truncation if the access permissions do not allow truncation.
@@ -1610,25 +1611,25 @@ func Ftruncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Reject truncation if the file flags do not permit this operation.
// This is different from truncate(2) above.
if !file.Flags().Write {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// In contrast to open(O_TRUNC), truncate(2) is only valid for file
// types. Note that this is different from truncate(2) above, where a
// directory returns EISDIR.
if !fs.IsFile(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if uint64(length) >= t.ThreadGroup().Limits().Get(limits.FileSize).Cur {
@@ -1636,7 +1637,7 @@ func Ftruncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
Signo: int32(linux.SIGXFSZ),
Code: linux.SI_USER,
})
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
if err := file.Dirent.Inode.Truncate(t, file.Dirent, length); err != nil {
@@ -1682,7 +1683,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
kuid := c.UserNamespace.MapToKUID(uid)
// Valid UID must be supplied if UID is to be changed.
if !kuid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// "Only a privileged process (CAP_CHOWN) may change the owner
@@ -1692,7 +1693,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
// explicitly not changing its UID.
isNoop := uattr.Owner.UID == kuid
if !(hasCap || (isOwner && isNoop)) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// The setuid and setgid bits are cleared during a chown.
@@ -1706,7 +1707,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
kgid := c.UserNamespace.MapToKGID(gid)
// Valid GID must be supplied if GID is to be changed.
if !kgid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// "The owner of a file may change the group of the file to any
@@ -1715,7 +1716,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
isNoop := uattr.Owner.GID == kgid
isMemberGroup := c.InGroup(kgid)
if !(hasCap || (isOwner && (isNoop || isMemberGroup))) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// The setuid and setgid bits are cleared during a chown.
@@ -1737,7 +1738,7 @@ func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
if clearPrivilege && uattr.Perms.HasSetUIDOrGID() && !fs.IsDir(d.Inode.StableAttr) {
uattr.Perms.DropSetUIDAndMaybeGID()
if !d.Inode.SetPermissions(t, d, uattr.Perms) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
@@ -1754,7 +1755,7 @@ func chownAt(t *kernel.Task, fd int32, addr hostarch.Addr, resolve, allowEmpty b
// Annoying. What's wrong with fchown?
file := t.GetFile(fd)
if file == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer file.DecRef(t)
@@ -1792,7 +1793,7 @@ func Fchown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -1808,7 +1809,7 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
flags := args[4].Int()
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, chownAt(t, dirFD, addr, flags&linux.AT_SYMLINK_NOFOLLOW == 0, flags&linux.AT_EMPTY_PATH != 0, uid, gid)
@@ -1817,12 +1818,12 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
func chmod(t *kernel.Task, d *fs.Dirent, mode linux.FileMode) error {
// Must own file to change mode.
if !d.Inode.CheckOwnership(t) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
p := fs.FilePermsFromMode(mode)
if !d.Inode.SetPermissions(t, d, p) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// File attribute changed, generate notification.
@@ -1857,7 +1858,7 @@ func Fchmod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -1888,7 +1889,7 @@ func utimes(t *kernel.Task, dirFD int32, addr hostarch.Addr, ts fs.TimeSpec, res
if !d.Inode.CheckOwnership(t) {
// Trying to set a specific time? Must be owner.
if (ts.ATimeOmit || !ts.ATimeSetSystemTime) && (ts.MTimeOmit || !ts.MTimeSetSystemTime) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Trying to set to current system time? Must have write access.
@@ -1913,11 +1914,11 @@ func utimes(t *kernel.Task, dirFD int32, addr hostarch.Addr, ts fs.TimeSpec, res
if addr == 0 && dirFD != linux.AT_FDCWD {
if !resolve {
// Linux returns EINVAL in this case. See utimes.c.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
f := t.GetFile(dirFD)
if f == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer f.DecRef(t)
@@ -1996,7 +1997,7 @@ func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, err
}
if !timespecIsValid(times[0]) || !timespecIsValid(times[1]) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// If both are UTIME_OMIT, this is a noop.
@@ -2031,7 +2032,7 @@ func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
if times[0].Usec >= 1e6 || times[0].Usec < 0 ||
times[1].Usec >= 1e6 || times[1].Usec < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ts = fs.TimeSpec{
@@ -2058,26 +2059,26 @@ func renameAt(t *kernel.Task, oldDirFD int32, oldAddr hostarch.Addr, newDirFD in
return fileOpAt(t, oldDirFD, oldPath, func(root *fs.Dirent, oldParent *fs.Dirent, oldName string, _ uint) error {
if !fs.IsDir(oldParent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Rename rejects paths that end in ".", "..", or empty (i.e.
// the root) with EBUSY.
switch oldName {
case "", ".", "..":
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string, _ uint) error {
if !fs.IsDir(newParent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Rename rejects paths that end in ".", "..", or empty
// (i.e. the root) with EBUSY.
switch newName {
case "", ".", "..":
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
return fs.Rename(t, root, oldParent, oldName, newParent, newName)
@@ -2112,39 +2113,39 @@ func Fallocate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if offset < 0 || length <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if mode != 0 {
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOTSUP
+ return 0, nil, linuxerr.ENOTSUP
}
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
if fs.IsPipe(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
if fs.IsDir(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EISDIR
+ return 0, nil, linuxerr.EISDIR
}
if !fs.IsRegular(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.ENODEV
+ return 0, nil, linuxerr.ENODEV
}
size := offset + length
if size < 0 {
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
if uint64(size) >= t.ThreadGroup().Limits().Get(limits.FileSize).Cur {
t.SendSignal(&linux.SignalInfo{
Signo: int32(linux.SIGXFSZ),
Code: linux.SI_USER,
})
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
if err := file.Dirent.Inode.Allocate(t, file.Dirent, offset, length); err != nil {
@@ -2165,7 +2166,7 @@ func Flock(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
// flock(2): EBADF fd is not an open file descriptor.
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -2183,31 +2184,31 @@ func Flock(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
if nonblocking {
// Since we're nonblocking we pass a nil lock.Blocker implementation.
if !file.Dirent.Inode.LockCtx.BSD.LockRegionVFS1(file, lock.WriteLock, rng, nil) {
- return 0, nil, syserror.EWOULDBLOCK
+ return 0, nil, linuxerr.EWOULDBLOCK
}
} else {
// Because we're blocking we will pass the task to satisfy the lock.Blocker interface.
if !file.Dirent.Inode.LockCtx.BSD.LockRegionVFS1(file, lock.WriteLock, rng, t) {
- return 0, nil, syserror.EINTR
+ return 0, nil, linuxerr.EINTR
}
}
case linux.LOCK_SH:
if nonblocking {
// Since we're nonblocking we pass a nil lock.Blocker implementation.
if !file.Dirent.Inode.LockCtx.BSD.LockRegionVFS1(file, lock.ReadLock, rng, nil) {
- return 0, nil, syserror.EWOULDBLOCK
+ return 0, nil, linuxerr.EWOULDBLOCK
}
} else {
// Because we're blocking we will pass the task to satisfy the lock.Blocker interface.
if !file.Dirent.Inode.LockCtx.BSD.LockRegionVFS1(file, lock.ReadLock, rng, t) {
- return 0, nil, syserror.EINTR
+ return 0, nil, linuxerr.EINTR
}
}
case linux.LOCK_UN:
file.Dirent.Inode.LockCtx.BSD.UnlockRegion(file, rng)
default:
// flock(2): EINVAL operation is invalid.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
@@ -2226,7 +2227,7 @@ func MemfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
if flags&^memfdAllFlags != 0 {
// Unknown bits in flags.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
allowSeals := flags&linux.MFD_ALLOW_SEALING != 0
@@ -2237,7 +2238,7 @@ func MemfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
return 0, nil, err
}
if len(name) > memfdMaxNameLen {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
name = memfdPrefix + name
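The Fallocate hunk above orders its checks so that an offset+length that wraps negative, or that lands at or past the task's RLIMIT_FSIZE, both come back as EFBIG (with SIGXFSZ sent in the limit case). A minimal standalone sketch of that ordering; errEFBIG and fsizeLimit are illustrative placeholders, not sentry symbols:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // errEFBIG stands in for linuxerr.EFBIG.
    var errEFBIG = errors.New("EFBIG")

    // checkFallocateSize mirrors the ordering above: a negative sum means
    // offset+length overflowed, and a sum at or past the file-size limit is
    // rejected (the real syscall additionally sends SIGXFSZ to the task).
    func checkFallocateSize(offset, length int64, fsizeLimit uint64) error {
    	size := offset + length
    	if size < 0 {
    		return errEFBIG
    	}
    	if uint64(size) >= fsizeLimit {
    		return errEFBIG
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkFallocateSize(1<<62, 1<<62, 1<<40)) // EFBIG: overflow
    	fmt.Println(checkFallocateSize(0, 4096, 1<<40))      // <nil>
    }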
diff --git a/pkg/sentry/syscalls/linux/sys_futex.go b/pkg/sentry/syscalls/linux/sys_futex.go
index eeea1613b..bcdd7b633 100644
--- a/pkg/sentry/syscalls/linux/sys_futex.go
+++ b/pkg/sentry/syscalls/linux/sys_futex.go
@@ -18,11 +18,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// futexWaitRestartBlock encapsulates the state required to restart futex(2)
@@ -74,7 +75,7 @@ func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, fo
}
t.Futex().WaitComplete(w, t)
- return 0, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
// futexWaitDuration performs a FUTEX_WAIT, blocking until the wait is
@@ -102,7 +103,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add
// The wait was unsuccessful for some reason other than interruption. Simply
// forward the error.
- if err != syserror.ErrInterrupted {
+ if err != linuxerr.ErrInterrupted {
return 0, err
}
@@ -110,7 +111,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add
// The wait duration was absolute, restart with the original arguments.
if forever {
- return 0, syserror.ERESTARTSYS
+ return 0, linuxerr.ERESTARTSYS
}
// The wait duration was relative, restart with the remaining duration.
@@ -121,7 +122,7 @@ func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, add
val: val,
mask: mask,
})
- return 0, syserror.ERESTART_RESTARTBLOCK
+ return 0, linuxerr.ERESTART_RESTARTBLOCK
}
func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr hostarch.Addr, private bool) error {
@@ -149,7 +150,7 @@ func futexLockPI(t *kernel.Task, ts linux.Timespec, forever bool, addr hostarch.
}
t.Futex().WaitComplete(w, t)
- return syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
func tryLockPI(t *kernel.Task, addr hostarch.Addr, private bool) error {
@@ -159,7 +160,7 @@ func tryLockPI(t *kernel.Task, addr hostarch.Addr, private bool) error {
return err
}
if !locked {
- return syserror.EWOULDBLOCK
+ return linuxerr.EWOULDBLOCK
}
return nil
}
@@ -210,7 +211,7 @@ func Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// WAIT_BITSET uses an absolute timeout which is either
// CLOCK_MONOTONIC or CLOCK_REALTIME.
if mask == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
n, err := futexWaitAbsolute(t, clockRealtime, timespec, forever, addr, private, uint32(val), mask)
return n, nil, err
@@ -224,7 +225,7 @@ func Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.FUTEX_WAKE_BITSET:
if mask == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if val <= 0 {
// The Linux kernel wakes one waiter even if val is
@@ -279,11 +280,11 @@ func Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.FUTEX_WAIT_REQUEUE_PI, linux.FUTEX_CMP_REQUEUE_PI:
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
default:
// We don't even know about this command.
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
}
@@ -295,7 +296,7 @@ func SetRobustList(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
length := args[1].SizeT()
if length != uint(linux.SizeOfRobustListHead) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.SetRobustList(head)
return 0, nil, nil
@@ -310,13 +311,13 @@ func GetRobustList(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
sizeAddr := args[2].Pointer()
if tid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ot := t
if tid != 0 {
if ot = t.PIDNamespace().TaskWithID(kernel.ThreadID(tid)); ot == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
}
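The futex changes above route interrupted blocking waits through syserr.ConvertIntr(err, linuxerr.ERESTARTSYS), so an interruption becomes a restartable syscall while other errors pass through. A minimal sketch of that conversion pattern, with local sentinels standing in for linuxerr.ErrInterrupted and linuxerr.ERESTARTSYS (the real helper lives in pkg/syserr):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errInterrupted = errors.New("interrupted") // stand-in for linuxerr.ErrInterrupted
    	errRestartSys  = errors.New("ERESTARTSYS") // stand-in for linuxerr.ERESTARTSYS
    )

    // convertIntr sketches the ConvertIntr pattern: an interruption while
    // blocked is rewritten to the restartable error, everything else is
    // returned unchanged.
    func convertIntr(err, intr error) error {
    	if errors.Is(err, errInterrupted) {
    		return intr
    	}
    	return err
    }

    func main() {
    	fmt.Println(convertIntr(errInterrupted, errRestartSys)) // ERESTARTSYS
    	fmt.Println(convertIntr(nil, errRestartSys))            // <nil>
    }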
diff --git a/pkg/sentry/syscalls/linux/sys_getdents.go b/pkg/sentry/syscalls/linux/sys_getdents.go
index bbba71d8f..9f7a5ae8a 100644
--- a/pkg/sentry/syscalls/linux/sys_getdents.go
+++ b/pkg/sentry/syscalls/linux/sys_getdents.go
@@ -19,11 +19,11 @@ import (
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -38,7 +38,7 @@ func Getdents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
minSize := int(smallestDirent(t.Arch()))
if size < minSize {
// size is smaller than smallest possible dirent.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
n, err := getdents(t, fd, addr, size, (*dirent).Serialize)
@@ -54,7 +54,7 @@ func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
minSize := int(smallestDirent64(t.Arch()))
if size < minSize {
// size is smaller than smallest possible dirent.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
n, err := getdents(t, fd, addr, size, (*dirent).Serialize64)
@@ -66,7 +66,7 @@ func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
func getdents(t *kernel.Task, fd int32, addr hostarch.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
dir := t.GetFile(fd)
if dir == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer dir.DecRef(t)
@@ -82,7 +82,7 @@ func getdents(t *kernel.Task, fd int32, addr hostarch.Addr, size int, f func(*di
ds := newDirentSerializer(f, w, t.Arch(), size)
rerr := dir.Readdir(t, ds)
- switch err := handleIOError(t, ds.Written() > 0, rerr, syserror.ERESTARTSYS, "getdents", dir); err {
+ switch err := handleIOError(t, ds.Written() > 0, rerr, linuxerr.ERESTARTSYS, "getdents", dir); err {
case nil:
dir.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
return uintptr(ds.Written()), nil
diff --git a/pkg/sentry/syscalls/linux/sys_identity.go b/pkg/sentry/syscalls/linux/sys_identity.go
index a29d307e5..50fcadb58 100644
--- a/pkg/sentry/syscalls/linux/sys_identity.go
+++ b/pkg/sentry/syscalls/linux/sys_identity.go
@@ -15,10 +15,10 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -142,7 +142,7 @@ func Setresgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
func Getgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
size := int(args[0].Int())
if size < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
kgids := t.Credentials().ExtraKGIDs
// "If size is zero, list is not modified, but the total number of
@@ -151,7 +151,7 @@ func Getgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return uintptr(len(kgids)), nil, nil
}
if size < len(kgids) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
gids := make([]auth.GID, len(kgids))
for i, kgid := range kgids {
@@ -167,7 +167,7 @@ func Getgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
func Setgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
size := args[0].Int()
if size < 0 || size > maxNGroups {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if size == 0 {
return 0, nil, t.SetExtraGIDs(nil)
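The Getgroups hunks above encode the getgroups(2) contract: a negative size is invalid, size 0 only reports how many supplementary groups exist, and a positive size smaller than the list is rejected. A small sketch of that size handling (errEINVAL and the plain uint32 slice are placeholders for the sentry's types):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errEINVAL = errors.New("EINVAL")

    // getgroups mirrors the size checks above and returns the number of
    // supplementary groups that would be copied out.
    func getgroups(size int, groups []uint32) (int, error) {
    	switch {
    	case size < 0:
    		return 0, errEINVAL
    	case size == 0:
    		return len(groups), nil
    	case size < len(groups):
    		return 0, errEINVAL
    	}
    	return len(groups), nil
    }

    func main() {
    	gids := []uint32{4, 24, 27}
    	fmt.Println(getgroups(0, gids)) // 3 <nil>
    	fmt.Println(getgroups(2, gids)) // 0 EINVAL
    	fmt.Println(getgroups(8, gids)) // 3 <nil>
    }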
diff --git a/pkg/sentry/syscalls/linux/sys_inotify.go b/pkg/sentry/syscalls/linux/sys_inotify.go
index cf47bb9dd..b7ad1922e 100644
--- a/pkg/sentry/syscalls/linux/sys_inotify.go
+++ b/pkg/sentry/syscalls/linux/sys_inotify.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/anon"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
const allFlags = int(linux.IN_NONBLOCK | linux.IN_CLOEXEC)
@@ -30,7 +30,7 @@ func InotifyInit1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
flags := int(args[0].Int())
if flags&^allFlags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
dirent := fs.NewDirent(t, anon.NewInode(t), "inotify")
@@ -65,14 +65,14 @@ func fdToInotify(t *kernel.Task, fd int32) (*fs.Inotify, *fs.File, error) {
file := t.GetFile(fd)
if file == nil {
// Invalid fd.
- return nil, nil, syserror.EBADF
+ return nil, nil, linuxerr.EBADF
}
ino, ok := file.FileOperations.(*fs.Inotify)
if !ok {
// Not an inotify fd.
file.DecRef(t)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
return ino, file, nil
@@ -91,7 +91,7 @@ func InotifyAddWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kern
// "EINVAL: The given event mask contains no valid events."
// -- inotify_add_watch(2)
if validBits := mask & linux.ALL_INOTIFY_BITS; validBits == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ino, file, err := fdToInotify(t, fd)
@@ -108,7 +108,7 @@ func InotifyAddWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kern
err = fileOpOn(t, linux.AT_FDCWD, path, resolve, func(root *fs.Dirent, dirent *fs.Dirent, _ uint) error {
// "IN_ONLYDIR: Only watch pathname if it is a directory." -- inotify(7)
if onlyDir := mask&linux.IN_ONLYDIR != 0; onlyDir && !fs.IsDir(dirent.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Copy out to the return frame.
diff --git a/pkg/sentry/syscalls/linux/sys_lseek.go b/pkg/sentry/syscalls/linux/sys_lseek.go
index 0046347cb..4a5712a29 100644
--- a/pkg/sentry/syscalls/linux/sys_lseek.go
+++ b/pkg/sentry/syscalls/linux/sys_lseek.go
@@ -15,10 +15,10 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -31,7 +31,7 @@ func Lseek(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -44,11 +44,11 @@ func Lseek(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case 2:
sw = fs.SeekEnd
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
offset, serr := file.Seek(t, sw, offset)
- err := handleIOError(t, false /* partialResult */, serr, syserror.ERESTARTSYS, "lseek", file)
+ err := handleIOError(t, false /* partialResult */, serr, linuxerr.ERESTARTSYS, "lseek", file)
if err != nil {
return 0, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_membarrier.go b/pkg/sentry/syscalls/linux/sys_membarrier.go
index 63ee5d435..6ceedc086 100644
--- a/pkg/sentry/syscalls/linux/sys_membarrier.go
+++ b/pkg/sentry/syscalls/linux/sys_membarrier.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Membarrier implements syscall membarrier(2).
@@ -29,7 +29,7 @@ func Membarrier(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
switch cmd {
case linux.MEMBARRIER_CMD_QUERY:
if flags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var supportedCommands uintptr
if t.Kernel().Platform.HaveGlobalMemoryBarrier() {
@@ -46,58 +46,58 @@ func Membarrier(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return supportedCommands, nil, nil
case linux.MEMBARRIER_CMD_GLOBAL, linux.MEMBARRIER_CMD_GLOBAL_EXPEDITED, linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED:
if flags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if cmd == linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED && !t.MemoryManager().IsMembarrierPrivateEnabled() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
return 0, nil, t.Kernel().Platform.GlobalMemoryBarrier()
case linux.MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
if flags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// no-op
return 0, nil, nil
case linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
if flags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.Kernel().Platform.HaveGlobalMemoryBarrier() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.MemoryManager().EnableMembarrierPrivate()
return 0, nil, nil
case linux.MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
if flags&^linux.MEMBARRIER_CMD_FLAG_CPU != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.RSeqAvailable() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.MemoryManager().IsMembarrierRSeqEnabled() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
// MEMBARRIER_CMD_FLAG_CPU and cpu_id are ignored since we don't have
// the ability to preempt specific CPUs.
return 0, nil, t.Kernel().Platform.PreemptAllCPUs()
case linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
if flags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if !t.RSeqAvailable() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.MemoryManager().EnableMembarrierRSeq()
return 0, nil, nil
default:
// Probably a command we don't implement.
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
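The Membarrier hunks above gate MEMBARRIER_CMD_PRIVATE_EXPEDITED on a prior MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, which flips a bit on the task's MemoryManager; before registration the command fails with EPERM. A toy stateful sketch of that gating (the mm type here is a stand-in, not the sentry's MemoryManager):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errEPERM = errors.New("EPERM")

    // mm stands in for the per-task registration bit.
    type mm struct{ privateEnabled bool }

    // register corresponds to MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED.
    func (m *mm) register() { m.privateEnabled = true }

    // privateExpedited corresponds to MEMBARRIER_CMD_PRIVATE_EXPEDITED; the
    // real path then issues a global memory barrier via the platform.
    func (m *mm) privateExpedited() error {
    	if !m.privateEnabled {
    		return errEPERM
    	}
    	return nil
    }

    func main() {
    	m := &mm{}
    	fmt.Println(m.privateExpedited()) // EPERM: not registered yet
    	m.register()
    	fmt.Println(m.privateExpedited()) // <nil>
    }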
diff --git a/pkg/sentry/syscalls/linux/sys_mempolicy.go b/pkg/sentry/syscalls/linux/sys_mempolicy.go
index 6d27f4292..6e7bcb868 100644
--- a/pkg/sentry/syscalls/linux/sys_mempolicy.go
+++ b/pkg/sentry/syscalls/linux/sys_mempolicy.go
@@ -18,10 +18,10 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -43,7 +43,7 @@ func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64,
// maxnode-1, not maxnode, as the number of bits.
bits := maxnode - 1
if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if bits == 0 {
return 0, nil
@@ -58,12 +58,12 @@ func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64,
// Check that only allowed bits in the first unsigned long in the nodemask
// are set.
if val&^allowedNodemask != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Check that all remaining bits in the nodemask are 0.
for i := 8; i < len(buf); i++ {
if buf[i] != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
return val, nil
@@ -74,7 +74,7 @@ func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uin
// bits.
bits := maxnode - 1
if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if bits == 0 {
return nil
@@ -89,7 +89,7 @@ func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uin
if bits > 64 {
remAddr, ok := addr.AddLength(8)
if !ok {
- return syserror.EFAULT
+ return linuxerr.EFAULT
}
remUint64 := (bits - 1) / 64
if _, err := t.MemoryManager().ZeroOut(t, remAddr, int64(remUint64)*8, usermem.IOOpts{
@@ -110,7 +110,7 @@ func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
flags := args[4].Uint()
if flags&^(linux.MPOL_F_NODE|linux.MPOL_F_ADDR|linux.MPOL_F_MEMS_ALLOWED) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
nodeFlag := flags&linux.MPOL_F_NODE != 0
addrFlag := flags&linux.MPOL_F_ADDR != 0
@@ -119,7 +119,7 @@ func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// "EINVAL: The value specified by maxnode is less than the number of node
// IDs supported by the system." - get_mempolicy(2)
if nodemask != 0 && maxnode < maxNodes {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "If flags specifies MPOL_F_MEMS_ALLOWED [...], the mode argument is
@@ -130,7 +130,7 @@ func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// "It is not permitted to combine MPOL_F_MEMS_ALLOWED with either
// MPOL_F_ADDR or MPOL_F_NODE."
if nodeFlag || addrFlag {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if err := copyOutNodemask(t, nodemask, maxnode, allowedNodemask); err != nil {
return 0, nil, err
@@ -184,7 +184,7 @@ func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// mm/mempolicy.c:do_get_mempolicy() doesn't special-case NULL; it will
// just (usually) fail to find a VMA at address 0 and return EFAULT.
if addr != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "If flags is specified as 0, then information about the calling thread's
@@ -198,7 +198,7 @@ func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
policy, nodemaskVal := t.NumaPolicy()
if nodeFlag {
if policy&^linux.MPOL_MODE_FLAGS != linux.MPOL_INTERLEAVE {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
policy = linux.MPOL_DEFAULT // maxNodes == 1
}
@@ -240,12 +240,12 @@ func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
flags := args[5].Uint()
if flags&^linux.MPOL_MF_VALID != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "If MPOL_MF_MOVE_ALL is passed in flags ... [the] calling thread must be
// privileged (CAP_SYS_NICE) to use this flag." - mbind(2)
if flags&linux.MPOL_MF_MOVE_ALL != 0 && !t.HasCapability(linux.CAP_SYS_NICE) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
mode, nodemaskVal, err := copyInMempolicyNodemask(t, mode, nodemask, maxnode)
@@ -264,11 +264,11 @@ func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nod
mode := linux.NumaPolicy(modeWithFlags &^ linux.MPOL_MODE_FLAGS)
if flags == linux.MPOL_MODE_FLAGS {
// Can't specify both mode flags simultaneously.
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
if mode < 0 || mode >= linux.MPOL_MAX {
// Must specify a valid mode.
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
var nodemaskVal uint64
@@ -285,22 +285,22 @@ func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nod
// "nodemask must be specified as NULL." - set_mempolicy(2). This is inaccurate;
// Linux allows a nodemask to be specified, as long as it is empty.
if nodemaskVal != 0 {
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
case linux.MPOL_BIND, linux.MPOL_INTERLEAVE:
// These require a non-empty nodemask.
if nodemaskVal == 0 {
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
case linux.MPOL_PREFERRED:
// This permits an empty nodemask, as long as no flags are set.
if nodemaskVal == 0 && flags != 0 {
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
case linux.MPOL_LOCAL:
// This requires an empty nodemask and no flags set ...
if nodemaskVal != 0 || flags != 0 {
- return 0, 0, syserror.EINVAL
+ return 0, 0, linuxerr.EINVAL
}
// ... and is implemented as MPOL_PREFERRED.
mode = linux.MPOL_PREFERRED
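copyInNodemask and copyOutNodemask above treat maxnode-1 as the number of nodemask bits, so maxnode == 0 wraps around in the unsigned subtraction and is rejected along with masks wider than a page. A small sketch of that bounds check (pageSize and errEINVAL are placeholders for hostarch.PageSize and linuxerr.EINVAL):

    package main

    import (
    	"errors"
    	"fmt"
    )

    const pageSize = 4096

    var errEINVAL = errors.New("EINVAL")

    // checkMaxnode mirrors the bits := maxnode - 1 rule above.
    func checkMaxnode(maxnode uint32) (uint32, error) {
    	bits := maxnode - 1
    	if bits > pageSize*8 { // also catches the maxnode == 0 wraparound
    		return 0, errEINVAL
    	}
    	return bits, nil
    }

    func main() {
    	fmt.Println(checkMaxnode(0))       // 0 EINVAL (wraparound)
    	fmt.Println(checkMaxnode(65))      // 64 <nil>
    	fmt.Println(checkMaxnode(1 << 20)) // 0 EINVAL (wider than a page)
    }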
diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go
index 70da0707d..7efd17d40 100644
--- a/pkg/sentry/syscalls/linux/sys_mmap.go
+++ b/pkg/sentry/syscalls/linux/sys_mmap.go
@@ -18,13 +18,13 @@ import (
"bytes"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// Brk implements linux syscall brk(2).
@@ -51,7 +51,7 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Require exactly one of MAP_PRIVATE and MAP_SHARED.
if private == shared {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
opts := memmap.MMapOpts{
@@ -84,14 +84,14 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Convert the passed FD to a file reference.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
flags := file.Flags()
// mmap unconditionally requires that the FD is readable.
if !flags.Read {
- return 0, nil, syserror.EACCES
+ return 0, nil, linuxerr.EACCES
}
// MAP_SHARED requires that the FD be writable for PROT_WRITE.
if shared && !flags.Write {
@@ -132,7 +132,7 @@ func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
newAddr := args[4].Pointer()
if flags&^(linux.MREMAP_MAYMOVE|linux.MREMAP_FIXED) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
mayMove := flags&linux.MREMAP_MAYMOVE != 0
fixed := flags&linux.MREMAP_FIXED != 0
@@ -147,7 +147,7 @@ func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case !mayMove && fixed:
// "If MREMAP_FIXED is specified, then MREMAP_MAYMOVE must also be
// specified." - mremap(2)
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
rv, err := t.MemoryManager().MRemap(t, oldAddr, oldSize, newSize, mm.MRemapOpts{
@@ -178,7 +178,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// "The Linux implementation requires that the address addr be
// page-aligned, and allows length to be zero." - madvise(2)
if addr.RoundDown() != addr {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if length == 0 {
return 0, nil, nil
@@ -186,7 +186,7 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// Not explicitly stated: length need not be page-aligned.
lenAddr, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
length = uint64(lenAddr)
@@ -211,13 +211,13 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
case linux.MADV_REMOVE:
// These "suggestions" have application-visible side effects, so we
// have to indicate that we don't support them.
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
case linux.MADV_HWPOISON:
// Only privileged processes are allowed to poison pages.
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
default:
// If adv is not a valid value tell the caller.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
@@ -228,25 +228,25 @@ func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
vec := args[2].Pointer()
if addr != addr.RoundDown() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "The length argument need not be a multiple of the page size, but since
// residency information is returned for whole pages, length is effectively
// rounded up to the next multiple of the page size." - mincore(2)
la, ok := hostarch.Addr(length).RoundUp()
if !ok {
- return 0, nil, syserror.ENOMEM
+ return 0, nil, linuxerr.ENOMEM
}
ar, ok := addr.ToRange(uint64(la))
if !ok {
- return 0, nil, syserror.ENOMEM
+ return 0, nil, linuxerr.ENOMEM
}
// Pretend that all mapped pages are "resident in core".
mapped := t.MemoryManager().VirtualMemorySizeRange(ar)
// "ENOMEM: addr to addr + length contained unmapped memory."
if mapped != uint64(la) {
- return 0, nil, syserror.ENOMEM
+ return 0, nil, linuxerr.ENOMEM
}
resident := bytes.Repeat([]byte{1}, int(mapped/hostarch.PageSize))
_, err := t.CopyOutBytes(vec, resident)
@@ -265,11 +265,11 @@ func Msync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// semantics that are (currently) equivalent to specifying MS_ASYNC." -
// msync(2)
if flags&^(linux.MS_ASYNC|linux.MS_SYNC|linux.MS_INVALIDATE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
sync := flags&linux.MS_SYNC != 0
if sync && flags&linux.MS_ASYNC != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
err := t.MemoryManager().MSync(t, addr, uint64(length), mm.MSyncOpts{
Sync: sync,
@@ -277,7 +277,7 @@ func Msync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
})
// MSync calls fsync, the same interrupt conversion rules apply, see
// mm/msync.c, fsync POSIX.1-2008.
- return 0, nil, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
// Mlock implements linux syscall mlock(2).
@@ -295,7 +295,7 @@ func Mlock2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
flags := args[2].Int()
if flags&^(linux.MLOCK_ONFAULT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
mode := memmap.MLockEager
@@ -318,7 +318,7 @@ func Mlockall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
flags := args[0].Int()
if flags&^(linux.MCL_CURRENT|linux.MCL_FUTURE|linux.MCL_ONFAULT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
mode := memmap.MLockEager
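Mincore above reports every mapped page as resident in core, so the vector copied back to userspace is simply one byte per page with the low bit set. A sketch of that vector construction (pageSize is a placeholder for hostarch.PageSize):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    const pageSize = 4096

    // mincoreVector builds the "all resident" answer for a mapped range of
    // the given size, matching the bytes.Repeat call above.
    func mincoreVector(mappedBytes uint64) []byte {
    	return bytes.Repeat([]byte{1}, int(mappedBytes/pageSize))
    }

    func main() {
    	fmt.Println(mincoreVector(3 * pageSize)) // [1 1 1]
    }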
diff --git a/pkg/sentry/syscalls/linux/sys_mount.go b/pkg/sentry/syscalls/linux/sys_mount.go
index 864d2138c..6d26f89b9 100644
--- a/pkg/sentry/syscalls/linux/sys_mount.go
+++ b/pkg/sentry/syscalls/linux/sys_mount.go
@@ -16,12 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -67,7 +66,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// Must have CAP_SYS_ADMIN in the mount namespace's associated user
// namespace.
if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespace().UserNamespace()) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
const unsupportedOps = linux.MS_REMOUNT | linux.MS_BIND |
@@ -83,15 +82,15 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// unknown or unsupported flags are passed. Since we don't implement
// everything, we fail explicitly on flags that are unimplemented.
if flags&(unsupportedOps|unsupportedFlags) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
rsys, ok := fs.FindFilesystem(fsType)
if !ok {
- return 0, nil, syserror.ENODEV
+ return 0, nil, linuxerr.ENODEV
}
if !rsys.AllowUserMount() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
var superFlags fs.MountSourceFlags
@@ -107,7 +106,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
rootInode, err := rsys.Mount(t, sourcePath, superFlags, data, nil)
if err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if err := fileOpOn(t, linux.AT_FDCWD, targetPath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
@@ -130,7 +129,7 @@ func Umount2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
const unsupported = linux.MNT_FORCE | linux.MNT_EXPIRE
if flags&unsupported != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, _, err := copyInPath(t, addr, false /* allowEmpty */)
@@ -143,7 +142,7 @@ func Umount2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
//
// Currently, this is always the init task's user namespace.
if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespace().UserNamespace()) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
resolve := flags&linux.UMOUNT_NOFOLLOW != linux.UMOUNT_NOFOLLOW
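Mount above applies its gates in a fixed order: CAP_SYS_ADMIN in the mount namespace's user namespace (EPERM), then unsupported flag bits (EINVAL), then whether the filesystem type is registered (ENODEV) and user-mountable (EPERM). A boolean sketch of that ordering; the parameters stand in for the real capability check and registry lookup:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errEPERM  = errors.New("EPERM")
    	errEINVAL = errors.New("EINVAL")
    	errENODEV = errors.New("ENODEV")
    )

    // mountChecks mirrors the gate ordering in Mount above.
    func mountChecks(hasCapSysAdmin, hasUnsupportedBits, fsRegistered, allowUserMount bool) error {
    	if !hasCapSysAdmin {
    		return errEPERM
    	}
    	if hasUnsupportedBits {
    		return errEINVAL
    	}
    	if !fsRegistered {
    		return errENODEV
    	}
    	if !allowUserMount {
    		return errEPERM
    	}
    	return nil
    }

    func main() {
    	fmt.Println(mountChecks(true, false, true, true))  // <nil>
    	fmt.Println(mountChecks(true, false, false, true)) // ENODEV
    }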
diff --git a/pkg/sentry/syscalls/linux/sys_msgqueue.go b/pkg/sentry/syscalls/linux/sys_msgqueue.go
new file mode 100644
index 000000000..60b989ee7
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_msgqueue.go
@@ -0,0 +1,193 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+ "gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/marshal/primitive"
+ "gvisor.dev/gvisor/pkg/sentry/arch"
+ "gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/msgqueue"
+)
+
+// Msgget implements msgget(2).
+func Msgget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+ key := ipc.Key(args[0].Int())
+ flag := args[1].Int()
+
+ private := key == linux.IPC_PRIVATE
+ create := flag&linux.IPC_CREAT == linux.IPC_CREAT
+ exclusive := flag&linux.IPC_EXCL == linux.IPC_EXCL
+ mode := linux.FileMode(flag & 0777)
+
+ r := t.IPCNamespace().MsgqueueRegistry()
+ queue, err := r.FindOrCreate(t, key, mode, private, create, exclusive)
+ if err != nil {
+ return 0, nil, err
+ }
+ return uintptr(queue.ID()), nil, nil
+}
+
+// Msgsnd implements msgsnd(2).
+func Msgsnd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+ id := ipc.ID(args[0].Int())
+ msgAddr := args[1].Pointer()
+ size := args[2].Int64()
+ flag := args[3].Int()
+
+ if size < 0 || size > linux.MSGMAX {
+ return 0, nil, linuxerr.EINVAL
+ }
+
+ wait := flag&linux.IPC_NOWAIT != linux.IPC_NOWAIT
+ pid := int32(t.ThreadGroup().ID())
+
+ buf := linux.MsgBuf{
+ Text: make([]byte, size),
+ }
+ if _, err := buf.CopyIn(t, msgAddr); err != nil {
+ return 0, nil, err
+ }
+
+ queue, err := t.IPCNamespace().MsgqueueRegistry().FindByID(id)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ msg := msgqueue.Message{
+ Type: int64(buf.Type),
+ Text: buf.Text,
+ Size: uint64(size),
+ }
+ return 0, nil, queue.Send(t, msg, t, wait, pid)
+}
+
+// Msgrcv implements msgrcv(2).
+func Msgrcv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+ id := ipc.ID(args[0].Int())
+ msgAddr := args[1].Pointer()
+ size := args[2].Int64()
+ mType := args[3].Int64()
+ flag := args[4].Int()
+
+ wait := flag&linux.IPC_NOWAIT != linux.IPC_NOWAIT
+ except := flag&linux.MSG_EXCEPT == linux.MSG_EXCEPT
+ truncate := flag&linux.MSG_NOERROR == linux.MSG_NOERROR
+
+ msgCopy := flag&linux.MSG_COPY == linux.MSG_COPY
+
+ msg, err := receive(t, id, mType, size, msgCopy, wait, truncate, except)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ buf := linux.MsgBuf{
+ Type: primitive.Int64(msg.Type),
+ Text: msg.Text,
+ }
+ if _, err := buf.CopyOut(t, msgAddr); err != nil {
+ return 0, nil, err
+ }
+ return uintptr(msg.Size), nil, nil
+}
+
+// receive returns a message from the queue with the given ID. If msgCopy is
+// true, a message is copied from the queue without being removed. Otherwise,
+// a message is removed from the queue and returned.
+func receive(t *kernel.Task, id ipc.ID, mType int64, maxSize int64, msgCopy, wait, truncate, except bool) (*msgqueue.Message, error) {
+ pid := int32(t.ThreadGroup().ID())
+
+ queue, err := t.IPCNamespace().MsgqueueRegistry().FindByID(id)
+ if err != nil {
+ return nil, err
+ }
+
+ if msgCopy {
+ if wait || except {
+ return nil, linuxerr.EINVAL
+ }
+ return queue.Copy(mType)
+ }
+ return queue.Receive(t, t, mType, maxSize, wait, truncate, except, pid)
+}
+
+// Msgctl implements msgctl(2).
+func Msgctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+ id := ipc.ID(args[0].Int())
+ cmd := args[1].Int()
+ buf := args[2].Pointer()
+
+ creds := auth.CredentialsFromContext(t)
+
+ r := t.IPCNamespace().MsgqueueRegistry()
+
+ switch cmd {
+ case linux.IPC_INFO:
+ info := r.IPCInfo(t)
+ _, err := info.CopyOut(t, buf)
+ return 0, nil, err
+ case linux.MSG_INFO:
+ msgInfo := r.MsgInfo(t)
+ _, err := msgInfo.CopyOut(t, buf)
+ return 0, nil, err
+ case linux.IPC_RMID:
+ return 0, nil, r.Remove(id, creds)
+ }
+
+ // Remaining commands use a queue.
+ queue, err := r.FindByID(id)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ switch cmd {
+ case linux.MSG_STAT:
+ // Technically, we should be treating id as "an index into the kernel's
+ // internal array that maintains information about all shared memory
+ // segments on the system". Since we don't track segments in an array,
+ // we'll just pretend the msqid is the index and do the same thing as
+ // IPC_STAT. Linux also uses the index as the msqid.
+ fallthrough
+ case linux.IPC_STAT:
+ stat, err := queue.Stat(t)
+ if err != nil {
+ return 0, nil, err
+ }
+ _, err = stat.CopyOut(t, buf)
+ return 0, nil, err
+
+ case linux.MSG_STAT_ANY:
+ stat, err := queue.StatAny(t)
+ if err != nil {
+ return 0, nil, err
+ }
+ _, err = stat.CopyOut(t, buf)
+ return 0, nil, err
+
+ case linux.IPC_SET:
+ var ds linux.MsqidDS
+ if _, err := ds.CopyIn(t, buf); err != nil {
+ return 0, nil, linuxerr.EINVAL
+ }
+ err := queue.Set(t, &ds)
+ return 0, nil, err
+
+ default:
+ return 0, nil, linuxerr.EINVAL
+ }
+}
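In the new Msgrcv path, the receive helper only honours MSG_COPY when IPC_NOWAIT is also set and MSG_EXCEPT is not; any other combination fails with EINVAL before the registry is consulted. A standalone sketch of that flag decoding (the octal constants mirror the Linux ABI values):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Flag bits as defined by the Linux ABI, copied here for illustration.
    const (
    	ipcNowait = 0o4000  // IPC_NOWAIT
    	msgExcept = 0o20000 // MSG_EXCEPT
    	msgCopy   = 0o40000 // MSG_COPY
    )

    var errEINVAL = errors.New("EINVAL")

    // checkMsgrcvFlags mirrors the decoding in Msgrcv/receive above: MSG_COPY
    // is a non-destructive peek and must not be combined with blocking
    // (missing IPC_NOWAIT) or with MSG_EXCEPT.
    func checkMsgrcvFlags(flag int) error {
    	wait := flag&ipcNowait != ipcNowait
    	except := flag&msgExcept == msgExcept
    	copyMsg := flag&msgCopy == msgCopy
    	if copyMsg && (wait || except) {
    		return errEINVAL
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkMsgrcvFlags(msgCopy))             // EINVAL: would block
    	fmt.Println(checkMsgrcvFlags(msgCopy | ipcNowait)) // <nil>
    }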
diff --git a/pkg/sentry/syscalls/linux/sys_pipe.go b/pkg/sentry/syscalls/linux/sys_pipe.go
index d95034347..5925c2263 100644
--- a/pkg/sentry/syscalls/linux/sys_pipe.go
+++ b/pkg/sentry/syscalls/linux/sys_pipe.go
@@ -16,13 +16,13 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -30,7 +30,7 @@ import (
// pipe2 implements the actual system call with flags.
func pipe2(t *kernel.Task, addr hostarch.Addr, flags uint) (uintptr, error) {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
r, w := pipe.NewConnectedPipe(t, pipe.DefaultPipeSize)
diff --git a/pkg/sentry/syscalls/linux/sys_poll.go b/pkg/sentry/syscalls/linux/sys_poll.go
index da548a14a..ee4dbbc64 100644
--- a/pkg/sentry/syscalls/linux/sys_poll.go
+++ b/pkg/sentry/syscalls/linux/sys_poll.go
@@ -18,13 +18,14 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -128,7 +129,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
// Wait for a notification.
timeout, err = t.BlockWithTimeout(ch, !forever, timeout)
if err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
err = nil
}
return timeout, 0, err
@@ -157,7 +158,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
// CopyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
func CopyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
pfd := make([]linux.PollFD, nfds)
@@ -184,7 +185,7 @@ func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration
pfd[i].Events |= linux.POLLHUP | linux.POLLERR
}
remainingTimeout, n, err := pollBlock(t, pfd, timeout)
- err = syserror.ConvertIntr(err, syserror.EINTR)
+ err = syserr.ConvertIntr(err, linuxerr.EINTR)
// The poll entries are copied out regardless of whether
// any are set or not. This aligns with the Linux behavior.
@@ -217,7 +218,7 @@ func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialB
func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Calculate the size of the fd sets (one bit per fd).
@@ -264,7 +265,7 @@ func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Ad
// OK. Linux is racy in the same way.
file := t.GetFile(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
file.DecRef(t)
@@ -294,7 +295,7 @@ func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Ad
// Do the syscall, then count the number of bits set.
if _, _, err = pollBlock(t, pfd, timeout); err != nil {
- return 0, syserror.ConvertIntr(err, syserror.EINTR)
+ return 0, syserr.ConvertIntr(err, linuxerr.EINTR)
}
// r, w, and e are currently event mask bitsets; unset bits corresponding
@@ -404,13 +405,13 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
- if err == syserror.EINTR {
+ if linuxerr.Equals(linuxerr.EINTR, err) {
t.SetSyscallRestartBlock(&pollRestartBlock{
pfdAddr: pfdAddr,
nfds: nfds,
timeout: remainingTimeout,
})
- return 0, syserror.ERESTART_RESTARTBLOCK
+ return 0, linuxerr.ERESTART_RESTARTBLOCK
}
return n, err
}
@@ -463,8 +464,8 @@ func Ppoll(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
//
// Note that this means that if err is nil but copyErr is not, copyErr is
// ignored. This is consistent with Linux.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
@@ -485,7 +486,7 @@ func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, err
}
if timeval.Sec < 0 || timeval.Usec < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
timeout = time.Duration(timeval.ToNsecCapped())
}
@@ -493,8 +494,8 @@ func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
copyErr := copyOutTimevalRemaining(t, startNs, timeout, timevalAddr)
// See comment in Ppoll.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
@@ -538,8 +539,8 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
copyErr := copyOutTimespecRemaining(t, startNs, timeout, timespecAddr)
// See comment in Ppoll.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
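Ppoll, Select, and Pselect above rewrite an EINTR from the blocking wait into ERESTARTNOHAND when the remaining-timeout copy-out succeeded, so the syscall is restarted when no signal handler had to run. A sketch of that rewrite, with local sentinels in place of linuxerr.EINTR and linuxerr.ERESTARTNOHAND:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var (
    	errEINTR         = errors.New("EINTR")
    	errRestartNoHand = errors.New("ERESTARTNOHAND")
    )

    // restartOnIntr mirrors the post-wait handling above: only an interrupted
    // wait with a successful timeout copy-out is converted; if the copy-out
    // failed, the original EINTR is returned instead.
    func restartOnIntr(err, copyErr error) error {
    	if errors.Is(err, errEINTR) && copyErr == nil {
    		return errRestartNoHand
    	}
    	return err
    }

    func main() {
    	fmt.Println(restartOnIntr(errEINTR, nil))                       // ERESTARTNOHAND
    	fmt.Println(restartOnIntr(errEINTR, errors.New("copy failed"))) // EINTR
    }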
diff --git a/pkg/sentry/syscalls/linux/sys_prctl.go b/pkg/sentry/syscalls/linux/sys_prctl.go
index 9890dd946..2ef1e6404 100644
--- a/pkg/sentry/syscalls/linux/sys_prctl.go
+++ b/pkg/sentry/syscalls/linux/sys_prctl.go
@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/mm"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Prctl implements linux syscall prctl(2).
@@ -38,7 +38,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_SET_PDEATHSIG:
sig := linux.Signal(args[1].Int())
if sig != 0 && !sig.IsValid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.SetParentDeathSignal(sig)
return 0, nil, nil
@@ -69,7 +69,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
d = mm.UserDumpable
default:
// N.B. Userspace may not pass SUID_DUMP_ROOT.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.MemoryManager().SetDumpability(d)
return 0, nil, nil
@@ -90,7 +90,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
} else if val == 1 {
t.SetKeepCaps(true)
} else {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
@@ -98,7 +98,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_SET_NAME:
addr := args[1].Pointer()
name, err := t.CopyInString(addr, linux.TASK_COMM_LEN-1)
- if err != nil && err != syserror.ENAMETOOLONG {
+ if err != nil && !linuxerr.Equals(linuxerr.ENAMETOOLONG, err) {
return 0, nil, err
}
t.SetName(name)
@@ -118,7 +118,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_SET_MM:
if !t.HasCapability(linux.CAP_SYS_RESOURCE) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
switch args[1].Int() {
@@ -127,13 +127,13 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Are they trying to set exe to a non-file?
if !fs.IsFile(file.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Set the underlying executable.
@@ -155,12 +155,12 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
t.Kernel().EmitUnimplementedEvent(t)
fallthrough
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
case linux.PR_SET_NO_NEW_PRIVS:
if args[1].Int() != 1 || args[2].Int() != 0 || args[3].Int() != 0 || args[4].Int() != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// PR_SET_NO_NEW_PRIVS is assumed to always be set.
// See kernel.Task.updateCredsForExecLocked.
@@ -168,7 +168,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_GET_NO_NEW_PRIVS:
if args[1].Int() != 0 || args[2].Int() != 0 || args[3].Int() != 0 || args[4].Int() != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 1, nil, nil
@@ -184,7 +184,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
default:
tracer := t.PIDNamespace().TaskWithID(kernel.ThreadID(pid))
if tracer == nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
t.SetYAMAException(tracer)
return 0, nil, nil
@@ -193,7 +193,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_SET_SECCOMP:
if args[1].Int() != linux.SECCOMP_MODE_FILTER {
// Unsupported mode.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, seccomp(t, linux.SECCOMP_SET_MODE_FILTER, 0, args[2].Pointer())
@@ -204,7 +204,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_CAPBSET_READ:
cp := linux.Capability(args[1].Uint64())
if !cp.Ok() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var rv uintptr
if auth.CapabilitySetOf(cp)&t.Credentials().BoundingCaps != 0 {
@@ -215,10 +215,25 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.PR_CAPBSET_DROP:
cp := linux.Capability(args[1].Uint64())
if !cp.Ok() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, t.DropBoundingCapability(cp)
+ case linux.PR_SET_CHILD_SUBREAPER:
+ // "If arg2 is nonzero, set the "child subreaper" attribute of
+ // the calling process; if arg2 is zero, unset the attribute."
+ //
+ // TODO(gvisor.dev/issues/2323): We only support setting, and
+ // only if the task is already TID 1 in the PID namespace,
+ // because it already acts as a subreaper in that case.
+ isPid1 := t.PIDNamespace().IDOfTask(t) == kernel.InitTID
+ if args[1].Int() != 0 && isPid1 {
+ return 0, nil, nil
+ }
+
+ t.Kernel().EmitUnimplementedEvent(t)
+ return 0, nil, linuxerr.EINVAL
+
case linux.PR_GET_TIMING,
linux.PR_SET_TIMING,
linux.PR_GET_TSC,
@@ -230,7 +245,6 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
linux.PR_MCE_KILL,
linux.PR_MCE_KILL_GET,
linux.PR_GET_TID_ADDRESS,
- linux.PR_SET_CHILD_SUBREAPER,
linux.PR_GET_CHILD_SUBREAPER,
linux.PR_GET_THP_DISABLE,
linux.PR_SET_THP_DISABLE,
@@ -240,7 +254,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
t.Kernel().EmitUnimplementedEvent(t)
fallthrough
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
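The new PR_SET_CHILD_SUBREAPER case above only succeeds when the caller is setting the attribute and is already TID 1 in its PID namespace, where it acts as a subreaper anyway; unsetting, or setting from any other task, is reported as unimplemented with EINVAL. A compact sketch of that decision (initTID stands in for kernel.InitTID):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errEINVAL = errors.New("EINVAL")

    const initTID = 1 // stand-in for kernel.InitTID

    // setChildSubreaper mirrors the decision above; the real path also emits
    // an "unimplemented" event before returning EINVAL.
    func setChildSubreaper(arg2 int32, tidInNamespace int32) error {
    	if arg2 != 0 && tidInNamespace == initTID {
    		return nil
    	}
    	return errEINVAL
    }

    func main() {
    	fmt.Println(setChildSubreaper(1, 1)) // <nil>
    	fmt.Println(setChildSubreaper(1, 7)) // EINVAL
    	fmt.Println(setChildSubreaper(0, 1)) // EINVAL (unsetting unsupported)
    }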
diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go
index ae545f80f..f86e87bc7 100644
--- a/pkg/sentry/syscalls/linux/sys_random.go
+++ b/pkg/sentry/syscalls/linux/sys_random.go
@@ -18,14 +18,13 @@ import (
"io"
"math"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
const (
@@ -47,7 +46,7 @@ func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// Flags are checked for validity but otherwise ignored. See above.
if flags & ^(_GRND_NONBLOCK|_GRND_RANDOM) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if length > math.MaxInt32 {
@@ -55,7 +54,7 @@ func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
ar, ok := addr.ToRange(uint64(length))
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
// "If the urandom source has been initialized, reads of up to 256 bytes
diff --git a/pkg/sentry/syscalls/linux/sys_read.go b/pkg/sentry/syscalls/linux/sys_read.go
index 13e5e3a51..18ea23913 100644
--- a/pkg/sentry/syscalls/linux/sys_read.go
+++ b/pkg/sentry/syscalls/linux/sys_read.go
@@ -18,12 +18,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/socket"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -46,19 +46,19 @@ func Read(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -71,7 +71,7 @@ func Read(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
n, err := readv(t, file, dst)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "read", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "read", file)
}
// Readahead implements readahead(2).
@@ -82,29 +82,29 @@ func Readahead(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is valid.
if int(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Return EINVAL; if the underlying file type does not support readahead,
// then Linux will return EINVAL to indicate as much. In the future, we
// may extend this function to actually support readahead hints.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Pread64 implements linux syscall pread64(2).
@@ -116,29 +116,29 @@ func Pread64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is reading at an offset supported?
if !file.Flags().Pread {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -151,7 +151,7 @@ func Pread64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := preadv(t, file, dst, offset)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "pread64", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "pread64", file)
}
// Readv implements linux syscall readv(2).
@@ -162,13 +162,13 @@ func Readv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Read the iovecs that specify the destination of the read.
@@ -181,7 +181,7 @@ func Readv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
n, err := readv(t, file, dst)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "readv", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "readv", file)
}
// Preadv implements linux syscall preadv(2).
@@ -193,23 +193,23 @@ func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is reading at an offset supported?
if !file.Flags().Pread {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Read the iovecs that specify the destination of the read.
@@ -222,7 +222,7 @@ func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := preadv(t, file, dst, offset)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "preadv", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "preadv", file)
}
// Preadv2 implements linux syscall preadv2(2).
@@ -242,30 +242,30 @@ func Preadv2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < -1 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is reading at an offset supported?
if offset > -1 && !file.Flags().Pread {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Check that the file is readable.
if !file.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check flags field.
// Note: gVisor does not implement the RWF_HIPRI feature, but the flag is
// accepted as a valid flag argument for preadv2.
if flags&^linux.RWF_VALID != 0 {
- return 0, nil, syserror.EOPNOTSUPP
+ return 0, nil, linuxerr.EOPNOTSUPP
}
// Read the iovecs that specify the destination of the read.
@@ -280,17 +280,17 @@ func Preadv2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if offset == -1 {
n, err := readv(t, file, dst)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "preadv2", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "preadv2", file)
}
n, err := preadv(t, file, dst, offset)
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "preadv2", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "preadv2", file)
}
func readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {
n, err := f.Readv(t, dst)
- if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
+ if err != linuxerr.ErrWouldBlock || f.Flags().NonBlocking {
if n > 0 {
// Queue notification if we read anything.
f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
@@ -303,7 +303,7 @@ func readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {
var deadline ktime.Time
if s, ok := f.FileOperations.(socket.Socket); ok {
dl := s.RecvTimeout()
- if dl < 0 && err == syserror.ErrWouldBlock {
+ if dl < 0 && err == linuxerr.ErrWouldBlock {
return n, err
}
if dl > 0 {
@@ -325,14 +325,14 @@ func readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {
// other than "would block".
n, err = f.Readv(t, dst)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -350,7 +350,7 @@ func readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {
func preadv(t *kernel.Task, f *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
n, err := f.Preadv(t, dst, offset)
- if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
+ if err != linuxerr.ErrWouldBlock || f.Flags().NonBlocking {
if n > 0 {
// Queue notification if we read anything.
f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
@@ -371,7 +371,7 @@ func preadv(t *kernel.Task, f *fs.File, dst usermem.IOSequence, offset int64) (i
// other than "would block".
n, err = f.Preadv(t, dst, offset+total)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
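The retry loops above distinguish two kinds of matching after the migration: the sentinel linuxerr.ErrWouldBlock is still compared directly with ==/!=, while errno-valued results such as ETIMEDOUT are now matched through linuxerr.Equals. A minimal standalone sketch of the timeout mapping at the tail of the readv loop, assuming only the linuxerr identifiers that appear in these hunks (illustrative, not part of the change itself):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// retryResult mirrors the tail of the readv retry loop: a timeout coming out
// of the blocking wait is reported to the caller as "would block", while any
// other error is passed through unchanged.
func retryResult(blockErr error) error {
	if linuxerr.Equals(linuxerr.ETIMEDOUT, blockErr) {
		return linuxerr.ErrWouldBlock
	}
	return blockErr
}

func main() {
	fmt.Println(retryResult(linuxerr.ETIMEDOUT)) // mapped to ErrWouldBlock
	fmt.Println(retryResult(linuxerr.EINTR))     // passed through unchanged
}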
diff --git a/pkg/sentry/syscalls/linux/sys_rlimit.go b/pkg/sentry/syscalls/linux/sys_rlimit.go
index e64246d57..7210333d2 100644
--- a/pkg/sentry/syscalls/linux/sys_rlimit.go
+++ b/pkg/sentry/syscalls/linux/sys_rlimit.go
@@ -16,12 +16,12 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/syserror"
)
// rlimit describes an implementation of 'struct rlimit', which may vary from
@@ -43,7 +43,7 @@ func newRlimit(t *kernel.Task) (rlimit, error) {
// On 64-bit system, struct rlimit and struct rlimit64 are identical.
return &rlimit64{}, nil
default:
- return nil, syserror.ENOSYS
+ return nil, linuxerr.ENOSYS
}
}
@@ -105,7 +105,7 @@ func prlimit64(t *kernel.Task, resource limits.LimitType, newLim *limits.Limit)
}
if _, ok := setableLimits[resource]; !ok {
- return limits.Limit{}, syserror.EPERM
+ return limits.Limit{}, linuxerr.EPERM
}
// "A privileged process (under Linux: one with the CAP_SYS_RESOURCE
@@ -129,7 +129,7 @@ func Getrlimit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
resource, ok := limits.FromLinuxResource[int(args[0].Int())]
if !ok {
// Return err; unknown limit.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
addr := args[1].Pointer()
rlim, err := newRlimit(t)
@@ -150,7 +150,7 @@ func Setrlimit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
resource, ok := limits.FromLinuxResource[int(args[0].Int())]
if !ok {
// Return err; unknown limit.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
addr := args[1].Pointer()
rlim, err := newRlimit(t)
@@ -158,7 +158,7 @@ func Setrlimit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, err
}
if _, err := rlim.CopyIn(t, addr); err != nil {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
_, err = prlimit64(t, resource, rlim.toLimit())
return 0, nil, err
@@ -170,7 +170,7 @@ func Prlimit64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
resource, ok := limits.FromLinuxResource[int(args[1].Int())]
if !ok {
// Return err; unknown limit.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
newRlimAddr := args[2].Pointer()
oldRlimAddr := args[3].Pointer()
@@ -179,18 +179,18 @@ func Prlimit64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
if newRlimAddr != 0 {
var nrl rlimit64
if err := nrl.copyIn(t, newRlimAddr); err != nil {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
newLim = nrl.toLimit()
}
if tid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ot := t
if tid > 0 {
if ot = t.PIDNamespace().TaskWithID(tid); ot == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
}
@@ -207,7 +207,7 @@ func Prlimit64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
cred.RealKGID != tcred.RealKGID ||
cred.RealKGID != tcred.EffectiveKGID ||
cred.RealKGID != tcred.SavedKGID {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
}
@@ -218,7 +218,7 @@ func Prlimit64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
if oldRlimAddr != 0 {
if err := makeRlimit64(oldLim).copyOut(t, oldRlimAddr); err != nil {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
}
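The Prlimit64 hunk keeps the same argument validation and only swaps the returned error identities. A small standalone sketch of that tid handling, where the lookup callback is a hypothetical stand-in for t.PIDNamespace().TaskWithID (illustrative only):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkTID mirrors the tid handling in Prlimit64: negative IDs are EINVAL,
// unknown positive IDs are ESRCH, and 0 means "apply to the calling task".
// The lookup callback stands in for the PID namespace query.
func checkTID(tid int32, lookup func(int32) bool) error {
	if tid < 0 {
		return linuxerr.EINVAL
	}
	if tid > 0 && !lookup(tid) {
		return linuxerr.ESRCH
	}
	return nil
}

func main() {
	known := func(tid int32) bool { return tid == 42 }
	fmt.Println(checkTID(-1, known)) // EINVAL
	fmt.Println(checkTID(7, known))  // ESRCH
	fmt.Println(checkTID(42, known)) // <nil>
}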
diff --git a/pkg/sentry/syscalls/linux/sys_rseq.go b/pkg/sentry/syscalls/linux/sys_rseq.go
index 90db10ea6..8328a3742 100644
--- a/pkg/sentry/syscalls/linux/sys_rseq.go
+++ b/pkg/sentry/syscalls/linux/sys_rseq.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// RSeq implements syscall rseq(2).
@@ -32,7 +32,7 @@ func RSeq(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Event for applications that want rseq on a configuration
// that doesn't support them.
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
switch flags {
@@ -43,6 +43,6 @@ func RSeq(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
return 0, nil, t.ClearRSeq(addr, length, signature)
default:
// Unknown flag.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/syscalls/linux/sys_rusage.go b/pkg/sentry/syscalls/linux/sys_rusage.go
index ac5c98a54..a689abcc9 100644
--- a/pkg/sentry/syscalls/linux/sys_rusage.go
+++ b/pkg/sentry/syscalls/linux/sys_rusage.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/usage"
- "gvisor.dev/gvisor/pkg/syserror"
)
func getrusage(t *kernel.Task, which int32) linux.Rusage {
@@ -76,7 +76,7 @@ func Getrusage(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
addr := args[1].Pointer()
if which != linux.RUSAGE_SELF && which != linux.RUSAGE_CHILDREN && which != linux.RUSAGE_THREAD {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ru := getrusage(t, which)
diff --git a/pkg/sentry/syscalls/linux/sys_sched.go b/pkg/sentry/syscalls/linux/sys_sched.go
index bfcf44b6f..59c7a4b22 100644
--- a/pkg/sentry/syscalls/linux/sys_sched.go
+++ b/pkg/sentry/syscalls/linux/sys_sched.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -38,13 +38,13 @@ func SchedGetparam(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
pid := args[0].Int()
param := args[1].Pointer()
if param == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if pid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
r := SchedParam{schedPriority: onlyPriority}
if _, err := r.CopyOut(t, param); err != nil {
@@ -58,10 +58,10 @@ func SchedGetparam(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
func SchedGetscheduler(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
pid := args[0].Int()
if pid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
return onlyScheduler, nil, nil
}
@@ -72,20 +72,20 @@ func SchedSetscheduler(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ke
policy := args[1].Int()
param := args[2].Pointer()
if pid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if policy != onlyScheduler {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
var r SchedParam
if _, err := r.CopyIn(t, param); err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if r.schedPriority != onlyPriority {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
}
diff --git a/pkg/sentry/syscalls/linux/sys_seccomp.go b/pkg/sentry/syscalls/linux/sys_seccomp.go
index e16d6ff3f..b0dc84b8d 100644
--- a/pkg/sentry/syscalls/linux/sys_seccomp.go
+++ b/pkg/sentry/syscalls/linux/sys_seccomp.go
@@ -17,10 +17,10 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.
@@ -44,7 +44,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error {
// We only support SECCOMP_SET_MODE_FILTER at the moment.
if mode != linux.SECCOMP_SET_MODE_FILTER {
// Unsupported mode.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
tsync := flags&linux.SECCOMP_FILTER_FLAG_TSYNC != 0
@@ -52,7 +52,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error {
// The only flag we support now is SECCOMP_FILTER_FLAG_TSYNC.
if flags&^linux.SECCOMP_FILTER_FLAG_TSYNC != 0 {
// Unsupported flag.
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
var fprog userSockFprog
@@ -66,7 +66,7 @@ func seccomp(t *kernel.Task, mode, flags uint64, addr hostarch.Addr) error {
compiledFilter, err := bpf.Compile(filter)
if err != nil {
t.Debugf("Invalid seccomp-bpf filter: %v", err)
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
return t.AppendSyscallFilter(compiledFilter, tsync)
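The seccomp changes leave the validation structure untouched; only the error package changes. A standalone sketch of the mode/flag checks, built solely from the linux and linuxerr constants referenced in the hunk above (not the sentry implementation itself):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// validateSeccompArgs mirrors the early checks in seccomp(): only
// SECCOMP_SET_MODE_FILTER is supported, and TSYNC is the only accepted flag.
func validateSeccompArgs(mode, flags uint64) error {
	if mode != linux.SECCOMP_SET_MODE_FILTER {
		return linuxerr.EINVAL // unsupported mode
	}
	if flags&^linux.SECCOMP_FILTER_FLAG_TSYNC != 0 {
		return linuxerr.EINVAL // unsupported flag
	}
	return nil
}

func main() {
	fmt.Println(validateSeccompArgs(linux.SECCOMP_SET_MODE_FILTER, linux.SECCOMP_FILTER_FLAG_TSYNC)) // <nil>
	fmt.Println(validateSeccompArgs(0, 0))                                                           // EINVAL: not FILTER mode
}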
diff --git a/pkg/sentry/syscalls/linux/sys_sem.go b/pkg/sentry/syscalls/linux/sys_sem.go
index c84260080..5a119b21c 100644
--- a/pkg/sentry/syscalls/linux/sys_sem.go
+++ b/pkg/sentry/syscalls/linux/sys_sem.go
@@ -19,20 +19,20 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
- "gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
)
const opsMax = 500 // SEMOPM
// Semget handles: semget(key_t key, int nsems, int semflg)
func Semget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- key := args[0].Int()
+ key := ipc.Key(args[0].Int())
nsems := args[1].Int()
flag := args[2].Int()
@@ -46,7 +46,7 @@ func Semget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
if err != nil {
return 0, nil, err
}
- return uintptr(set.ID), nil, nil
+ return uintptr(set.ID()), nil, nil
}
// Semtimedop handles: semtimedop(int semid, struct sembuf *sops, size_t nsops, const struct timespec *timeout)
@@ -56,15 +56,15 @@ func Semtimedop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return Semop(t, args)
}
- id := args[0].Int()
+ id := ipc.ID(args[0].Int())
sembufAddr := args[1].Pointer()
nsops := args[2].SizeT()
timespecAddr := args[3].Pointer()
if nsops <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if nsops > opsMax {
- return 0, nil, syserror.E2BIG
+ return 0, nil, linuxerr.E2BIG
}
ops := make([]linux.Sembuf, nsops)
@@ -77,12 +77,12 @@ func Semtimedop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return 0, nil, err
}
if timeout.Sec < 0 || timeout.Nsec < 0 || timeout.Nsec >= 1e9 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if err := semTimedOp(t, id, ops, true, timeout.ToDuration()); err != nil {
- if err == syserror.ETIMEDOUT {
- return 0, nil, syserror.EAGAIN
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ return 0, nil, linuxerr.EAGAIN
}
return 0, nil, err
}
@@ -91,15 +91,15 @@ func Semtimedop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Semop handles: semop(int semid, struct sembuf *sops, size_t nsops)
func Semop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- id := args[0].Int()
+ id := ipc.ID(args[0].Int())
sembufAddr := args[1].Pointer()
nsops := args[2].SizeT()
if nsops <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if nsops > opsMax {
- return 0, nil, syserror.E2BIG
+ return 0, nil, linuxerr.E2BIG
}
ops := make([]linux.Sembuf, nsops)
@@ -109,11 +109,11 @@ func Semop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, semTimedOp(t, id, ops, false, time.Second)
}
-func semTimedOp(t *kernel.Task, id int32, ops []linux.Sembuf, haveTimeout bool, timeout time.Duration) error {
+func semTimedOp(t *kernel.Task, id ipc.ID, ops []linux.Sembuf, haveTimeout bool, timeout time.Duration) error {
set := t.IPCNamespace().SemaphoreRegistry().FindByID(id)
if set == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
pid := t.Kernel().GlobalInit().PIDNamespace().IDOfThreadGroup(t.ThreadGroup())
@@ -131,7 +131,7 @@ func semTimedOp(t *kernel.Task, id int32, ops []linux.Sembuf, haveTimeout bool,
// Semctl handles: semctl(int semid, int semnum, int cmd, ...)
func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- id := args[0].Int()
+ id := ipc.ID(args[0].Int())
num := args[1].Int()
cmd := args[2].Int()
@@ -139,7 +139,7 @@ func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.SETVAL:
val := args[3].Int()
if val > math.MaxInt16 {
- return 0, nil, syserror.ERANGE
+ return 0, nil, linuxerr.ERANGE
}
return 0, nil, setVal(t, id, num, int16(val))
@@ -165,8 +165,7 @@ func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, err
}
- perms := fs.FilePermsFromMode(linux.FileMode(s.SemPerm.Mode & 0777))
- return 0, nil, ipcSet(t, id, auth.UID(s.SemPerm.UID), auth.GID(s.SemPerm.GID), perms)
+ return 0, nil, ipcSet(t, id, &s)
case linux.GETPID:
v, err := getPID(t, id, num)
@@ -210,7 +209,7 @@ func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.SEM_STAT:
arg := args[3].Pointer()
// id is an index in SEM_STAT.
- semid, ds, err := semStat(t, id)
+ semid, ds, err := semStat(t, int32(id))
if err != nil {
return 0, nil, err
}
@@ -222,7 +221,7 @@ func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.SEM_STAT_ANY:
arg := args[3].Pointer()
// id is an index in SEM_STAT.
- semid, ds, err := semStatAny(t, id)
+ semid, ds, err := semStatAny(t, int32(id))
if err != nil {
return 0, nil, err
}
@@ -232,41 +231,30 @@ func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return uintptr(semid), nil, err
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
-func remove(t *kernel.Task, id int32) error {
+func remove(t *kernel.Task, id ipc.ID) error {
r := t.IPCNamespace().SemaphoreRegistry()
creds := auth.CredentialsFromContext(t)
- return r.RemoveID(id, creds)
+ return r.Remove(id, creds)
}
-func ipcSet(t *kernel.Task, id int32, uid auth.UID, gid auth.GID, perms fs.FilePermissions) error {
+func ipcSet(t *kernel.Task, id ipc.ID, ds *linux.SemidDS) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
-
- creds := auth.CredentialsFromContext(t)
- kuid := creds.UserNamespace.MapToKUID(uid)
- if !kuid.Ok() {
- return syserror.EINVAL
- }
- kgid := creds.UserNamespace.MapToKGID(gid)
- if !kgid.Ok() {
- return syserror.EINVAL
- }
- owner := fs.FileOwner{UID: kuid, GID: kgid}
- return set.Change(t, creds, owner, perms)
+ return set.Set(t, ds)
}
-func ipcStat(t *kernel.Task, id int32) (*linux.SemidDS, error) {
+func ipcStat(t *kernel.Task, id ipc.ID) (*linux.SemidDS, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
return set.GetStat(creds)
@@ -276,45 +264,45 @@ func semStat(t *kernel.Task, index int32) (int32, *linux.SemidDS, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByIndex(index)
if set == nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
ds, err := set.GetStat(creds)
if err != nil {
return 0, ds, err
}
- return set.ID, ds, nil
+ return int32(set.ID()), ds, nil
}
func semStatAny(t *kernel.Task, index int32) (int32, *linux.SemidDS, error) {
set := t.IPCNamespace().SemaphoreRegistry().FindByIndex(index)
if set == nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
ds, err := set.GetStatAny(creds)
if err != nil {
return 0, ds, err
}
- return set.ID, ds, nil
+ return int32(set.ID()), ds, nil
}
-func setVal(t *kernel.Task, id int32, num int32, val int16) error {
+func setVal(t *kernel.Task, id ipc.ID, num int32, val int16) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
pid := t.Kernel().GlobalInit().PIDNamespace().IDOfThreadGroup(t.ThreadGroup())
return set.SetVal(t, num, val, creds, int32(pid))
}
-func setValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
+func setValAll(t *kernel.Task, id ipc.ID, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
vals := make([]uint16, set.Size())
if _, err := primitive.CopyUint16SliceIn(t, array, vals); err != nil {
@@ -325,21 +313,21 @@ func setValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
return set.SetValAll(t, vals, creds, int32(pid))
}
-func getVal(t *kernel.Task, id int32, num int32) (int16, error) {
+func getVal(t *kernel.Task, id ipc.ID, num int32) (int16, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
return set.GetVal(num, creds)
}
-func getValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
+func getValAll(t *kernel.Task, id ipc.ID, array hostarch.Addr) error {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
vals, err := set.GetValAll(creds)
@@ -350,11 +338,11 @@ func getValAll(t *kernel.Task, id int32, array hostarch.Addr) error {
return err
}
-func getPID(t *kernel.Task, id int32, num int32) (int32, error) {
+func getPID(t *kernel.Task, id ipc.ID, num int32) (int32, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
gpid, err := set.GetPID(num, creds)
@@ -369,21 +357,21 @@ func getPID(t *kernel.Task, id int32, num int32) (int32, error) {
return int32(tg.ID()), nil
}
-func getZCnt(t *kernel.Task, id int32, num int32) (uint16, error) {
+func getZCnt(t *kernel.Task, id ipc.ID, num int32) (uint16, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
return set.CountZeroWaiters(num, creds)
}
-func getNCnt(t *kernel.Task, id int32, num int32) (uint16, error) {
+func getNCnt(t *kernel.Task, id ipc.ID, num int32) (uint16, error) {
r := t.IPCNamespace().SemaphoreRegistry()
set := r.FindByID(id)
if set == nil {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
creds := auth.CredentialsFromContext(t)
return set.CountNegativeWaiters(num, creds)
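Alongside the error change, this file replaces raw int32 identifiers with the typed ipc.Key and ipc.ID from pkg/sentry/kernel/ipc, and registry objects now expose ID() as a method. A minimal sketch of the conversion at the syscall boundary, assuming only the types used in the hunks above:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
)

func main() {
	// Syscall arguments still arrive as int32 (args[0].Int()); the updated
	// handlers wrap them in typed identifiers before touching the registry.
	key := ipc.Key(1234) // semget/shmget key argument
	id := ipc.ID(5)      // semid/shmid argument for the remaining calls

	fmt.Println(key, id)
}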
diff --git a/pkg/sentry/syscalls/linux/sys_shm.go b/pkg/sentry/syscalls/linux/sys_shm.go
index 584064143..840540506 100644
--- a/pkg/sentry/syscalls/linux/sys_shm.go
+++ b/pkg/sentry/syscalls/linux/sys_shm.go
@@ -16,15 +16,16 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
+ "gvisor.dev/gvisor/pkg/sentry/kernel/ipc"
"gvisor.dev/gvisor/pkg/sentry/kernel/shm"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Shmget implements shmget(2).
func Shmget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- key := shm.Key(args[0].Int())
+ key := ipc.Key(args[0].Int())
size := uint64(args[1].SizeT())
flag := args[2].Int()
@@ -40,31 +41,31 @@ func Shmget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, err
}
defer segment.DecRef(t)
- return uintptr(segment.ID), nil, nil
+ return uintptr(segment.ID()), nil, nil
}
// findSegment retrieves a shm segment by the given id.
//
// findSegment returns a reference on Shm.
-func findSegment(t *kernel.Task, id shm.ID) (*shm.Shm, error) {
+func findSegment(t *kernel.Task, id ipc.ID) (*shm.Shm, error) {
r := t.IPCNamespace().ShmRegistry()
segment := r.FindByID(id)
if segment == nil {
// No segment with provided id.
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
return segment, nil
}
// Shmat implements shmat(2).
func Shmat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- id := shm.ID(args[0].Int())
+ id := ipc.ID(args[0].Int())
addr := args[1].Pointer()
flag := args[2].Int()
segment, err := findSegment(t, id)
if err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
defer segment.DecRef(t)
@@ -89,7 +90,7 @@ func Shmdt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// Shmctl implements shmctl(2).
func Shmctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- id := shm.ID(args[0].Int())
+ id := ipc.ID(args[0].Int())
cmd := args[1].Int()
buf := args[2].Pointer()
@@ -106,7 +107,7 @@ func Shmctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.IPC_STAT:
segment, err := findSegment(t, id)
if err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
defer segment.DecRef(t)
@@ -130,7 +131,7 @@ func Shmctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Remaining commands refer to a specific segment.
segment, err := findSegment(t, id)
if err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
defer segment.DecRef(t)
@@ -155,6 +156,6 @@ func Shmctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, nil
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go
index 27a7f7fe1..03871d713 100644
--- a/pkg/sentry/syscalls/linux/sys_signal.go
+++ b/pkg/sentry/syscalls/linux/sys_signal.go
@@ -19,12 +19,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/signalfd"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// "For a process to have permission to send a signal it must
@@ -79,10 +80,10 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
for {
target := t.PIDNamespace().TaskWithID(pid)
if target == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
if !mayKill(t, target, sig) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
info := &linux.SignalInfo{
Signo: int32(sig),
@@ -90,7 +91,7 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
}
info.SetPID(int32(target.PIDNamespace().IDOfTask(t)))
info.SetUID(int32(t.Credentials().RealKUID.In(target.UserNamespace()).OrOverflow()))
- if err := target.SendGroupSignal(info); err != syserror.ESRCH {
+ if err := target.SendGroupSignal(info); !linuxerr.Equals(linuxerr.ESRCH, err) {
return 0, nil, err
}
}
@@ -130,7 +131,7 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
info.SetPID(int32(tg.PIDNamespace().IDOfTask(t)))
info.SetUID(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
err := tg.SendSignal(info)
- if err == syserror.ESRCH {
+ if linuxerr.Equals(linuxerr.ESRCH, err) {
// ESRCH is ignored because it means the task
// exited while we were iterating. This is a
// race which would not normally exist on
@@ -145,7 +146,7 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
if delivered > 0 {
return 0, nil, lastErr
}
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
default:
// "If pid equals 0, then sig is sent to every process in the process
// group of the calling process."
@@ -159,11 +160,11 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// If pid != -1 (i.e. signalling a process group), the returned error
// is the last error from any call to group_send_sig_info.
- lastErr := syserror.ESRCH
+ lastErr := error(linuxerr.ESRCH)
for _, tg := range t.PIDNamespace().ThreadGroups() {
if t.PIDNamespace().IDOfProcessGroup(tg.ProcessGroup()) == pgid {
if !mayKill(t, tg.Leader(), sig) {
- lastErr = syserror.EPERM
+ lastErr = linuxerr.EPERM
continue
}
@@ -174,7 +175,7 @@ func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
info.SetPID(int32(tg.PIDNamespace().IDOfTask(t)))
info.SetUID(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow()))
// See note above regarding ESRCH race above.
- if err := tg.SendSignal(info); err != syserror.ESRCH {
+ if err := tg.SendSignal(info); !linuxerr.Equals(linuxerr.ESRCH, err) {
lastErr = err
}
}
@@ -202,16 +203,16 @@ func Tkill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// N.B. Inconsistent with man page, linux actually rejects calls with
// tid <=0 by EINVAL. This isn't the same for all signal calls.
if tid <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
target := t.PIDNamespace().TaskWithID(tid)
if target == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
if !mayKill(t, target, sig) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
return 0, nil, target.SendSignal(tkillSigInfo(t, target, sig))
}
@@ -225,17 +226,17 @@ func Tgkill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// N.B. Inconsistent with man page, linux actually rejects calls with
// tgid/tid <=0 by EINVAL. This isn't the same for all signal calls.
if tgid <= 0 || tid <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
targetTG := t.PIDNamespace().ThreadGroupWithID(tgid)
target := t.PIDNamespace().TaskWithID(tid)
if targetTG == nil || target == nil || target.ThreadGroup() != targetTG {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
if !mayKill(t, target, sig) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
return 0, nil, target.SendSignal(tkillSigInfo(t, target, sig))
}
@@ -248,7 +249,7 @@ func RtSigaction(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
sigsetsize := args[3].SizeT()
if sigsetsize != linux.SignalSetSize {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var newactptr *linux.SigAction
@@ -291,7 +292,7 @@ func RtSigprocmask(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
sigsetsize := args[3].SizeT()
if sigsetsize != linux.SignalSetSize {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
oldmask := t.SignalMask()
if setaddr != 0 {
@@ -308,7 +309,7 @@ func RtSigprocmask(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
case linux.SIG_SETMASK:
t.SetSignalMask(mask)
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
if oldaddr != 0 {
@@ -338,7 +339,7 @@ func Sigaltstack(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// these semantics apply to changing the signal stack via a
// ucontext during a signal handler.
if !t.SetSignalStack(alt) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
}
@@ -347,7 +348,7 @@ func Sigaltstack(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Pause implements linux syscall pause(2).
func Pause(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- return 0, nil, syserror.ConvertIntr(t.Block(nil), syserror.ERESTARTNOHAND)
+ return 0, nil, syserr.ConvertIntr(t.Block(nil), linuxerr.ERESTARTNOHAND)
}
// RtSigpending implements linux syscall rt_sigpending(2).
@@ -377,7 +378,7 @@ func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
return 0, nil, err
}
if !d.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
timeout = time.Duration(d.ToNsecCapped())
} else {
@@ -420,20 +421,20 @@ func RtSigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
// Deliver to the given task's thread group.
target := t.PIDNamespace().TaskWithID(pid)
if target == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
// If the sender is not the receiver, it can't use si_codes used by the
// kernel or SI_TKILL.
if (info.Code >= 0 || info.Code == linux.SI_TKILL) && target != t {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
if !mayKill(t, target, sig) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
- if err := target.SendGroupSignal(&info); err != syserror.ESRCH {
+ if err := target.SendGroupSignal(&info); !linuxerr.Equals(linuxerr.ESRCH, err) {
return 0, nil, err
}
}
@@ -449,7 +450,7 @@ func RtTgsigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
// N.B. Inconsistent with man page, linux actually rejects calls with
// tgid/tid <=0 by EINVAL. This isn't the same for all signal calls.
if tgid <= 0 || tid <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Copy in the info. See RtSigqueueinfo above.
@@ -463,17 +464,17 @@ func RtTgsigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
targetTG := t.PIDNamespace().ThreadGroupWithID(tgid)
target := t.PIDNamespace().TaskWithID(tid)
if targetTG == nil || target == nil || target.ThreadGroup() != targetTG {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
// If the sender is not the receiver, it can't use si_codes used by the
// kernel or SI_TKILL.
if (info.Code >= 0 || info.Code == linux.SI_TKILL) && target != t {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
if !mayKill(t, target, sig) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
return 0, nil, target.SendSignal(&info)
}
@@ -495,7 +496,7 @@ func RtSigsuspend(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
t.SetSavedSignalMask(oldmask)
// Perform the wait.
- return 0, nil, syserror.ConvertIntr(t.Block(nil), syserror.ERESTARTNOHAND)
+ return 0, nil, syserr.ConvertIntr(t.Block(nil), linuxerr.ERESTARTNOHAND)
}
// RestartSyscall implements the linux syscall restart_syscall(2).
@@ -511,7 +512,7 @@ func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
// function is never null by (re)initializing it with one that translates
// the restart into EINTR. We'll emulate that behaviour.
t.Debugf("Restart block missing in restart_syscall(2). Did ptrace inject a return value of ERESTART_RESTARTBLOCK?")
- return 0, nil, syserror.EINTR
+ return 0, nil, linuxerr.EINTR
}
// sharedSignalfd is shared between the two calls.
@@ -524,7 +525,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
// Always check for valid flags, even if not creating.
if flags&^(linux.SFD_NONBLOCK|linux.SFD_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is this a change to an existing signalfd?
@@ -533,7 +534,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
if fd != -1 {
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -544,7 +545,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
}
// Not a signalfd.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Create a new file.
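The signal hunks replace direct errno comparisons such as err != syserror.ESRCH with !linuxerr.Equals(linuxerr.ESRCH, err); since SendGroupSignal may return nil, the replacement preserves the old behaviour only if Equals reports no match for a nil error, which these call sites rely on. A small sketch of that filter, under the same assumption:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// shouldReturn mirrors the Kill loop above: every result other than ESRCH,
// including success (nil), is handed back to the caller; ESRCH means the
// target raced with exit and the loop retries the lookup.
func shouldReturn(err error) bool {
	return !linuxerr.Equals(linuxerr.ESRCH, err)
}

func main() {
	fmt.Println(shouldReturn(nil))            // true: success is returned
	fmt.Println(shouldReturn(linuxerr.EPERM)) // true: real errors are returned
	fmt.Println(shouldReturn(linuxerr.ESRCH)) // false: retry the loop
}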
diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go
index e07917613..50ddbc142 100644
--- a/pkg/sentry/syscalls/linux/sys_socket.go
+++ b/pkg/sentry/syscalls/linux/sys_socket.go
@@ -18,6 +18,7 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
@@ -29,7 +30,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/socket/control"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -117,7 +117,7 @@ type multipleMessageHeader64 struct {
// from the untrusted address space range.
func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
addrBuf := make([]byte, addrlen)
@@ -139,7 +139,7 @@ func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr h
}
if int32(bufLen) < 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Write the length unconditionally.
@@ -173,7 +173,7 @@ func Socket(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Check and initialize the flags.
if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Create the new socket.
@@ -205,7 +205,7 @@ func SocketPair(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Check and initialize the flags.
if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
fileFlags := fs.SettableFileFlags{
@@ -252,14 +252,14 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Capture address and call syscall implementation.
@@ -269,7 +269,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
}
blocking := !file.Flags().NonBlocking
- return 0, nil, syserror.ConvertIntr(s.Connect(t, a, blocking).ToError(), syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(s.Connect(t, a, blocking).ToError(), linuxerr.ERESTARTSYS)
}
// accept is the implementation of the accept syscall. It is called by accept
@@ -277,20 +277,20 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
// Call the syscall implementation for this socket, then copy the
@@ -300,12 +300,12 @@ func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr,
peerRequested := addrLen != 0
nfd, peer, peerLen, e := s.Accept(t, peerRequested, flags, blocking)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
if peerRequested {
// NOTE(magi): Linux does not give you an error if it can't
// write the data back out so neither do we.
- if err := writeAddress(t, peer, peerLen, addr, addrLen); err == syserror.EINVAL {
+ if err := writeAddress(t, peer, peerLen, addr, addrLen); linuxerr.Equals(linuxerr.EINVAL, err) {
return 0, err
}
}
@@ -342,14 +342,14 @@ func Bind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Capture address and call syscall implementation.
@@ -369,14 +369,14 @@ func Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if backlog > maxListenBacklog {
@@ -407,21 +407,21 @@ func Shutdown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Validate how, then call syscall implementation.
switch how {
case linux.SHUT_RD, linux.SHUT_WR, linux.SHUT_RDWR:
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, s.Shutdown(t, int(how)).ToError()
@@ -438,14 +438,14 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Read the length. Reject negative values.
@@ -454,7 +454,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return 0, nil, err
}
if optLen < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Call syscall implementation then copy both value and value len out.
@@ -519,21 +519,21 @@ func SetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if optLen < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if optLen > maxOptLen {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
buf := t.CopyScratchBuffer(int(optLen))
if _, err := t.CopyInBytes(optValAddr, buf); err != nil {
@@ -557,14 +557,14 @@ func GetSockName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Get the socket name and copy it to the caller.
@@ -585,14 +585,14 @@ func GetPeerName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Get the socket peer name and copy it to the caller.
@@ -612,25 +612,25 @@ func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if file.Flags().NonBlocking {
@@ -660,7 +660,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if vlen > linux.UIO_MAXIOV {
@@ -669,20 +669,20 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if file.Flags().NonBlocking {
@@ -697,7 +697,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, err
}
if !ts.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
deadline = t.Kernel().MonotonicClock().Now().Add(ts.ToDuration())
haveDeadline = true
@@ -717,7 +717,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := uint64(0); i < uint64(vlen); i++ {
mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
var n uintptr
if n, err = recvSingleMsg(t, s, mp, flags, haveDeadline, deadline); err != nil {
@@ -727,7 +727,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Copy the received length to the caller.
lp, ok := mp.AddLength(messageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
break
@@ -749,7 +749,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags
}
if msg.IovLen > linux.UIO_MAXIOV {
- return 0, syserror.EMSGSIZE
+ return 0, linuxerr.EMSGSIZE
}
dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
@@ -762,7 +762,7 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags
if msg.ControlLen == 0 && msg.NameLen == 0 {
n, mflags, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)
if err != nil {
- return 0, syserror.ConvertIntr(err.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(err.ToError(), linuxerr.ERESTARTSYS)
}
if !cms.Unix.Empty() {
mflags |= linux.MSG_CTRUNC
@@ -780,11 +780,11 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags
}
if msg.ControlLen > maxControlLen {
- return 0, syserror.ENOBUFS
+ return 0, linuxerr.ENOBUFS
}
n, mflags, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
defer cms.Release(t)
@@ -829,25 +829,25 @@ func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr hostarch.Addr, flags
// recvfrom and recv syscall handlers.
func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CONFIRM) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
if file.Flags().NonBlocking {
@@ -873,7 +873,7 @@ func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, fla
n, _, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)
cm.Release(t)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
// Copy the address to the caller.
@@ -907,25 +907,25 @@ func SendMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if file.Flags().NonBlocking {
@@ -945,7 +945,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if vlen > linux.UIO_MAXIOV {
@@ -955,19 +955,19 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if file.Flags().NonBlocking {
@@ -979,7 +979,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := uint64(0); i < uint64(vlen); i++ {
mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
var n uintptr
if n, err = sendSingleMsg(t, s, file, mp, flags); err != nil {
@@ -989,7 +989,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Copy the received length to the caller.
lp, ok := mp.AddLength(messageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
break
@@ -1014,7 +1014,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostar
if msg.ControlLen > 0 {
// Put an upper bound to prevent large allocations.
if msg.ControlLen > maxControlLen {
- return 0, syserror.ENOBUFS
+ return 0, linuxerr.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
@@ -1034,7 +1034,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostar
// Read data then call the sendmsg implementation.
if msg.IovLen > linux.UIO_MAXIOV {
- return 0, syserror.EMSGSIZE
+ return 0, linuxerr.EMSGSIZE
}
src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
@@ -1059,7 +1059,7 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostar
// Call the syscall implementation.
n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, controlMessages)
- err = handleIOError(t, n != 0, e.ToError(), syserror.ERESTARTSYS, "sendmsg", file)
+ err = handleIOError(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendmsg", file)
// Control messages should be released on error as well as for zero-length
// messages, which are discarded by the receiver.
if n == 0 || err != nil {
@@ -1073,20 +1073,20 @@ func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr hostar
func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFile(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.FileOperations.(socket.Socket)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
if file.Flags().NonBlocking {
@@ -1121,7 +1121,7 @@ func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags
// Call the syscall implementation.
n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, socket.ControlMessages{Unix: control.New(t, s, nil)})
- return uintptr(n), handleIOError(t, n != 0, e.ToError(), syserror.ERESTARTSYS, "sendto", file)
+ return uintptr(n), handleIOError(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendto", file)
}
// SendTo implements the linux syscall sendto(2).
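The socket hunks keep the flag masks and the FileOperations type assertion as-is and only change which package supplies EINVAL, EBADF and ENOTSOCK. A standalone sketch of the stype validation from Socket/SocketPair, using the constants referenced above (SOCK_STREAM is just a representative valid type; illustrative only):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
)

// checkSocketType mirrors the stype check in Socket/SocketPair: the low bits
// select the socket type, and only SOCK_NONBLOCK/SOCK_CLOEXEC may be set on
// top of them.
func checkSocketType(stype int32) error {
	if stype&^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
		return linuxerr.EINVAL
	}
	return nil
}

func main() {
	fmt.Println(checkSocketType(linux.SOCK_STREAM | linux.SOCK_CLOEXEC)) // <nil>
	fmt.Println(checkSocketType(1 << 20))                                // EINVAL
}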
diff --git a/pkg/sentry/syscalls/linux/sys_splice.go b/pkg/sentry/syscalls/linux/sys_splice.go
index 134051124..8c8847efa 100644
--- a/pkg/sentry/syscalls/linux/sys_splice.go
+++ b/pkg/sentry/syscalls/linux/sys_splice.go
@@ -16,18 +16,18 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
// doSplice implements a blocking splice operation.
func doSplice(t *kernel.Task, outFile, inFile *fs.File, opts fs.SpliceOpts, nonBlocking bool) (int64, error) {
if opts.Length < 0 || opts.SrcStart < 0 || opts.DstStart < 0 || (opts.SrcStart+opts.Length < 0) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if opts.Length == 0 {
return 0, nil
@@ -45,9 +45,9 @@ func doSplice(t *kernel.Task, outFile, inFile *fs.File, opts fs.SpliceOpts, nonB
for {
n, err = fs.Splice(t, outFile, inFile, opts)
- if n != 0 || err != syserror.ErrWouldBlock {
+ if n != 0 || err != linuxerr.ErrWouldBlock {
break
- } else if err == syserror.ErrWouldBlock && nonBlocking {
+ } else if err == linuxerr.ErrWouldBlock && nonBlocking {
break
}
@@ -105,33 +105,33 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Get files.
inFile := t.GetFile(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
if !inFile.Flags().Read {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
outFile := t.GetFile(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
if !outFile.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Verify that the outfile Append flag is not set.
if outFile.Flags().Append {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Verify that we have a regular infile. This is a requirement; the
// same check appears in Linux (fs/splice.c:splice_direct_to_actor).
if !fs.IsRegular(inFile.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var (
@@ -142,7 +142,7 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Verify that when offset address is not null, infile must be
// seekable. The fs.Splice routine itself validates basic read.
if !inFile.Flags().Pread {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Copy in the offset.
@@ -176,7 +176,7 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// We can only pass a single file to handleIOError, so pick inFile
// arbitrarily. This is used only for debugging purposes.
- return uintptr(n), nil, handleIOError(t, false, err, syserror.ERESTARTSYS, "sendfile", inFile)
+ return uintptr(n), nil, handleIOError(t, false, err, linuxerr.ERESTARTSYS, "sendfile", inFile)
}
// Splice implements splice(2).
@@ -190,19 +190,19 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Check for invalid flags.
if flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get files.
outFile := t.GetFile(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
inFile := t.GetFile(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
@@ -226,11 +226,11 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
switch {
case fs.IsPipe(inFileAttr) && !fs.IsPipe(outFileAttr):
if inOffset != 0 {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
if outOffset != 0 {
if !outFile.Flags().Pwrite {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var offset int64
@@ -244,11 +244,11 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
case !fs.IsPipe(inFileAttr) && fs.IsPipe(outFileAttr):
if outOffset != 0 {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
if inOffset != 0 {
if !inFile.Flags().Pread {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var offset int64
@@ -262,15 +262,15 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
case fs.IsPipe(inFileAttr) && fs.IsPipe(outFileAttr):
if inOffset != 0 || outOffset != 0 {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// We may not refer to the same pipe; otherwise it's a continuous loop.
if inFileAttr.InodeID == outFileAttr.InodeID {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Splice data.
@@ -286,7 +286,7 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
// See above; inFile is chosen arbitrarily here.
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "splice", inFile)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "splice", inFile)
}
// Tee implements tee(2).
@@ -298,30 +298,30 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
// Check for invalid flags.
if flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get files.
outFile := t.GetFile(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
inFile := t.GetFile(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
// All files must be pipes.
if !fs.IsPipe(inFile.Dirent.Inode.StableAttr) || !fs.IsPipe(outFile.Dirent.Inode.StableAttr) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// We may not refer to the same pipe; see above.
if inFile.Dirent.Inode.StableAttr.InodeID == outFile.Dirent.Inode.StableAttr.InodeID {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// The operation is non-blocking if anything is non-blocking.
@@ -339,5 +339,5 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
}
// See above; inFile is chosen arbitrarily here.
- return uintptr(n), nil, handleIOError(t, false, err, syserror.ERESTARTSYS, "tee", inFile)
+ return uintptr(n), nil, handleIOError(t, false, err, linuxerr.ERESTARTSYS, "tee", inFile)
}
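
The sys_splice.go hunks above show the mechanical shape of this migration: every syserror.E* return becomes the matching linuxerr.E* value, and the restart errno handed to handleIOError is now linuxerr.ERESTARTSYS. As a minimal sketch of the converted validation style, the hypothetical helper below (not part of the tree; it assumes only the linuxerr identifiers already used in this diff) restates the pipe/offset checks from Splice:

package spliceexample // hypothetical illustration, not gVisor code

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// checkSpliceOffsets mirrors the offset validation in Splice above:
// offsets may not be supplied for pipe ends, and at least one end must
// be a pipe.
func checkSpliceOffsets(inIsPipe, outIsPipe bool, inOffset, outOffset int64) error {
	switch {
	case inIsPipe && outIsPipe:
		if inOffset != 0 || outOffset != 0 {
			return linuxerr.ESPIPE
		}
	case inIsPipe:
		if inOffset != 0 {
			return linuxerr.ESPIPE
		}
	case outIsPipe:
		if outOffset != 0 {
			return linuxerr.ESPIPE
		}
	default:
		return linuxerr.EINVAL // neither end is a pipe
	}
	return nil
}
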
diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go
index 2338ba44b..3da385c66 100644
--- a/pkg/sentry/syscalls/linux/sys_stat.go
+++ b/pkg/sentry/syscalls/linux/sys_stat.go
@@ -16,11 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -56,7 +56,7 @@ func Fstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// Annoying. What's wrong with fstat?
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -98,7 +98,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -108,7 +108,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// stat implements stat from the given *fs.Dirent.
func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr hostarch.Addr) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
uattr, err := d.Inode.UnstableAttr(t)
if err != nil {
@@ -139,13 +139,13 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
statxAddr := args[4].Pointer()
if mask&linux.STATX__RESERVED != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if flags&^(linux.AT_SYMLINK_NOFOLLOW|linux.AT_EMPTY_PATH|linux.AT_STATX_SYNC_TYPE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if flags&linux.AT_STATX_SYNC_TYPE == linux.AT_STATX_SYNC_TYPE {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, dirPath, err := copyInPath(t, pathAddr, flags&linux.AT_EMPTY_PATH != 0)
@@ -156,7 +156,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
if path == "" {
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
uattr, err := file.UnstableAttr(t)
@@ -170,7 +170,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, fileOpOn(t, fd, path, resolve, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
uattr, err := d.Inode.UnstableAttr(t)
if err != nil {
@@ -247,7 +247,7 @@ func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
diff --git a/pkg/sentry/syscalls/linux/sys_stat_amd64.go b/pkg/sentry/syscalls/linux/sys_stat_amd64.go
index 0a04a6113..e38066ea8 100644
--- a/pkg/sentry/syscalls/linux/sys_stat_amd64.go
+++ b/pkg/sentry/syscalls/linux/sys_stat_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package linux
diff --git a/pkg/sentry/syscalls/linux/sys_stat_arm64.go b/pkg/sentry/syscalls/linux/sys_stat_arm64.go
index 5a3b1bfad..b2ea390c5 100644
--- a/pkg/sentry/syscalls/linux/sys_stat_arm64.go
+++ b/pkg/sentry/syscalls/linux/sys_stat_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package linux
diff --git a/pkg/sentry/syscalls/linux/sys_sync.go b/pkg/sentry/syscalls/linux/sys_sync.go
index 5ebd4461f..0c22599bf 100644
--- a/pkg/sentry/syscalls/linux/sys_sync.go
+++ b/pkg/sentry/syscalls/linux/sys_sync.go
@@ -16,10 +16,11 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// LINT.IfChange
@@ -37,7 +38,7 @@ func Syncfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -52,12 +53,12 @@ func Fsync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
err := file.Fsync(t, 0, fs.FileMaxOffset, fs.SyncAll)
- return 0, nil, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
// Fdatasync implements linux syscall fdatasync(2).
@@ -68,12 +69,12 @@ func Fdatasync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
err := file.Fsync(t, 0, fs.FileMaxOffset, fs.SyncData)
- return 0, nil, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
// SyncFileRange implements linux syscall sync_file_range(2)
@@ -86,13 +87,13 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
uflags := args[3].Uint()
if offset < 0 || offset+nbytes < offset {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if uflags&^(linux.SYNC_FILE_RANGE_WAIT_BEFORE|
linux.SYNC_FILE_RANGE_WRITE|
linux.SYNC_FILE_RANGE_WAIT_AFTER) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if nbytes == 0 {
@@ -101,7 +102,7 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -111,7 +112,7 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
if uflags&linux.SYNC_FILE_RANGE_WAIT_BEFORE != 0 &&
uflags&linux.SYNC_FILE_RANGE_WAIT_AFTER == 0 {
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
// SYNC_FILE_RANGE_WRITE initiates write-out of all dirty pages in the
@@ -136,7 +137,7 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
err = file.Fsync(t, offset, fs.FileMaxOffset, fs.SyncData)
}
- return 0, nil, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
// LINT.ThenChange(vfs2/sync.go)
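
sys_sync.go also moves the interruption conversion from syserror.ConvertIntr to syserr.ConvertIntr, now fed linuxerr.ERESTARTSYS. Conceptually that conversion behaves like the sketch below, which illustrates the presumed semantics rather than the pkg/syserr implementation: the internal interrupted sentinel is swapped for a restartable errno, and any other error passes through untouched.

package syncexample // hypothetical illustration, not gVisor code

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// convertIntr sketches the intended semantics of the ConvertIntr call
// above: an operation interrupted by a signal is reported as the
// restartable errno passed in (ERESTARTSYS in sys_sync.go), and any
// other error is returned unchanged.
func convertIntr(err, intr error) error {
	if err == linuxerr.ErrInterrupted {
		return intr
	}
	return err
}
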
diff --git a/pkg/sentry/syscalls/linux/sys_syslog.go b/pkg/sentry/syscalls/linux/sys_syslog.go
index 40c8bb061..15acb2b8b 100644
--- a/pkg/sentry/syscalls/linux/sys_syslog.go
+++ b/pkg/sentry/syscalls/linux/sys_syslog.go
@@ -15,9 +15,9 @@
package linux
import (
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -40,7 +40,7 @@ func Syslog(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
switch command {
case _SYSLOG_ACTION_READ_ALL:
if size < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if size > logBufLen {
size = logBufLen
@@ -56,6 +56,6 @@ func Syslog(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case _SYSLOG_ACTION_SIZE_BUFFER:
return logBufLen, nil, nil
default:
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
}
diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go
index 0d5056303..d74173c56 100644
--- a/pkg/sentry/syscalls/linux/sys_thread.go
+++ b/pkg/sentry/syscalls/linux/sys_thread.go
@@ -17,8 +17,8 @@ package linux
import (
"path"
- "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -27,15 +27,9 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/sched"
"gvisor.dev/gvisor/pkg/sentry/loader"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
-const (
- // exitSignalMask is the signal mask to be sent at exit. Same as CSIGNAL in linux.
- exitSignalMask = 0xff
-)
-
var (
// ExecMaxTotalSize is the maximum length of all argv and envv entries.
//
@@ -112,11 +106,11 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host
}
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
atEmptyPath := flags&linux.AT_EMPTY_PATH != 0
if !atEmptyPath && len(pathname) == 0 {
- return 0, nil, syserror.ENOENT
+ return 0, nil, linuxerr.ENOENT
}
resolveFinal := flags&linux.AT_SYMLINK_NOFOLLOW == 0
@@ -135,7 +129,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host
// Need to extract the given FD.
f, fdFlags := t.FDTable().Get(dirFD)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
closeOnExec = fdFlags.CloseOnExec
@@ -154,7 +148,7 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host
wd = f.Dirent
wd.IncRef()
if !fs.IsDir(wd.Inode.StableAttr) {
- return 0, nil, syserror.ENOTDIR
+ return 0, nil, linuxerr.ENOTDIR
}
}
}
@@ -187,47 +181,30 @@ func execveat(t *kernel.Task, dirFD int32, pathnameAddr, argvAddr, envvAddr host
// Exit implements linux syscall exit(2).
func Exit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- status := int(args[0].Int())
- t.PrepareExit(kernel.ExitStatus{Code: status})
+ status := args[0].Int()
+ t.PrepareExit(linux.WaitStatusExit(status & 0xff))
return 0, kernel.CtrlDoExit, nil
}
// ExitGroup implements linux syscall exit_group(2).
func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- status := int(args[0].Int())
- t.PrepareGroupExit(kernel.ExitStatus{Code: status})
+ status := args[0].Int()
+ t.PrepareGroupExit(linux.WaitStatusExit(status & 0xff))
return 0, kernel.CtrlDoExit, nil
}
// clone is used by Clone, Fork, and VFork.
func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {
- opts := kernel.CloneOptions{
- SharingOptions: kernel.SharingOptions{
- NewAddressSpace: flags&linux.CLONE_VM == 0,
- NewSignalHandlers: flags&linux.CLONE_SIGHAND == 0,
- NewThreadGroup: flags&linux.CLONE_THREAD == 0,
- TerminationSignal: linux.Signal(flags & exitSignalMask),
- NewPIDNamespace: flags&linux.CLONE_NEWPID == linux.CLONE_NEWPID,
- NewUserNamespace: flags&linux.CLONE_NEWUSER == linux.CLONE_NEWUSER,
- NewNetworkNamespace: flags&linux.CLONE_NEWNET == linux.CLONE_NEWNET,
- NewFiles: flags&linux.CLONE_FILES == 0,
- NewFSContext: flags&linux.CLONE_FS == 0,
- NewUTSNamespace: flags&linux.CLONE_NEWUTS == linux.CLONE_NEWUTS,
- NewIPCNamespace: flags&linux.CLONE_NEWIPC == linux.CLONE_NEWIPC,
- },
- Stack: stack,
- SetTLS: flags&linux.CLONE_SETTLS == linux.CLONE_SETTLS,
- TLS: tls,
- ChildClearTID: flags&linux.CLONE_CHILD_CLEARTID == linux.CLONE_CHILD_CLEARTID,
- ChildSetTID: flags&linux.CLONE_CHILD_SETTID == linux.CLONE_CHILD_SETTID,
- ChildTID: childTID,
- ParentSetTID: flags&linux.CLONE_PARENT_SETTID == linux.CLONE_PARENT_SETTID,
- ParentTID: parentTID,
- Vfork: flags&linux.CLONE_VFORK == linux.CLONE_VFORK,
- Untraced: flags&linux.CLONE_UNTRACED == linux.CLONE_UNTRACED,
- InheritTracer: flags&linux.CLONE_PTRACE == linux.CLONE_PTRACE,
- }
- ntid, ctrl, err := t.Clone(&opts)
+ args := linux.CloneArgs{
+ Flags: uint64(uint32(flags) &^ linux.CSIGNAL),
+ Pidfd: uint64(parentTID),
+ ChildTID: uint64(childTID),
+ ParentTID: uint64(parentTID),
+ ExitSignal: uint64(flags & linux.CSIGNAL),
+ Stack: uint64(stack),
+ TLS: uint64(tls),
+ }
+ ntid, ctrl, err := t.Clone(&args)
return uintptr(ntid), ctrl, err
}
@@ -260,13 +237,13 @@ func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error {
wopts.NonCloneTasks = true
wopts.CloneTasks = true
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if options&linux.WCONTINUED != 0 {
wopts.Events |= kernel.EventGroupContinue
}
if options&linux.WNOHANG == 0 {
- wopts.BlockInterruptErr = syserror.ERESTARTSYS
+ wopts.BlockInterruptErr = linuxerr.ERESTARTSYS
}
if options&linux.WNOTHREAD == 0 {
wopts.SiblingChildren = true
@@ -277,7 +254,7 @@ func parseCommonWaitOptions(wopts *kernel.WaitOptions, options int) error {
// wait4 waits for the given child process to exit.
func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusageAddr hostarch.Addr) (uintptr, error) {
if options&^(linux.WNOHANG|linux.WUNTRACED|linux.WCONTINUED|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
wopts := kernel.WaitOptions{
Events: kernel.EventExit | kernel.EventTraceeStop,
@@ -315,7 +292,7 @@ func wait4(t *kernel.Task, pid int, statusAddr hostarch.Addr, options int, rusag
return 0, err
}
if statusAddr != 0 {
- if _, err := primitive.CopyUint32Out(t, statusAddr, wr.Status); err != nil {
+ if _, err := primitive.CopyUint32Out(t, statusAddr, uint32(wr.Status)); err != nil {
return 0, err
}
}
@@ -358,10 +335,10 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
rusageAddr := args[4].Pointer()
if options&^(linux.WNOHANG|linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED|linux.WNOWAIT|linux.WNOTHREAD|linux.WALL|linux.WCLONE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if options&(linux.WEXITED|linux.WSTOPPED|linux.WCONTINUED) == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
wopts := kernel.WaitOptions{
Events: kernel.EventTraceeStop,
@@ -374,7 +351,7 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
case linux.P_PGID:
wopts.SpecificPGID = kernel.ProcessGroupID(id)
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if err := parseCommonWaitOptions(&wopts, options); err != nil {
@@ -418,23 +395,22 @@ func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
}
si.SetPID(int32(wr.TID))
si.SetUID(int32(wr.UID))
- // TODO(b/73541790): convert kernel.ExitStatus to functions and make
- // WaitResult.Status a linux.WaitStatus.
- s := unix.WaitStatus(wr.Status)
+ s := wr.Status
switch {
case s.Exited():
si.Code = linux.CLD_EXITED
si.SetStatus(int32(s.ExitStatus()))
case s.Signaled():
- si.Code = linux.CLD_KILLED
- si.SetStatus(int32(s.Signal()))
- case s.CoreDump():
- si.Code = linux.CLD_DUMPED
- si.SetStatus(int32(s.Signal()))
+ if s.CoreDumped() {
+ si.Code = linux.CLD_DUMPED
+ } else {
+ si.Code = linux.CLD_KILLED
+ }
+ si.SetStatus(int32(s.TerminationSignal()))
case s.Stopped():
if wr.Event == kernel.EventTraceeStop {
si.Code = linux.CLD_TRAPPED
- si.SetStatus(int32(s.TrapCause()))
+ si.SetStatus(int32(s.PtraceEvent()))
} else {
si.Code = linux.CLD_STOPPED
si.SetStatus(int32(s.StopSignal()))
@@ -461,29 +437,16 @@ func SetTidAddress(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
// Unshare implements linux syscall unshare(2).
func Unshare(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
flags := args[0].Int()
- opts := kernel.SharingOptions{
- NewAddressSpace: flags&linux.CLONE_VM == linux.CLONE_VM,
- NewSignalHandlers: flags&linux.CLONE_SIGHAND == linux.CLONE_SIGHAND,
- NewThreadGroup: flags&linux.CLONE_THREAD == linux.CLONE_THREAD,
- NewPIDNamespace: flags&linux.CLONE_NEWPID == linux.CLONE_NEWPID,
- NewUserNamespace: flags&linux.CLONE_NEWUSER == linux.CLONE_NEWUSER,
- NewNetworkNamespace: flags&linux.CLONE_NEWNET == linux.CLONE_NEWNET,
- NewFiles: flags&linux.CLONE_FILES == linux.CLONE_FILES,
- NewFSContext: flags&linux.CLONE_FS == linux.CLONE_FS,
- NewUTSNamespace: flags&linux.CLONE_NEWUTS == linux.CLONE_NEWUTS,
- NewIPCNamespace: flags&linux.CLONE_NEWIPC == linux.CLONE_NEWIPC,
- }
// "CLONE_NEWPID automatically implies CLONE_THREAD as well." - unshare(2)
- if opts.NewPIDNamespace {
- opts.NewThreadGroup = true
+ if flags&linux.CLONE_NEWPID != 0 {
+ flags |= linux.CLONE_THREAD
}
// "... specifying CLONE_NEWUSER automatically implies CLONE_THREAD. Since
// Linux 3.9, CLONE_NEWUSER also automatically implies CLONE_FS."
- if opts.NewUserNamespace {
- opts.NewThreadGroup = true
- opts.NewFSContext = true
+ if flags&linux.CLONE_NEWUSER != 0 {
+ flags |= linux.CLONE_THREAD | linux.CLONE_FS
}
- return 0, nil, t.Unshare(&opts)
+ return 0, nil, t.Unshare(flags)
}
// SchedYield implements linux syscall sched_yield(2).
@@ -504,7 +467,7 @@ func SchedSetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
} else {
task = t.PIDNamespace().TaskWithID(kernel.ThreadID(tid))
if task == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
}
@@ -528,7 +491,7 @@ func SchedGetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
// in an array of "unsigned long" so the buffer needs to
// be a multiple of the word size.
if size&(t.Arch().Width()-1) > 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var task *kernel.Task
@@ -537,7 +500,7 @@ func SchedGetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
} else {
task = t.PIDNamespace().TaskWithID(kernel.ThreadID(tid))
if task == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
}
@@ -545,7 +508,7 @@ func SchedGetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *ker
// The buffer needs to be big enough to hold a cpumask with
// all possible cpus.
if size < mask.Size() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
_, err := t.CopyOutBytes(maskAddr, mask)
@@ -590,16 +553,16 @@ func Setpgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if pid != 0 {
ot := t.PIDNamespace().TaskWithID(pid)
if ot == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
tg = ot.ThreadGroup()
if tg.Leader() != ot {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Setpgid only operates on child threadgroups.
if tg != t.ThreadGroup() && (tg.Leader().Parent() == nil || tg.Leader().Parent().ThreadGroup() != t.ThreadGroup()) {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
}
@@ -609,7 +572,7 @@ func Setpgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if pgid == 0 {
pgid = defaultPGID
} else if pgid < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// If the pgid is the same as the group, then create a new one. Otherwise,
@@ -654,7 +617,7 @@ func Getpgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
target := t.PIDNamespace().TaskWithID(tid)
if target == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
return uintptr(t.PIDNamespace().IDOfProcessGroup(target.ThreadGroup().ProcessGroup())), nil, nil
@@ -674,7 +637,7 @@ func Getsid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
target := t.PIDNamespace().TaskWithID(tid)
if target == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
return uintptr(t.PIDNamespace().IDOfSession(target.ThreadGroup().Session())), nil, nil
@@ -698,7 +661,7 @@ func Getpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
}
if task == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
// From kernel/sys.c:getpriority:
@@ -712,7 +675,7 @@ func Getpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// PRIO_USER and PRIO_PGRP have no further implementation yet.
return 0, nil, nil
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
@@ -744,7 +707,7 @@ func Setpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
}
if task == nil {
- return 0, nil, syserror.ESRCH
+ return 0, nil, linuxerr.ESRCH
}
task.SetNiceness(niceval)
@@ -754,7 +717,7 @@ func Setpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// PRIO_USER and PRIO_PGRP have no further implementation yet.
return 0, nil, nil
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
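
Beyond the errno renames, sys_thread.go switches Task.Clone from the removed kernel.CloneOptions struct to a linux.CloneArgs built straight from the clone(2) flag word: the low CSIGNAL byte becomes ExitSignal and the remaining bits are passed through as Flags. The hypothetical helper below restates that split on its own; it sets only the fields visible in the diff and is not a complete CloneArgs.

package cloneexample // hypothetical illustration, not gVisor code

import "gvisor.dev/gvisor/pkg/abi/linux"

// cloneArgsFromFlags splits a classic clone(2) flag word the same way
// clone() above does: the exit signal lives in the low CSIGNAL byte,
// and every other bit is a CLONE_* sharing flag.
func cloneArgsFromFlags(flags int, stack, parentTID, childTID, tls uint64) linux.CloneArgs {
	return linux.CloneArgs{
		Flags:      uint64(uint32(flags) &^ linux.CSIGNAL),
		ExitSignal: uint64(flags & linux.CSIGNAL),
		Stack:      stack,
		ParentTID:  parentTID,
		ChildTID:   childTID,
		TLS:        tls,
	}
}
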
diff --git a/pkg/sentry/syscalls/linux/sys_time.go b/pkg/sentry/syscalls/linux/sys_time.go
index 5c3b3dee2..4adc8b8a4 100644
--- a/pkg/sentry/syscalls/linux/sys_time.go
+++ b/pkg/sentry/syscalls/linux/sys_time.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
// The most significant 29 bits hold either a pid or a file descriptor.
@@ -75,7 +75,7 @@ func ClockGetres(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
}
if _, err := getClock(t, clockID); err != nil {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if addr == 0 {
@@ -94,12 +94,12 @@ type cpuClocker interface {
func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
if clockID < 0 {
if !isValidCPUClock(clockID) {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
targetTask := targetTask(t, clockID)
if targetTask == nil {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
var target cpuClocker
@@ -116,7 +116,7 @@ func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
// CPUCLOCK_SCHED is approximated by CPUCLOCK_PROF.
return target.CPUClock(), nil
default:
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
}
@@ -138,7 +138,7 @@ func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
case linux.CLOCK_THREAD_CPUTIME_ID:
return t.CPUClock(), nil
default:
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
}
@@ -157,7 +157,7 @@ func ClockGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// ClockSettime implements linux syscall clock_settime(2).
func ClockSettime(*kernel.Task, arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
// Time implements linux syscall time(2).
@@ -209,11 +209,11 @@ func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, end ktime.Time, rem host
timer.Destroy()
- switch err {
- case syserror.ETIMEDOUT:
+ switch {
+ case linuxerr.Equals(linuxerr.ETIMEDOUT, err):
// Slept for entire timeout.
return nil
- case syserror.ErrInterrupted:
+ case err == linuxerr.ErrInterrupted:
// Interrupted.
remaining := end.Sub(c.Now())
if remaining <= 0 {
@@ -234,9 +234,9 @@ func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, end ktime.Time, rem host
end: end,
rem: rem,
})
- return syserror.ERESTART_RESTARTBLOCK
+ return linuxerr.ERESTART_RESTARTBLOCK
}
- return syserror.ERESTARTNOHAND
+ return linuxerr.ERESTARTNOHAND
default:
panic(fmt.Sprintf("Impossible BlockWithTimer error %v", err))
}
@@ -253,7 +253,7 @@ func Nanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
if !ts.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Just like linux, we cap the timeout with the max number that int64 can
@@ -276,7 +276,7 @@ func ClockNanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
}
if !req.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Only allow clock constants also allowed by Linux.
@@ -284,7 +284,7 @@ func ClockNanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
if clockID != linux.CLOCK_REALTIME &&
clockID != linux.CLOCK_MONOTONIC &&
clockID != linux.CLOCK_PROCESS_CPUTIME_ID {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
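
clockNanosleepUntil above illustrates the two comparison styles the migration settles on: errno-valued results are matched with linuxerr.Equals, presumably because the returned error need not be the canonical linuxerr value, while internal sentinels such as linuxerr.ErrInterrupted are still compared with ==. A small sketch of that split, using only identifiers that appear in this diff:

package timeexample // hypothetical illustration, not gVisor code

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// classifySleepError mirrors the switch in clockNanosleepUntil above:
// an errno-valued result is matched with linuxerr.Equals, while the
// internal interruption sentinel is compared directly.
func classifySleepError(err error) string {
	switch {
	case linuxerr.Equals(linuxerr.ETIMEDOUT, err):
		return "slept for the full duration"
	case err == linuxerr.ErrInterrupted:
		return "interrupted; the remaining time can be reported or the sleep restarted"
	default:
		return "unexpected error"
	}
}
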
diff --git a/pkg/sentry/syscalls/linux/sys_timer.go b/pkg/sentry/syscalls/linux/sys_timer.go
index 45eef4feb..d39a0a6f5 100644
--- a/pkg/sentry/syscalls/linux/sys_timer.go
+++ b/pkg/sentry/syscalls/linux/sys_timer.go
@@ -18,9 +18,9 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
const nsecPerSec = int64(time.Second)
@@ -29,7 +29,7 @@ const nsecPerSec = int64(time.Second)
func Getitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
if t.Arch().Width() != 8 {
// Definition of linux.ItimerVal assumes 64-bit architecture.
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
timerID := args[0].Int()
@@ -51,7 +51,7 @@ func Getitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
func Setitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
if t.Arch().Width() != 8 {
// Definition of linux.ItimerVal assumes 64-bit architecture.
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
timerID := args[0].Int()
diff --git a/pkg/sentry/syscalls/linux/sys_timerfd.go b/pkg/sentry/syscalls/linux/sys_timerfd.go
index cadd9d348..4eeb94231 100644
--- a/pkg/sentry/syscalls/linux/sys_timerfd.go
+++ b/pkg/sentry/syscalls/linux/sys_timerfd.go
@@ -16,12 +16,12 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/timerfd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
// TimerfdCreate implements Linux syscall timerfd_create(2).
@@ -30,7 +30,7 @@ func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
flags := args[1].Int()
if flags&^(linux.TFD_CLOEXEC|linux.TFD_NONBLOCK) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var c ktime.Clock
@@ -40,7 +40,7 @@ func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
case linux.CLOCK_MONOTONIC, linux.CLOCK_BOOTTIME:
c = t.Kernel().MonotonicClock()
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
f := timerfd.NewFile(t, c)
defer f.DecRef(t)
@@ -66,18 +66,18 @@ func TimerfdSettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
oldValAddr := args[3].Pointer()
if flags&^(linux.TFD_TIMER_ABSTIME) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
tf, ok := f.FileOperations.(*timerfd.TimerOperations)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var newVal linux.Itimerspec
@@ -105,13 +105,13 @@ func TimerfdGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
tf, ok := f.FileOperations.(*timerfd.TimerOperations)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
tm, s := tf.GetTime()
diff --git a/pkg/sentry/syscalls/linux/sys_tls_amd64.go b/pkg/sentry/syscalls/linux/sys_tls_amd64.go
index 6ddd30d5c..bde672d67 100644
--- a/pkg/sentry/syscalls/linux/sys_tls_amd64.go
+++ b/pkg/sentry/syscalls/linux/sys_tls_amd64.go
@@ -12,16 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//+build amd64
+//go:build amd64
+// +build amd64
package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ArchPrctl implements linux syscall arch_prctl(2).
@@ -37,18 +38,18 @@ func ArchPrctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
return 0, nil, err
}
default:
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
case linux.ARCH_SET_FS:
fsbase := args[1].Uint64()
if !t.Arch().SetTLS(uintptr(fsbase)) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
case linux.ARCH_GET_GS, linux.ARCH_SET_GS:
t.Kernel().EmitUnimplementedEvent(t)
fallthrough
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
diff --git a/pkg/sentry/syscalls/linux/sys_tls_arm64.go b/pkg/sentry/syscalls/linux/sys_tls_arm64.go
index fb08a356e..dfa684387 100644
--- a/pkg/sentry/syscalls/linux/sys_tls_arm64.go
+++ b/pkg/sentry/syscalls/linux/sys_tls_arm64.go
@@ -12,17 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//+build arm64
+//go:build arm64
+// +build arm64
package linux
import (
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ArchPrctl is not defined for ARM64.
func ArchPrctl(*kernel.Task, arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
diff --git a/pkg/sentry/syscalls/linux/sys_utsname.go b/pkg/sentry/syscalls/linux/sys_utsname.go
index 66c5974f5..4e945d2c0 100644
--- a/pkg/sentry/syscalls/linux/sys_utsname.go
+++ b/pkg/sentry/syscalls/linux/sys_utsname.go
@@ -16,9 +16,9 @@ package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Uname implements linux syscall uname.
@@ -57,10 +57,10 @@ func Setdomainname(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
utsns := t.UTSNamespace()
if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, utsns.UserNamespace()) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
if size < 0 || size > linux.UTSLen {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
name, err := t.CopyInString(nameAddr, int(size))
@@ -79,10 +79,10 @@ func Sethostname(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
utsns := t.UTSNamespace()
if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, utsns.UserNamespace()) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
if size < 0 || size > linux.UTSLen {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
name := make([]byte, size)
diff --git a/pkg/sentry/syscalls/linux/sys_write.go b/pkg/sentry/syscalls/linux/sys_write.go
index 95bfe6606..4a4ef5046 100644
--- a/pkg/sentry/syscalls/linux/sys_write.go
+++ b/pkg/sentry/syscalls/linux/sys_write.go
@@ -18,12 +18,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/socket"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -46,19 +46,19 @@ func Write(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is writable.
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -71,7 +71,7 @@ func Write(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
n, err := writev(t, file, src)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "write", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "write", file)
}
// Pwrite64 implements linux syscall pwrite64(2).
@@ -83,29 +83,29 @@ func Pwrite64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is writing at an offset supported?
if !file.Flags().Pwrite {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Check that the file is writable.
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -118,7 +118,7 @@ func Pwrite64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
n, err := pwritev(t, file, src, offset)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "pwrite64", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "pwrite64", file)
}
// Writev implements linux syscall writev(2).
@@ -129,13 +129,13 @@ func Writev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is writable.
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Read the iovecs that specify the source of the write.
@@ -148,7 +148,7 @@ func Writev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := writev(t, file, src)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "writev", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "writev", file)
}
// Pwritev implements linux syscall pwritev(2).
@@ -160,23 +160,23 @@ func Pwritev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is writing at an offset supported?
if !file.Flags().Pwrite {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Check that the file is writable.
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Read the iovecs that specify the source of the write.
@@ -189,7 +189,7 @@ func Pwritev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := pwritev(t, file, src, offset)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "pwritev", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "pwritev", file)
}
// Pwritev2 implements linux syscall pwritev2(2).
@@ -208,34 +208,34 @@ func Pwritev2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
flags := int(args[5].Int())
if int(args[4].Int())&0x4 == 1 {
- return 0, nil, syserror.EACCES
+ return 0, nil, linuxerr.EACCES
}
file := t.GetFile(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < -1 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is writing at an offset supported?
if offset > -1 && !file.Flags().Pwrite {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
// Note: gVisor does not implement the RWF_HIPRI feature, but the flag is
// accepted as a valid flag argument for pwritev2.
if flags&^linux.RWF_VALID != 0 {
- return uintptr(flags), nil, syserror.EOPNOTSUPP
+ return uintptr(flags), nil, linuxerr.EOPNOTSUPP
}
// Check that the file is writeable.
if !file.Flags().Write {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Read the iovecs that specify the source of the write.
@@ -250,17 +250,17 @@ func Pwritev2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
if offset == -1 {
n, err := writev(t, file, src)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "pwritev2", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "pwritev2", file)
}
n, err := pwritev(t, file, src, offset)
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, handleIOError(t, n != 0, err, syserror.ERESTARTSYS, "pwritev2", file)
+ return uintptr(n), nil, handleIOError(t, n != 0, err, linuxerr.ERESTARTSYS, "pwritev2", file)
}
func writev(t *kernel.Task, f *fs.File, src usermem.IOSequence) (int64, error) {
n, err := f.Writev(t, src)
- if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
+ if err != linuxerr.ErrWouldBlock || f.Flags().NonBlocking {
if n > 0 {
// Queue notification if we wrote anything.
f.Dirent.InotifyEvent(linux.IN_MODIFY, 0)
@@ -273,7 +273,7 @@ func writev(t *kernel.Task, f *fs.File, src usermem.IOSequence) (int64, error) {
var deadline ktime.Time
if s, ok := f.FileOperations.(socket.Socket); ok {
dl := s.SendTimeout()
- if dl < 0 && err == syserror.ErrWouldBlock {
+ if dl < 0 && err == linuxerr.ErrWouldBlock {
return n, err
}
if dl > 0 {
@@ -295,14 +295,14 @@ func writev(t *kernel.Task, f *fs.File, src usermem.IOSequence) (int64, error) {
// anything other than "would block".
n, err = f.Writev(t, src)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -320,7 +320,7 @@ func writev(t *kernel.Task, f *fs.File, src usermem.IOSequence) (int64, error) {
func pwritev(t *kernel.Task, f *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
n, err := f.Pwritev(t, src, offset)
- if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
+ if err != linuxerr.ErrWouldBlock || f.Flags().NonBlocking {
if n > 0 {
// Queue notification if we wrote anything.
f.Dirent.InotifyEvent(linux.IN_MODIFY, 0)
@@ -341,7 +341,7 @@ func pwritev(t *kernel.Task, f *fs.File, src usermem.IOSequence, offset int64) (
// anything other than "would block".
n, err = f.Pwritev(t, src, offset+total)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
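
In the blocking write path above, a send-timeout deadline expiring inside BlockWithDeadline surfaces as ETIMEDOUT and is translated back to linuxerr.ErrWouldBlock, so the caller sees the same result a non-blocking write would produce. The hypothetical helper below restates just that translation; the real loop also accumulates the partial byte count as shown in the diff.

package writeexample // hypothetical illustration, not gVisor code

import "gvisor.dev/gvisor/pkg/errors/linuxerr"

// mapBlockError converts a deadline expiry from the blocking wait into
// the "would block" sentinel, as the writev retry loop above does; all
// other errors pass through unchanged.
func mapBlockError(err error) error {
	if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
		return linuxerr.ErrWouldBlock
	}
	return err
}
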
diff --git a/pkg/sentry/syscalls/linux/sys_xattr.go b/pkg/sentry/syscalls/linux/sys_xattr.go
index 28ad6a60e..baaf31191 100644
--- a/pkg/sentry/syscalls/linux/sys_xattr.go
+++ b/pkg/sentry/syscalls/linux/sys_xattr.go
@@ -18,11 +18,11 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// LINT.IfChange
@@ -47,7 +47,7 @@ func FGetXattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// TODO(b/113957122): Return EBADF if the fd was opened with O_PATH.
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
@@ -73,7 +73,7 @@ func getXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
n := 0
err = fileOpOn(t, linux.AT_FDCWD, path, resolveSymlink, func(_ *fs.Dirent, d *fs.Dirent, _ uint) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
n, err = getXattr(t, d, nameAddr, valueAddr, size)
@@ -99,7 +99,7 @@ func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, s
// TODO(b/148380782): Support xattrs in namespaces other than "user".
if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
// If getxattr(2) is called with size 0, the size of the value will be
@@ -116,7 +116,7 @@ func getXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, s
}
n := len(value)
if uint64(n) > requestedSize {
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
// Don't copy out the attribute value if size is 0.
@@ -151,7 +151,7 @@ func FSetXattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// TODO(b/113957122): Return EBADF if the fd was opened with O_PATH.
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
@@ -172,7 +172,7 @@ func setXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, resolveSymlink, func(_ *fs.Dirent, d *fs.Dirent, _ uint) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
return setXattr(t, d, nameAddr, valueAddr, uint64(size), flags)
@@ -182,7 +182,7 @@ func setXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlink
// setXattr implements setxattr(2) from the given *fs.Dirent.
func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, size uint64, flags uint32) error {
if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
name, err := copyInXattrName(t, nameAddr)
@@ -195,7 +195,7 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, s
}
if size > linux.XATTR_SIZE_MAX {
- return syserror.E2BIG
+ return linuxerr.E2BIG
}
buf := make([]byte, size)
if _, err := t.CopyInBytes(valueAddr, buf); err != nil {
@@ -204,7 +204,7 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, s
value := string(buf)
if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
if err := d.Inode.SetXattr(t, d, name, value, flags); err != nil {
@@ -217,13 +217,13 @@ func setXattr(t *kernel.Task, d *fs.Dirent, nameAddr, valueAddr hostarch.Addr, s
func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
- if err == syserror.ENAMETOOLONG {
- return "", syserror.ERANGE
+ if linuxerr.Equals(linuxerr.ENAMETOOLONG, err) {
+ return "", linuxerr.ERANGE
}
return "", err
}
if len(name) == 0 {
- return "", syserror.ERANGE
+ return "", linuxerr.ERANGE
}
return name, nil
}
@@ -241,9 +241,9 @@ func checkXattrPermissions(t *kernel.Task, i *fs.Inode, perms fs.PermMask) error
// Restrict xattrs to regular files and directories.
if !xattrFileTypeOk(i) {
if perms.Write {
- return syserror.EPERM
+ return linuxerr.EPERM
}
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
return i.CheckPermission(t, perms)
@@ -268,7 +268,7 @@ func FListXattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// TODO(b/113957122): Return EBADF if the fd was opened with O_PATH.
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
@@ -293,7 +293,7 @@ func listXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSymlin
n := 0
err = fileOpOn(t, linux.AT_FDCWD, path, resolveSymlink, func(_ *fs.Dirent, d *fs.Dirent, _ uint) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
n, err = listXattr(t, d, listAddr, size)
@@ -333,10 +333,10 @@ func listXattr(t *kernel.Task, d *fs.Dirent, addr hostarch.Addr, size uint64) (i
listSize := xattrListSize(xattrs)
if listSize > linux.XATTR_SIZE_MAX {
- return 0, syserror.E2BIG
+ return 0, linuxerr.E2BIG
}
if uint64(listSize) > requestedSize {
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
// Don't copy out the attributes if size is 0.
@@ -382,7 +382,7 @@ func FRemoveXattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
// TODO(b/113957122): Return EBADF if the fd was opened with O_PATH.
f := t.GetFile(fd)
if f == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer f.DecRef(t)
@@ -400,7 +400,7 @@ func removeXattrFromPath(t *kernel.Task, args arch.SyscallArguments, resolveSyml
return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, resolveSymlink, func(_ *fs.Dirent, d *fs.Dirent, _ uint) error {
if dirPath && !fs.IsDir(d.Inode.StableAttr) {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
return removeXattr(t, d, nameAddr)
@@ -419,7 +419,7 @@ func removeXattr(t *kernel.Task, d *fs.Dirent, nameAddr hostarch.Addr) error {
}
if !strings.HasPrefix(name, linux.XATTR_USER_PREFIX) {
- return syserror.EOPNOTSUPP
+ return linuxerr.EOPNOTSUPP
}
if err := d.Inode.RemoveXattr(t, d, name); err != nil {
diff --git a/pkg/sentry/syscalls/linux/timespec.go b/pkg/sentry/syscalls/linux/timespec.go
index 3edc922eb..d90652a3f 100644
--- a/pkg/sentry/syscalls/linux/timespec.go
+++ b/pkg/sentry/syscalls/linux/timespec.go
@@ -18,9 +18,9 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// copyTimespecIn copies a Timespec from the untrusted app range to the kernel.
@@ -37,7 +37,7 @@ func copyTimespecIn(t *kernel.Task, addr hostarch.Addr) (linux.Timespec, error)
ts.Nsec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return ts, nil
default:
- return linux.Timespec{}, syserror.ENOSYS
+ return linux.Timespec{}, linuxerr.ENOSYS
}
}
@@ -51,7 +51,7 @@ func copyTimespecOut(t *kernel.Task, addr hostarch.Addr, ts *linux.Timespec) err
_, err := t.CopyOutBytes(addr, out)
return err
default:
- return syserror.ENOSYS
+ return linuxerr.ENOSYS
}
}
@@ -69,7 +69,7 @@ func copyTimevalIn(t *kernel.Task, addr hostarch.Addr) (linux.Timeval, error) {
tv.Usec = int64(hostarch.ByteOrder.Uint64(in[8:]))
return tv, nil
default:
- return linux.Timeval{}, syserror.ENOSYS
+ return linux.Timeval{}, linuxerr.ENOSYS
}
}
@@ -83,7 +83,7 @@ func copyTimevalOut(t *kernel.Task, addr hostarch.Addr, tv *linux.Timeval) error
_, err := t.CopyOutBytes(addr, out)
return err
default:
- return syserror.ENOSYS
+ return linuxerr.ENOSYS
}
}
@@ -103,7 +103,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.
return 0, err
}
if !timespec.Valid() {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
timeout = time.Duration(timespec.ToNsecCapped())
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/BUILD b/pkg/sentry/syscalls/linux/vfs2/BUILD
index 5ce0bc714..1e3bd2a50 100644
--- a/pkg/sentry/syscalls/linux/vfs2/BUILD
+++ b/pkg/sentry/syscalls/linux/vfs2/BUILD
@@ -41,6 +41,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/bits",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fspath",
"//pkg/gohacks",
"//pkg/hostarch",
@@ -72,7 +73,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sync",
"//pkg/syserr",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
],
diff --git a/pkg/sentry/syscalls/linux/vfs2/aio.go b/pkg/sentry/syscalls/linux/vfs2/aio.go
index fd1863ef3..0b57c0f7c 100644
--- a/pkg/sentry/syscalls/linux/vfs2/aio.go
+++ b/pkg/sentry/syscalls/linux/vfs2/aio.go
@@ -17,6 +17,8 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/eventfd"
@@ -24,10 +26,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/mm"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// IoSubmit implements linux syscall io_submit(2).
@@ -37,7 +36,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
addr := args[2].Pointer()
if nrEvents < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
for i := int32(0); i < nrEvents; i++ {
@@ -56,7 +55,7 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
cbAddr = hostarch.Addr(cbAddrP)
default:
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
// Copy in this callback.
@@ -90,12 +89,12 @@ func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// submitCallback processes a single callback.
func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr hostarch.Addr) error {
if cb.Reserved2 != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
fd := t.GetFileVFS2(cb.FD)
if fd == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer fd.DecRef(t)
@@ -104,13 +103,13 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr host
if cb.Flags&linux.IOCB_FLAG_RESFD != 0 {
eventFD = t.GetFileVFS2(cb.ResFD)
if eventFD == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
defer eventFD.DecRef(t)
// Check that it is an eventfd.
if _, ok := eventFD.Impl().(*eventfd.EventFileDescription); !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -123,14 +122,14 @@ func submitCallback(t *kernel.Task, id uint64, cb *linux.IOCallback, cbAddr host
switch cb.OpCode {
case linux.IOCB_CMD_PREAD, linux.IOCB_CMD_PREADV, linux.IOCB_CMD_PWRITE, linux.IOCB_CMD_PWRITEV:
if cb.Offset < 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
// Prepare the request.
aioCtx, ok := t.MemoryManager().LookupAIOContext(t, id)
if !ok {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if err := aioCtx.Prepare(); err != nil {
return err
@@ -200,7 +199,7 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
bytes := int(cb.Bytes)
if bytes < 0 {
// Linux also requires that this field fit in ssize_t.
- return usermem.IOSequence{}, syserror.EINVAL
+ return usermem.IOSequence{}, linuxerr.EINVAL
}
// Since this I/O will be asynchronous with respect to t's task goroutine,
@@ -222,6 +221,6 @@ func memoryFor(t *kernel.Task, cb *linux.IOCallback) (usermem.IOSequence, error)
default:
// Not a supported command.
- return usermem.IOSequence{}, syserror.EINVAL
+ return usermem.IOSequence{}, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/syscalls/linux/vfs2/epoll.go b/pkg/sentry/syscalls/linux/vfs2/epoll.go
index 047d955b6..84010db77 100644
--- a/pkg/sentry/syscalls/linux/vfs2/epoll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/epoll.go
@@ -19,12 +19,12 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -34,7 +34,7 @@ var sizeofEpollEvent = (*linux.EpollEvent)(nil).SizeBytes()
func EpollCreate1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
flags := args[0].Int()
if flags&^linux.EPOLL_CLOEXEC != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file, err := t.Kernel().VFS().NewEpollInstanceFD(t)
@@ -59,7 +59,7 @@ func EpollCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// "Since Linux 2.6.8, the size argument is ignored, but must be greater
// than zero" - epoll_create(2)
if size <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file, err := t.Kernel().VFS().NewEpollInstanceFD(t)
@@ -84,20 +84,20 @@ func EpollCtl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
epfile := t.GetFileVFS2(epfd)
if epfile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer epfile.DecRef(t)
ep, ok := epfile.Impl().(*vfs.EpollInstance)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if epfile == file {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var event linux.EpollEvent
@@ -115,24 +115,24 @@ func EpollCtl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
return 0, nil, ep.ModifyInterest(file, fd, event)
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
func waitEpoll(t *kernel.Task, epfd int32, eventsAddr hostarch.Addr, maxEvents int, timeoutInNanos int64) (uintptr, *kernel.SyscallControl, error) {
var _EP_MAX_EVENTS = math.MaxInt32 / sizeofEpollEvent // Linux: fs/eventpoll.c:EP_MAX_EVENTS
if maxEvents <= 0 || maxEvents > _EP_MAX_EVENTS {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
epfile := t.GetFileVFS2(epfd)
if epfile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer epfile.DecRef(t)
ep, ok := epfile.Impl().(*vfs.EpollInstance)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Allocate space for a few events on the stack for the common case in
@@ -174,7 +174,7 @@ func waitEpoll(t *kernel.Task, epfd int32, eventsAddr hostarch.Addr, maxEvents i
haveDeadline = true
}
if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
err = nil
}
return 0, nil, err
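(Aside: the epoll change above swaps the direct comparison err == syserror.ETIMEDOUT for linuxerr.Equals(linuxerr.ETIMEDOUT, err). The standalone sketch below illustrates, under assumed names, why an Equals-style helper becomes necessary once the same errno may be carried by more than one concrete Go error type; the errnoError type and equalsErrno function are hypothetical and are not gVisor's implementation.)

package main

import (
	"fmt"
	"syscall"
)

// errnoError is a hypothetical rich error type that carries an errno plus a
// message, standing in for descriptor-style errors that replace bare errnos.
type errnoError struct {
	errno syscall.Errno
	msg   string
}

func (e *errnoError) Error() string { return e.msg }

// equalsErrno reports whether err represents the same errno as want, whether
// err is a bare syscall.Errno or an *errnoError. A plain == comparison cannot
// match across the two representations.
func equalsErrno(want syscall.Errno, err error) bool {
	switch e := err.(type) {
	case syscall.Errno:
		return e == want
	case *errnoError:
		return e.errno == want
	default:
		return false
	}
}

func main() {
	rich := &errnoError{errno: syscall.ETIMEDOUT, msg: "operation timed out"}
	fmt.Println(equalsErrno(syscall.ETIMEDOUT, rich))              // true
	fmt.Println(equalsErrno(syscall.ETIMEDOUT, syscall.ETIMEDOUT)) // true
	fmt.Println(equalsErrno(syscall.ETIMEDOUT, syscall.EINTR))     // false
}
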
diff --git a/pkg/sentry/syscalls/linux/vfs2/eventfd.go b/pkg/sentry/syscalls/linux/vfs2/eventfd.go
index 807f909da..0dcf1fbff 100644
--- a/pkg/sentry/syscalls/linux/vfs2/eventfd.go
+++ b/pkg/sentry/syscalls/linux/vfs2/eventfd.go
@@ -16,10 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/eventfd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Eventfd2 implements linux syscall eventfd2(2).
@@ -29,7 +29,7 @@ func Eventfd2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
allOps := uint(linux.EFD_SEMAPHORE | linux.EFD_NONBLOCK | linux.EFD_CLOEXEC)
if flags & ^allOps != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
vfsObj := t.Kernel().VFS()
diff --git a/pkg/sentry/syscalls/linux/vfs2/execve.go b/pkg/sentry/syscalls/linux/vfs2/execve.go
index 3315398a4..fcf2e25de 100644
--- a/pkg/sentry/syscalls/linux/vfs2/execve.go
+++ b/pkg/sentry/syscalls/linux/vfs2/execve.go
@@ -16,16 +16,15 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsbridge"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/loader"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Execve implements linux syscall execve(2).
@@ -48,7 +47,7 @@ func Execveat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr hostarch.Addr, flags int32) (uintptr, *kernel.SyscallControl, error) {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
pathname, err := t.CopyInString(pathnameAddr, linux.PATH_MAX)
@@ -83,11 +82,11 @@ func execveat(t *kernel.Task, dirfd int32, pathnameAddr, argvAddr, envvAddr host
// do_open_execat(fd=AT_FDCWD)), and the loader package is currently
// incapable of handling this correctly.
if !path.HasComponents() && flags&linux.AT_EMPTY_PATH == 0 {
- return 0, nil, syserror.ENOENT
+ return 0, nil, linuxerr.ENOENT
}
dirfile, dirfileFlags := t.FDTable().GetVFS2(dirfd)
if dirfile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
start := dirfile.VirtualDentry()
start.IncRef()
diff --git a/pkg/sentry/syscalls/linux/vfs2/fd.go b/pkg/sentry/syscalls/linux/vfs2/fd.go
index 1a31898e8..2198aa065 100644
--- a/pkg/sentry/syscalls/linux/vfs2/fd.go
+++ b/pkg/sentry/syscalls/linux/vfs2/fd.go
@@ -16,6 +16,7 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
@@ -24,7 +25,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Close implements Linux syscall close(2).
@@ -36,12 +36,12 @@ func Close(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// (and other reference-holding operations complete).
_, file := t.FDTable().Remove(t, fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
err := file.OnClose(t)
- return 0, nil, slinux.HandleIOErrorVFS2(t, false /* partial */, err, syserror.EINTR, "close", file)
+ return 0, nil, slinux.HandleIOErrorVFS2(t, false /* partial */, err, linuxerr.EINTR, "close", file)
}
// Dup implements Linux syscall dup(2).
@@ -50,13 +50,13 @@ func Dup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
newFD, err := t.NewFDFromVFS2(0, file, kernel.FDFlags{})
if err != nil {
- return 0, nil, syserror.EMFILE
+ return 0, nil, linuxerr.EMFILE
}
return uintptr(newFD), nil, nil
}
@@ -70,7 +70,7 @@ func Dup2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// As long as oldfd is valid, dup2() does nothing and returns newfd.
file := t.GetFileVFS2(oldfd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
file.DecRef(t)
return uintptr(newfd), nil, nil
@@ -86,7 +86,7 @@ func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
flags := args[2].Uint()
if oldfd == newfd {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return dup3(t, oldfd, newfd, flags)
@@ -94,12 +94,12 @@ func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
func dup3(t *kernel.Task, oldfd, newfd int32, flags uint32) (uintptr, *kernel.SyscallControl, error) {
if flags&^linux.O_CLOEXEC != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(oldfd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -119,7 +119,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file, flags := t.FDTable().GetVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -128,7 +128,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.F_DUPFD, linux.F_DUPFD_CLOEXEC, linux.F_GETFD, linux.F_SETFD, linux.F_GETFL:
// allowed
default:
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
}
@@ -169,7 +169,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
if who < 0 {
// Check for overflow before flipping the sign.
if who-1 > who {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ownerType = linux.F_OWNER_PGRP
who = -who
@@ -192,7 +192,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.F_SETPIPE_SZ:
pipefile, ok := file.Impl().(*pipe.VFSPipeFD)
if !ok {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
n, err := pipefile.SetPipeSize(int64(args[2].Int()))
if err != nil {
@@ -202,7 +202,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
case linux.F_GETPIPE_SZ:
pipefile, ok := file.Impl().(*pipe.VFSPipeFD)
if !ok {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
return uintptr(pipefile.PipeSize()), nil, nil
case linux.F_GET_SEALS:
@@ -210,7 +210,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return uintptr(val), nil, err
case linux.F_ADD_SEALS:
if !file.IsWritable() {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
err := tmpfs.AddSeals(file, args[2].Uint())
return 0, nil, err
@@ -232,7 +232,7 @@ func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
return 0, nil, a.SetSignal(linux.Signal(args[2].Int()))
default:
// Everything else is not yet supported.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
@@ -269,7 +269,7 @@ func setAsyncOwner(t *kernel.Task, fd int, file *vfs.FileDescription, ownerType,
case linux.F_OWNER_TID, linux.F_OWNER_PID, linux.F_OWNER_PGRP:
// Acceptable type.
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
a := file.SetAsyncHandler(fasync.NewVFS2(fd)).(*fasync.FileAsync)
@@ -282,26 +282,26 @@ func setAsyncOwner(t *kernel.Task, fd int, file *vfs.FileDescription, ownerType,
case linux.F_OWNER_TID:
task := t.PIDNamespace().TaskWithID(kernel.ThreadID(pid))
if task == nil {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
a.SetOwnerTask(t, task)
return nil
case linux.F_OWNER_PID:
tg := t.PIDNamespace().ThreadGroupWithID(kernel.ThreadID(pid))
if tg == nil {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
a.SetOwnerThreadGroup(t, tg)
return nil
case linux.F_OWNER_PGRP:
pg := t.PIDNamespace().ProcessGroupWithID(kernel.ProcessGroupID(pid))
if pg == nil {
- return syserror.ESRCH
+ return linuxerr.ESRCH
}
a.SetOwnerProcessGroup(t, pg)
return nil
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -319,7 +319,7 @@ func posixTestLock(t *kernel.Task, args arch.SyscallArguments, file *vfs.FileDes
case linux.F_WRLCK:
typ = lock.WriteLock
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
r, err := file.ComputeLockRange(t, uint64(flock.Start), uint64(flock.Len), flock.Whence)
if err != nil {
@@ -368,13 +368,13 @@ func posixLock(t *kernel.Task, args arch.SyscallArguments, file *vfs.FileDescrip
switch flock.Type {
case linux.F_RDLCK:
if !file.IsReadable() {
- return syserror.EBADF
+ return linuxerr.EBADF
}
return file.LockPOSIX(t, t.FDTable(), int32(t.TGIDInRoot()), lock.ReadLock, r, blocker)
case linux.F_WRLCK:
if !file.IsWritable() {
- return syserror.EBADF
+ return linuxerr.EBADF
}
return file.LockPOSIX(t, t.FDTable(), int32(t.TGIDInRoot()), lock.WriteLock, r, blocker)
@@ -382,7 +382,7 @@ func posixLock(t *kernel.Task, args arch.SyscallArguments, file *vfs.FileDescrip
return file.UnlockPOSIX(t, t.FDTable(), r)
default:
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -395,22 +395,22 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
// Note: offset is allowed to be negative.
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if file.StatusFlags()&linux.O_PATH != 0 {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// If the FD refers to a pipe or FIFO, return error.
if _, isPipe := file.Impl().(*pipe.VFSPipeFD); isPipe {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
switch advice {
@@ -421,7 +421,7 @@ func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
case linux.POSIX_FADV_DONTNEED:
case linux.POSIX_FADV_NOREUSE:
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Sure, whatever.
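(Aside: the fcntl F_SETOWN path above keeps the "if who-1 > who" guard before negating a negative who into a process-group ID. A minimal standalone sketch of that overflow idiom follows; the function name and error strings are assumptions for illustration only.)

package main

import (
	"errors"
	"fmt"
	"math"
)

// ownerFromWho mirrors the F_SETOWN convention: a negative who selects a
// process group whose ID is -who. Negating math.MinInt32 would overflow, and
// who-1 > who is true only for that value under wrapping int32 arithmetic,
// so it rejects the one input that cannot be negated.
func ownerFromWho(who int32) (id int32, isPGRP bool, err error) {
	if who < 0 {
		if who-1 > who { // only math.MinInt32 wraps here
			return 0, false, errors.New("EINVAL: cannot negate minimum int32")
		}
		return -who, true, nil
	}
	return who, false, nil
}

func main() {
	fmt.Println(ownerFromWho(-42))           // 42 true <nil>
	fmt.Println(ownerFromWho(math.MinInt32)) // 0 false EINVAL: cannot negate minimum int32
}
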
diff --git a/pkg/sentry/syscalls/linux/vfs2/filesystem.go b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
index 36aa1d3ae..f19f0fd41 100644
--- a/pkg/sentry/syscalls/linux/vfs2/filesystem.go
+++ b/pkg/sentry/syscalls/linux/vfs2/filesystem.go
@@ -16,12 +16,11 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Link implements Linux syscall link(2).
@@ -43,10 +42,10 @@ func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
func linkat(t *kernel.Task, olddirfd int32, oldpathAddr hostarch.Addr, newdirfd int32, newpathAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_FOLLOW) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if flags&linux.AT_EMPTY_PATH != 0 && !t.HasCapability(linux.CAP_DAC_READ_SEARCH) {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
oldpath, err := copyInPath(t, oldpathAddr)
@@ -290,7 +289,7 @@ func Unlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
flags := args[2].Int()
if flags&^linux.AT_REMOVEDIR != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if flags&linux.AT_REMOVEDIR != 0 {
@@ -320,7 +319,7 @@ func symlinkat(t *kernel.Task, targetAddr hostarch.Addr, newdirfd int32, linkpat
return err
}
if len(target) == 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
linkpath, err := copyInPath(t, linkpathAddr)
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/vfs2/fscontext.go b/pkg/sentry/syscalls/linux/vfs2/fscontext.go
index a7d4d2a36..1e36d9c76 100644
--- a/pkg/sentry/syscalls/linux/vfs2/fscontext.go
+++ b/pkg/sentry/syscalls/linux/vfs2/fscontext.go
@@ -16,11 +16,11 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Getcwd implements Linux syscall getcwd(2).
@@ -39,7 +39,7 @@ func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Note this is >= because we need a terminator.
if uint(len(s)) >= size {
- return 0, nil, syserror.ERANGE
+ return 0, nil, linuxerr.ERANGE
}
// Construct a byte slice containing a NUL terminator.
@@ -106,7 +106,7 @@ func Chroot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
addr := args[0].Pointer()
if !t.HasCapability(linux.CAP_SYS_CHROOT) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
path, err := copyInPath(t, addr)
diff --git a/pkg/sentry/syscalls/linux/vfs2/getdents.go b/pkg/sentry/syscalls/linux/vfs2/getdents.go
index b41a3056a..c2c3172bc 100644
--- a/pkg/sentry/syscalls/linux/vfs2/getdents.go
+++ b/pkg/sentry/syscalls/linux/vfs2/getdents.go
@@ -17,13 +17,12 @@ package vfs2
import (
"fmt"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Getdents implements Linux syscall getdents(2).
@@ -43,7 +42,7 @@ func getdents(t *kernel.Task, args arch.SyscallArguments, isGetdents64 bool) (ui
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -100,7 +99,7 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
size := 8 + 8 + 2 + 1 + 1 + len(dirent.Name)
size = (size + 7) &^ 7 // round up to multiple of 8
if size > cb.remaining {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
@@ -134,7 +133,7 @@ func (cb *getdentsCallback) Handle(dirent vfs.Dirent) error {
size := 8 + 8 + 2 + 1 + 1 + len(dirent.Name)
size = (size + 7) &^ 7 // round up to multiple of sizeof(long)
if size > cb.remaining {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
buf = cb.t.CopyScratchBuffer(size)
hostarch.ByteOrder.PutUint64(buf[0:8], dirent.Ino)
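(Aside: the getdents callbacks above size each record as a fixed byte overhead plus the name length, rounded up with (size + 7) &^ 7. A small standalone illustration of that alignment idiom, with the helper name assumed:)

package main

import "fmt"

// direntRecordSize mirrors the sizing in the diff: 8 + 8 + 2 + 1 + 1 bytes of
// fixed fields and terminator plus len(name), rounded up to a multiple of 8
// so successive records stay aligned.
func direntRecordSize(name string) int {
	size := 8 + 8 + 2 + 1 + 1 + len(name)
	return (size + 7) &^ 7 // round up to the next multiple of 8
}

func main() {
	fmt.Println(direntRecordSize("a"))        // 24
	fmt.Println(direntRecordSize("abcdefgh")) // 32
}
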
diff --git a/pkg/sentry/syscalls/linux/vfs2/inotify.go b/pkg/sentry/syscalls/linux/vfs2/inotify.go
index 11753d8e5..d8d5dd7ad 100644
--- a/pkg/sentry/syscalls/linux/vfs2/inotify.go
+++ b/pkg/sentry/syscalls/linux/vfs2/inotify.go
@@ -16,10 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
)
const allFlags = linux.IN_NONBLOCK | linux.IN_CLOEXEC
@@ -28,7 +28,7 @@ const allFlags = linux.IN_NONBLOCK | linux.IN_CLOEXEC
func InotifyInit1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
flags := args[0].Int()
if flags&^allFlags != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ino, err := vfs.NewInotifyFD(t, t.Kernel().VFS(), uint32(flags))
@@ -60,14 +60,14 @@ func fdToInotify(t *kernel.Task, fd int32) (*vfs.Inotify, *vfs.FileDescription,
f := t.GetFileVFS2(fd)
if f == nil {
// Invalid fd.
- return nil, nil, syserror.EBADF
+ return nil, nil, linuxerr.EBADF
}
ino, ok := f.Impl().(*vfs.Inotify)
if !ok {
// Not an inotify fd.
f.DecRef(t)
- return nil, nil, syserror.EINVAL
+ return nil, nil, linuxerr.EINVAL
}
return ino, f, nil
@@ -82,7 +82,7 @@ func InotifyAddWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kern
// "EINVAL: The given event mask contains no valid events."
// -- inotify_add_watch(2)
if mask&linux.ALL_INOTIFY_BITS == 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "IN_DONT_FOLLOW: Don't dereference pathname if it is a symbolic link."
diff --git a/pkg/sentry/syscalls/linux/vfs2/ioctl.go b/pkg/sentry/syscalls/linux/vfs2/ioctl.go
index c7c3fed57..b806120cd 100644
--- a/pkg/sentry/syscalls/linux/vfs2/ioctl.go
+++ b/pkg/sentry/syscalls/linux/vfs2/ioctl.go
@@ -16,10 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Ioctl implements Linux syscall ioctl(2).
@@ -28,12 +28,12 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if file.StatusFlags()&linux.O_PATH != 0 {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Handle ioctls that apply to all FDs.
@@ -99,7 +99,7 @@ func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
if who < 0 {
// Check for overflow before flipping the sign.
if who-1 > who {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
ownerType = linux.F_OWNER_PGRP
who = -who
diff --git a/pkg/sentry/syscalls/linux/vfs2/lock.go b/pkg/sentry/syscalls/linux/vfs2/lock.go
index d1452a04d..008603173 100644
--- a/pkg/sentry/syscalls/linux/vfs2/lock.go
+++ b/pkg/sentry/syscalls/linux/vfs2/lock.go
@@ -16,10 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Flock implements linux syscall flock(2).
@@ -30,7 +30,7 @@ func Flock(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
// flock(2): EBADF fd is not an open file descriptor.
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -57,7 +57,7 @@ func Flock(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
}
default:
// flock(2): EINVAL operation is invalid.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, nil
diff --git a/pkg/sentry/syscalls/linux/vfs2/memfd.go b/pkg/sentry/syscalls/linux/vfs2/memfd.go
index c4c0f9e0a..70c2cf5a5 100644
--- a/pkg/sentry/syscalls/linux/vfs2/memfd.go
+++ b/pkg/sentry/syscalls/linux/vfs2/memfd.go
@@ -16,10 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
const (
@@ -35,7 +35,7 @@ func MemfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
if flags&^memfdAllFlags != 0 {
// Unknown bits in flags.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
allowSeals := flags&linux.MFD_ALLOW_SEALING != 0
diff --git a/pkg/sentry/syscalls/linux/vfs2/mmap.go b/pkg/sentry/syscalls/linux/vfs2/mmap.go
index c961545f6..c804f9fd3 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mmap.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mmap.go
@@ -16,13 +16,12 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Mmap implements Linux syscall mmap(2).
@@ -38,7 +37,7 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Require exactly one of MAP_PRIVATE and MAP_SHARED.
if private == shared {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
opts := memmap.MMapOpts{
@@ -71,13 +70,13 @@ func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Convert the passed FD to a file reference.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// mmap unconditionally requires that the FD is readable.
if !file.IsReadable() {
- return 0, nil, syserror.EACCES
+ return 0, nil, linuxerr.EACCES
}
// MAP_SHARED requires that the FD be writable for PROT_WRITE.
if shared && !file.IsWritable() {
diff --git a/pkg/sentry/syscalls/linux/vfs2/mount.go b/pkg/sentry/syscalls/linux/vfs2/mount.go
index dd93430e2..4d73d46ef 100644
--- a/pkg/sentry/syscalls/linux/vfs2/mount.go
+++ b/pkg/sentry/syscalls/linux/vfs2/mount.go
@@ -16,12 +16,11 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Mount implements Linux syscall mount(2).
@@ -69,7 +68,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// namespace.
creds := t.Credentials()
if !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespaceVFS2().Owner) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
const unsupportedOps = linux.MS_REMOUNT | linux.MS_BIND |
@@ -84,7 +83,7 @@ func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
// unknown or unsupported flags are passed. Since we don't implement
// everything, we fail explicitly on flags that are unimplemented.
if flags&(unsupportedOps|unsupportedFlags) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var opts vfs.MountOptions
@@ -125,12 +124,12 @@ func Umount2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// Currently, this is always the init task's user namespace.
creds := t.Credentials()
if !creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespaceVFS2().Owner) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
const unsupported = linux.MNT_FORCE | linux.MNT_EXPIRE
if flags&unsupported != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, err := copyInPath(t, addr)
diff --git a/pkg/sentry/syscalls/linux/vfs2/path.go b/pkg/sentry/syscalls/linux/vfs2/path.go
index 2aaf1ed74..38796d4db 100644
--- a/pkg/sentry/syscalls/linux/vfs2/path.go
+++ b/pkg/sentry/syscalls/linux/vfs2/path.go
@@ -16,12 +16,11 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
func copyInPath(t *kernel.Task, addr hostarch.Addr) (fspath.Path, error) {
@@ -44,7 +43,7 @@ func getTaskPathOperation(t *kernel.Task, dirfd int32, path fspath.Path, shouldA
if !path.Absolute {
if !path.HasComponents() && !bool(shouldAllowEmptyPath) {
root.DecRef(t)
- return taskPathOperation{}, syserror.ENOENT
+ return taskPathOperation{}, linuxerr.ENOENT
}
if dirfd == linux.AT_FDCWD {
start = t.FSContext().WorkingDirectoryVFS2()
@@ -53,7 +52,7 @@ func getTaskPathOperation(t *kernel.Task, dirfd int32, path fspath.Path, shouldA
dirfile := t.GetFileVFS2(dirfd)
if dirfile == nil {
root.DecRef(t)
- return taskPathOperation{}, syserror.EBADF
+ return taskPathOperation{}, linuxerr.EBADF
}
start = dirfile.VirtualDentry()
start.IncRef()
diff --git a/pkg/sentry/syscalls/linux/vfs2/pipe.go b/pkg/sentry/syscalls/linux/vfs2/pipe.go
index c6fc1954c..07a89cf4e 100644
--- a/pkg/sentry/syscalls/linux/vfs2/pipe.go
+++ b/pkg/sentry/syscalls/linux/vfs2/pipe.go
@@ -16,14 +16,13 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/pipefs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Pipe implements Linux syscall pipe(2).
@@ -41,7 +40,7 @@ func Pipe2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
func pipe2(t *kernel.Task, addr hostarch.Addr, flags int32) error {
if flags&^(linux.O_NONBLOCK|linux.O_CLOEXEC) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
r, w, err := pipefs.NewConnectedPipeFDs(t, t.Kernel().PipeMount(), uint32(flags&linux.O_NONBLOCK))
if err != nil {
diff --git a/pkg/sentry/syscalls/linux/vfs2/poll.go b/pkg/sentry/syscalls/linux/vfs2/poll.go
index a69c80edd..204051cd0 100644
--- a/pkg/sentry/syscalls/linux/vfs2/poll.go
+++ b/pkg/sentry/syscalls/linux/vfs2/poll.go
@@ -19,15 +19,15 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
"gvisor.dev/gvisor/pkg/waiter"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// fileCap is the maximum allowable files for poll & select. This has no
@@ -132,7 +132,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
// Wait for a notification.
timeout, err = t.BlockWithTimeout(ch, haveTimeout, timeout)
if err != nil {
- if err == syserror.ETIMEDOUT {
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
err = nil
}
return timeout, 0, err
@@ -161,7 +161,7 @@ func pollBlock(t *kernel.Task, pfd []linux.PollFD, timeout time.Duration) (time.
// copyInPollFDs copies an array of struct pollfd unless nfds exceeds the max.
func copyInPollFDs(t *kernel.Task, addr hostarch.Addr, nfds uint) ([]linux.PollFD, error) {
if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
pfd := make([]linux.PollFD, nfds)
@@ -188,7 +188,7 @@ func doPoll(t *kernel.Task, addr hostarch.Addr, nfds uint, timeout time.Duration
pfd[i].Events |= linux.POLLHUP | linux.POLLERR
}
remainingTimeout, n, err := pollBlock(t, pfd, timeout)
- err = syserror.ConvertIntr(err, syserror.EINTR)
+ err = syserr.ConvertIntr(err, linuxerr.EINTR)
// The poll entries are copied out regardless of whether
// any are set or not. This aligns with the Linux behavior.
@@ -221,7 +221,7 @@ func CopyInFDSet(t *kernel.Task, addr hostarch.Addr, nBytes, nBitsInLastPartialB
func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Addr, timeout time.Duration) (uintptr, error) {
if nfds < 0 || nfds > fileCap {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Calculate the size of the fd sets (one bit per fd).
@@ -268,7 +268,7 @@ func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Ad
// OK. Linux is racy in the same way.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
file.DecRef(t)
@@ -298,7 +298,7 @@ func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs hostarch.Ad
// Do the syscall, then count the number of bits set.
if _, _, err = pollBlock(t, pfd, timeout); err != nil {
- return 0, syserror.ConvertIntr(err, syserror.EINTR)
+ return 0, syserr.ConvertIntr(err, linuxerr.EINTR)
}
// r, w, and e are currently event mask bitsets; unset bits corresponding
@@ -410,13 +410,13 @@ func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
func poll(t *kernel.Task, pfdAddr hostarch.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
// On an interrupt poll(2) is restarted with the remaining timeout.
- if err == syserror.EINTR {
+ if linuxerr.Equals(linuxerr.EINTR, err) {
t.SetSyscallRestartBlock(&pollRestartBlock{
pfdAddr: pfdAddr,
nfds: nfds,
timeout: remainingTimeout,
})
- return 0, syserror.ERESTART_RESTARTBLOCK
+ return 0, linuxerr.ERESTART_RESTARTBLOCK
}
return n, err
}
@@ -462,8 +462,8 @@ func Ppoll(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
//
// Note that this means that if err is nil but copyErr is not, copyErr is
// ignored. This is consistent with Linux.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
@@ -484,7 +484,7 @@ func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
return 0, nil, err
}
if timeval.Sec < 0 || timeval.Usec < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
timeout = time.Duration(timeval.ToNsecCapped())
}
@@ -492,8 +492,8 @@ func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
copyErr := copyOutTimevalRemaining(t, startNs, timeout, timevalAddr)
// See comment in Ppoll.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
@@ -539,8 +539,8 @@ func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
copyErr := copyOutTimespecRemaining(t, startNs, timeout, timespecAddr)
// See comment in Ppoll.
- if err == syserror.EINTR && copyErr == nil {
- err = syserror.ERESTARTNOHAND
+ if linuxerr.Equals(linuxerr.EINTR, err) && copyErr == nil {
+ err = linuxerr.ERESTARTNOHAND
}
return n, nil, err
}
@@ -561,7 +561,7 @@ func copyTimespecInToDuration(t *kernel.Task, timespecAddr hostarch.Addr) (time.
return 0, err
}
if !timespec.Valid() {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
timeout = time.Duration(timespec.ToNsecCapped())
}
@@ -573,7 +573,7 @@ func setTempSignalSet(t *kernel.Task, maskAddr hostarch.Addr, maskSize uint) err
return nil
}
if maskSize != linux.SignalSetSize {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
var mask linux.SignalSet
if _, err := mask.CopyIn(t, maskAddr); err != nil {
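(Aside: the poll changes above preserve the restart-on-interrupt behaviour: when the block is interrupted, the remaining timeout is stashed in a restart block so the retried poll does not start its wait from scratch. The sketch below is a hypothetical illustration of that bookkeeping; the names blockWithTimeout and pollRestart are assumptions, not the sentry's API.)

package main

import (
	"errors"
	"fmt"
	"time"
)

var errInterrupted = errors.New("EINTR: interrupted by signal")

// blockWithTimeout pretends to wait for events, reporting how much of the
// timeout is left if the wait is interrupted early.
func blockWithTimeout(timeout, interruptAfter time.Duration) (time.Duration, error) {
	if interruptAfter < timeout {
		return timeout - interruptAfter, errInterrupted
	}
	return 0, nil // timed out normally
}

// pollRestart holds the state needed to retry an interrupted poll with only
// the remaining timeout, mirroring the restart-block idea in the diff.
type pollRestart struct {
	remaining time.Duration
}

func main() {
	remaining, err := blockWithTimeout(5*time.Second, 2*time.Second)
	if errors.Is(err, errInterrupted) {
		rb := pollRestart{remaining: remaining}
		fmt.Printf("restart with %v left\n", rb.remaining) // restart with 3s left
	}
}
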
diff --git a/pkg/sentry/syscalls/linux/vfs2/read_write.go b/pkg/sentry/syscalls/linux/vfs2/read_write.go
index b863d7b84..4e7dc5080 100644
--- a/pkg/sentry/syscalls/linux/vfs2/read_write.go
+++ b/pkg/sentry/syscalls/linux/vfs2/read_write.go
@@ -18,13 +18,13 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
"gvisor.dev/gvisor/pkg/sentry/socket"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -42,14 +42,14 @@ func Read(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -62,7 +62,7 @@ func Read(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
n, err := read(t, file, dst, vfs.ReadOptions{})
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "read", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "read", file)
}
// Readv implements Linux syscall readv(2).
@@ -73,7 +73,7 @@ func Readv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -87,12 +87,12 @@ func Readv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
n, err := read(t, file, dst, vfs.ReadOptions{})
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "readv", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "readv", file)
}
func read(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
n, err := file.Read(t, dst, opts)
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
return n, err
}
@@ -114,14 +114,14 @@ func read(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, opt
// "would block".
n, err = file.Read(t, dst, opts)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, hasDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -140,19 +140,19 @@ func Pread64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -165,7 +165,7 @@ func Pread64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := pread(t, file, dst, offset, vfs.ReadOptions{})
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "pread64", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "pread64", file)
}
// Preadv implements Linux syscall preadv(2).
@@ -177,13 +177,13 @@ func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -196,7 +196,7 @@ func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := pread(t, file, dst, offset, vfs.ReadOptions{})
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "preadv", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "preadv", file)
}
// Preadv2 implements Linux syscall preadv2(2).
@@ -215,13 +215,13 @@ func Preadv2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < -1 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the destination of the read.
@@ -242,12 +242,12 @@ func Preadv2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err = pread(t, file, dst, offset, opts)
}
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "preadv2", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "preadv2", file)
}
func pread(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
n, err := file.PRead(t, dst, offset, opts)
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
return n, err
}
@@ -269,14 +269,14 @@ func pread(t *kernel.Task, file *vfs.FileDescription, dst usermem.IOSequence, of
// "would block".
n, err = file.PRead(t, dst, offset+total, opts)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, hasDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -293,14 +293,14 @@ func Write(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -313,7 +313,7 @@ func Write(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
n, err := write(t, file, src, vfs.WriteOptions{})
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "write", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "write", file)
}
// Writev implements Linux syscall writev(2).
@@ -324,7 +324,7 @@ func Writev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -338,12 +338,12 @@ func Writev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
n, err := write(t, file, src, vfs.WriteOptions{})
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "writev", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "writev", file)
}
func write(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
n, err := file.Write(t, src, opts)
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
return n, err
}
@@ -365,14 +365,14 @@ func write(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, op
// "would block".
n, err = file.Write(t, src, opts)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, hasDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -390,19 +390,19 @@ func Pwrite64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check that the size is legitimate.
si := int(size)
if si < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -415,7 +415,7 @@ func Pwrite64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
n, err := pwrite(t, file, src, offset, vfs.WriteOptions{})
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "pwrite64", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "pwrite64", file)
}
// Pwritev implements Linux syscall pwritev(2).
@@ -427,13 +427,13 @@ func Pwritev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -446,7 +446,7 @@ func Pwritev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
n, err := pwrite(t, file, src, offset, vfs.WriteOptions{})
t.IOUsage().AccountReadSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "pwritev", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "pwritev", file)
}
// Pwritev2 implements Linux syscall pwritev2(2).
@@ -465,13 +465,13 @@ func Pwritev2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the offset is legitimate.
if offset < -1 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get the source of the write.
@@ -492,12 +492,12 @@ func Pwritev2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
n, err = pwrite(t, file, src, offset, opts)
}
t.IOUsage().AccountWriteSyscall(n)
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "pwritev2", file)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "pwritev2", file)
}
func pwrite(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
n, err := file.PWrite(t, src, offset, opts)
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
return n, err
}
@@ -519,14 +519,14 @@ func pwrite(t *kernel.Task, file *vfs.FileDescription, src usermem.IOSequence, o
// "would block".
n, err = file.PWrite(t, src, offset+total, opts)
total += n
- if err != syserror.ErrWouldBlock {
+ if err != linuxerr.ErrWouldBlock {
break
}
// Wait for a notification that we should retry.
if err = t.BlockWithDeadline(ch, hasDeadline, deadline); err != nil {
- if err == syserror.ETIMEDOUT {
- err = syserror.ErrWouldBlock
+ if linuxerr.Equals(linuxerr.ETIMEDOUT, err) {
+ err = linuxerr.ErrWouldBlock
}
break
}
@@ -560,7 +560,7 @@ func Lseek(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -576,27 +576,27 @@ func Readahead(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Check that the file is readable.
if !file.IsReadable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Check that the size is valid.
if int(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check that the offset is legitimate and does not overflow.
if offset < 0 || offset+int64(size) < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Return EINVAL; if the underlying file type does not support readahead,
// then Linux will return EINVAL to indicate as much. In the future, we
// may extend this function to actually support readahead hints.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
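(Aside: the read/write paths above all share the same retry loop: attempt the I/O, and while it reports "would block", wait for readiness or a deadline and try again, folding a deadline expiry back into the would-block error. The compact sketch below shows that loop shape under assumed names; it is not the sentry's implementation.)

package main

import (
	"errors"
	"fmt"
)

var (
	errWouldBlock = errors.New("operation would block")
	errTimedOut   = errors.New("ETIMEDOUT")
)

// readRetry keeps calling read until it stops reporting errWouldBlock,
// waiting for readiness between attempts. A deadline expiry from wait is
// reported to the caller as errWouldBlock, matching the loop in the diff.
func readRetry(read func() (int64, error), wait func() error) (int64, error) {
	var total int64
	for {
		n, err := read()
		total += n
		if !errors.Is(err, errWouldBlock) {
			return total, err
		}
		if werr := wait(); werr != nil {
			if errors.Is(werr, errTimedOut) {
				werr = errWouldBlock
			}
			return total, werr
		}
	}
}

func main() {
	attempts := 0
	read := func() (int64, error) {
		attempts++
		if attempts < 3 {
			return 0, errWouldBlock
		}
		return 42, nil
	}
	wait := func() error { return nil } // always becomes ready immediately
	fmt.Println(readRetry(read, wait))  // 42 <nil>
}
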
diff --git a/pkg/sentry/syscalls/linux/vfs2/setstat.go b/pkg/sentry/syscalls/linux/vfs2/setstat.go
index 647e089d0..e608572b4 100644
--- a/pkg/sentry/syscalls/linux/vfs2/setstat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/setstat.go
@@ -16,15 +16,14 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
const chmodMask = 0777 | linux.S_ISUID | linux.S_ISGID | linux.S_ISVTX
@@ -65,7 +64,7 @@ func Fchmod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -105,7 +104,7 @@ func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
func fchownat(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, owner, group, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
path, err := copyInPath(t, pathAddr)
@@ -126,7 +125,7 @@ func populateSetStatOptionsForChown(t *kernel.Task, owner, group int32, opts *vf
if owner != -1 {
kuid := userns.MapToKUID(auth.UID(owner))
if !kuid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts.Stat.Mask |= linux.STATX_UID
opts.Stat.UID = uint32(kuid)
@@ -134,7 +133,7 @@ func populateSetStatOptionsForChown(t *kernel.Task, owner, group int32, opts *vf
if group != -1 {
kgid := userns.MapToKGID(auth.GID(group))
if !kgid.Ok() {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts.Stat.Mask |= linux.STATX_GID
opts.Stat.GID = uint32(kgid)
@@ -150,7 +149,7 @@ func Fchown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -167,7 +166,7 @@ func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
length := args[1].Int64()
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, err := copyInPath(t, addr)
@@ -191,17 +190,17 @@ func Ftruncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
length := args[1].Int64()
if length < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if !file.IsWritable() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
err := file.SetStat(t, vfs.SetStatOptions{
@@ -222,23 +221,23 @@ func Fallocate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if !file.IsWritable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
if mode != 0 {
- return 0, nil, syserror.ENOTSUP
+ return 0, nil, linuxerr.ENOTSUP
}
if offset < 0 || length <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
size := offset + length
if size < 0 {
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
limit := limits.FromContext(t).Get(limits.FileSize).Cur
if uint64(size) >= limit {
@@ -246,7 +245,7 @@ func Fallocate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
Signo: int32(linux.SIGXFSZ),
Code: linux.SI_USER,
})
- return 0, nil, syserror.EFBIG
+ return 0, nil, linuxerr.EFBIG
}
return 0, nil, file.Allocate(t, mode, uint64(offset), uint64(length))
@@ -340,7 +339,7 @@ func populateSetStatOptionsForUtimes(t *kernel.Task, timesAddr hostarch.Addr, op
return err
}
if times[0].Usec < 0 || times[0].Usec > 999999 || times[1].Usec < 0 || times[1].Usec > 999999 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts.Stat.Mask = linux.STATX_ATIME | linux.STATX_MTIME
opts.Stat.Atime = linux.StatxTimestamp{
@@ -372,7 +371,7 @@ func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
}
if flags&^linux.AT_SYMLINK_NOFOLLOW != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// "If filename is NULL and dfd refers to an open file, then operate on the
@@ -405,7 +404,7 @@ func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr hostarch.Addr, o
}
if times[0].Nsec != linux.UTIME_OMIT {
if times[0].Nsec != linux.UTIME_NOW && (times[0].Nsec < 0 || times[0].Nsec > 999999999) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts.Stat.Mask |= linux.STATX_ATIME
opts.Stat.Atime = linux.StatxTimestamp{
@@ -415,7 +414,7 @@ func populateSetStatOptionsForUtimens(t *kernel.Task, timesAddr hostarch.Addr, o
}
if times[1].Nsec != linux.UTIME_OMIT {
if times[1].Nsec != linux.UTIME_NOW && (times[1].Nsec < 0 || times[1].Nsec > 999999999) {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts.Stat.Mask |= linux.STATX_MTIME
opts.Stat.Mtime = linux.StatxTimestamp{
@@ -432,7 +431,7 @@ func setstatat(t *kernel.Task, dirfd int32, path fspath.Path, shouldAllowEmptyPa
start := root
if !path.Absolute {
if !path.HasComponents() && !bool(shouldAllowEmptyPath) {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if dirfd == linux.AT_FDCWD {
start = t.FSContext().WorkingDirectoryVFS2()
@@ -440,7 +439,7 @@ func setstatat(t *kernel.Task, dirfd int32, path fspath.Path, shouldAllowEmptyPa
} else {
dirfile := t.GetFileVFS2(dirfd)
if dirfile == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
if !path.HasComponents() {
// Use FileDescription.SetStat() instead of
@@ -465,10 +464,10 @@ func setstatat(t *kernel.Task, dirfd int32, path fspath.Path, shouldAllowEmptyPa
}
func handleSetSizeError(t *kernel.Task, err error) error {
- if err == syserror.ErrExceedsFileSizeLimit {
+ if err == linuxerr.ErrExceedsFileSizeLimit {
// Convert error to EFBIG and send a SIGXFSZ per setrlimit(2).
t.SendSignal(kernel.SignalInfoNoInfo(linux.SIGXFSZ, t, t))
- return syserror.EFBIG
+ return linuxerr.EFBIG
}
return err
}
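(Aside: the Fallocate path above rejects bad ranges with EINVAL, treats signed overflow of offset+length and limit violations as EFBIG, and the limit case additionally delivers SIGXFSZ per setrlimit(2). The sketch below illustrates just the size checks with hypothetical names; it is not the sentry's code and omits the signal delivery.)

package main

import (
	"errors"
	"fmt"
	"math"
)

var (
	errEINVAL = errors.New("EINVAL")
	errEFBIG  = errors.New("EFBIG") // caller would also deliver SIGXFSZ
)

// checkAllocate validates an fallocate-style request against an
// RLIMIT_FSIZE-style limit: negative offsets and non-positive lengths are
// EINVAL, while signed overflow of offset+length or a result at or past the
// limit is EFBIG.
func checkAllocate(offset, length int64, limit uint64) error {
	if offset < 0 || length <= 0 {
		return errEINVAL
	}
	size := offset + length
	if size < 0 { // signed overflow
		return errEFBIG
	}
	if uint64(size) >= limit {
		return errEFBIG
	}
	return nil
}

func main() {
	fmt.Println(checkAllocate(0, 4096, 1<<20))          // <nil>
	fmt.Println(checkAllocate(math.MaxInt64, 1, 1<<20)) // EFBIG (overflow)
	fmt.Println(checkAllocate(-1, 10, 1<<20))           // EINVAL
}
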
diff --git a/pkg/sentry/syscalls/linux/vfs2/signal.go b/pkg/sentry/syscalls/linux/vfs2/signal.go
index 6163da103..27fb2139b 100644
--- a/pkg/sentry/syscalls/linux/vfs2/signal.go
+++ b/pkg/sentry/syscalls/linux/vfs2/signal.go
@@ -16,13 +16,12 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/signalfd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// sharedSignalfd is shared between the two calls.
@@ -35,7 +34,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
// Always check for valid flags, even if not creating.
if flags&^(linux.SFD_NONBLOCK|linux.SFD_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Is this a change to an existing signalfd?
@@ -44,7 +43,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
if fd != -1 {
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -55,7 +54,7 @@ func sharedSignalfd(t *kernel.Task, fd int32, sigset hostarch.Addr, sigsetsize u
}
// Not a signalfd.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
fileFlags := uint32(linux.O_RDWR)
diff --git a/pkg/sentry/syscalls/linux/vfs2/socket.go b/pkg/sentry/syscalls/linux/vfs2/socket.go
index 69f69e3af..48be5a88d 100644
--- a/pkg/sentry/syscalls/linux/vfs2/socket.go
+++ b/pkg/sentry/syscalls/linux/vfs2/socket.go
@@ -18,6 +18,8 @@ import (
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -29,10 +31,7 @@ import (
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserr"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// maxAddrLen is the maximum socket address length we're willing to accept.
@@ -117,7 +116,7 @@ type multipleMessageHeader64 struct {
// from the untrusted address space range.
func CaptureAddress(t *kernel.Task, addr hostarch.Addr, addrlen uint32) ([]byte, error) {
if addrlen > maxAddrLen {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
addrBuf := make([]byte, addrlen)
@@ -139,7 +138,7 @@ func writeAddress(t *kernel.Task, addr linux.SockAddr, addrLen uint32, addrPtr h
}
if int32(bufLen) < 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Write the length unconditionally.
@@ -173,7 +172,7 @@ func Socket(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Check and initialize the flags.
if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Create the new socket.
@@ -206,7 +205,7 @@ func SocketPair(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Check and initialize the flags.
if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Create the socket pair.
@@ -256,14 +255,14 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Capture address and call syscall implementation.
@@ -273,7 +272,7 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
}
blocking := (file.StatusFlags() & linux.SOCK_NONBLOCK) == 0
- return 0, nil, syserror.ConvertIntr(s.Connect(t, a, blocking).ToError(), syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(s.Connect(t, a, blocking).ToError(), linuxerr.ERESTARTSYS)
}
// accept is the implementation of the accept syscall. It is called by accept
@@ -281,20 +280,20 @@ func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr, flags int) (uintptr, error) {
// Check that no unsupported flags are passed in.
if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
// Call the syscall implementation for this socket, then copy the
@@ -304,12 +303,12 @@ func accept(t *kernel.Task, fd int32, addr hostarch.Addr, addrLen hostarch.Addr,
peerRequested := addrLen != 0
nfd, peer, peerLen, e := s.Accept(t, peerRequested, flags, blocking)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
if peerRequested {
// NOTE(magi): Linux does not give you an error if it can't
// write the data back out so neither do we.
- if err := writeAddress(t, peer, peerLen, addr, addrLen); err == syserror.EINVAL {
+ if err := writeAddress(t, peer, peerLen, addr, addrLen); linuxerr.Equals(linuxerr.EINVAL, err) {
return 0, err
}
}
@@ -346,14 +345,14 @@ func Bind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallC
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Capture address and call syscall implementation.
@@ -373,14 +372,14 @@ func Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if backlog > maxListenBacklog {
@@ -411,21 +410,21 @@ func Shutdown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Validate how, then call syscall implementation.
switch how {
case linux.SHUT_RD, linux.SHUT_WR, linux.SHUT_RDWR:
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
return 0, nil, s.Shutdown(t, int(how)).ToError()
@@ -442,14 +441,14 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Read the length. Reject negative values.
@@ -458,7 +457,7 @@ func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
return 0, nil, err
}
if optLen < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Call syscall implementation then copy both value and value len out.
@@ -523,21 +522,21 @@ func SetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if optLen < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if optLen > maxOptLen {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
buf := t.CopyScratchBuffer(int(optLen))
if _, err := t.CopyInBytes(optValAddr, buf); err != nil {
@@ -561,14 +560,14 @@ func GetSockName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Get the socket name and copy it to the caller.
@@ -589,14 +588,14 @@ func GetPeerName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.S
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Get the socket peer name and copy it to the caller.
@@ -616,25 +615,25 @@ func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -664,7 +663,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if vlen > linux.UIO_MAXIOV {
@@ -673,20 +672,20 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -701,7 +700,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, err
}
if !ts.Valid() {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
deadline = t.Kernel().MonotonicClock().Now().Add(ts.ToDuration())
haveDeadline = true
@@ -721,7 +720,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := uint64(0); i < uint64(vlen); i++ {
mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
var n uintptr
if n, err = recvSingleMsg(t, s, mp, flags, haveDeadline, deadline); err != nil {
@@ -731,7 +730,7 @@ func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Copy the received length to the caller.
lp, ok := mp.AddLength(messageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
break
@@ -753,7 +752,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, fl
}
if msg.IovLen > linux.UIO_MAXIOV {
- return 0, syserror.EMSGSIZE
+ return 0, linuxerr.EMSGSIZE
}
dst, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
@@ -766,7 +765,7 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, fl
if msg.ControlLen == 0 && msg.NameLen == 0 {
n, mflags, _, _, cms, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0)
if err != nil {
- return 0, syserror.ConvertIntr(err.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(err.ToError(), linuxerr.ERESTARTSYS)
}
if !cms.Unix.Empty() {
mflags |= linux.MSG_CTRUNC
@@ -784,11 +783,11 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, fl
}
if msg.ControlLen > maxControlLen {
- return 0, syserror.ENOBUFS
+ return 0, linuxerr.ENOBUFS
}
n, mflags, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
defer cms.Release(t)
@@ -833,25 +832,25 @@ func recvSingleMsg(t *kernel.Task, s socket.SocketVFS2, msgPtr hostarch.Addr, fl
// recvfrom and recv syscall handlers.
func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLenPtr hostarch.Addr) (uintptr, error) {
if int(bufLen) < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Reject flags that we don't handle yet.
if flags & ^(baseRecvFlags|linux.MSG_PEEK|linux.MSG_CONFIRM) != 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -877,7 +876,7 @@ func recvFrom(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, fla
n, _, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0)
cm.Release(t)
if e != nil {
- return 0, syserror.ConvertIntr(e.ToError(), syserror.ERESTARTSYS)
+ return 0, syserr.ConvertIntr(e.ToError(), linuxerr.ERESTARTSYS)
}
// Copy the address to the caller.
@@ -911,25 +910,25 @@ func SendMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -949,7 +948,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
if t.Arch().Width() != 8 {
// We only handle 64-bit for now.
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if vlen > linux.UIO_MAXIOV {
@@ -959,19 +958,19 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, nil, syserror.ENOTSOCK
+ return 0, nil, linuxerr.ENOTSOCK
}
// Reject flags that we don't handle yet.
if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -983,7 +982,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
for i := uint64(0); i < uint64(vlen); i++ {
mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
var n uintptr
if n, err = sendSingleMsg(t, s, file, mp, flags); err != nil {
@@ -993,7 +992,7 @@ func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// Copy the received length to the caller.
lp, ok := mp.AddLength(messageHeader64Len)
if !ok {
- return 0, nil, syserror.EFAULT
+ return 0, nil, linuxerr.EFAULT
}
if _, err = primitive.CopyUint32Out(t, lp, uint32(n)); err != nil {
break
@@ -1018,7 +1017,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
if msg.ControlLen > 0 {
// Put an upper bound to prevent large allocations.
if msg.ControlLen > maxControlLen {
- return 0, syserror.ENOBUFS
+ return 0, linuxerr.ENOBUFS
}
controlData = make([]byte, msg.ControlLen)
if _, err := t.CopyInBytes(hostarch.Addr(msg.Control), controlData); err != nil {
@@ -1038,7 +1037,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
// Read data then call the sendmsg implementation.
if msg.IovLen > linux.UIO_MAXIOV {
- return 0, syserror.EMSGSIZE
+ return 0, linuxerr.EMSGSIZE
}
src, err := t.IovecsIOSequence(hostarch.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{
AddressSpaceActive: true,
@@ -1063,7 +1062,7 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
// Call the syscall implementation.
n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, controlMessages)
- err = slinux.HandleIOErrorVFS2(t, n != 0, e.ToError(), syserror.ERESTARTSYS, "sendmsg", file)
+ err = slinux.HandleIOErrorVFS2(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendmsg", file)
// Control messages should be released on error as well as for zero-length
// messages, which are discarded by the receiver.
if n == 0 || err != nil {
@@ -1077,20 +1076,20 @@ func sendSingleMsg(t *kernel.Task, s socket.SocketVFS2, file *vfs.FileDescriptio
func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags int32, namePtr hostarch.Addr, nameLen uint32) (uintptr, error) {
bl := int(bufLen)
if bl < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Get socket from the file descriptor.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
defer file.DecRef(t)
// Extract the socket.
s, ok := file.Impl().(socket.SocketVFS2)
if !ok {
- return 0, syserror.ENOTSOCK
+ return 0, linuxerr.ENOTSOCK
}
if (file.StatusFlags() & linux.SOCK_NONBLOCK) != 0 {
@@ -1125,7 +1124,7 @@ func sendTo(t *kernel.Task, fd int32, bufPtr hostarch.Addr, bufLen uint64, flags
// Call the syscall implementation.
n, e := s.SendMsg(t, src, to, int(flags), haveDeadline, deadline, socket.ControlMessages{Unix: control.New(t, s, nil)})
- return uintptr(n), slinux.HandleIOErrorVFS2(t, n != 0, e.ToError(), syserror.ERESTARTSYS, "sendto", file)
+ return uintptr(n), slinux.HandleIOErrorVFS2(t, n != 0, e.ToError(), linuxerr.ERESTARTSYS, "sendto", file)
}
// SendTo implements the linux syscall sendto(2).
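The socket hunks above replace syserror.ConvertIntr(..., syserror.ERESTARTSYS) with syserr.ConvertIntr(..., linuxerr.ERESTARTSYS). As a rough, hedged sketch of what a helper of that shape does — substitute the restart errno when the operation was interrupted by a signal, and pass every other error through — the standalone program below uses stand-in sentinel errors rather than the real pkg/syserr and pkg/errors/linuxerr APIs:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the interruption sentinel and linuxerr.ERESTARTSYS; the real
// values live in gVisor's error packages and are not reproduced here.
var (
	errInterrupted = errors.New("interrupted by signal")
	errRestartSys  = errors.New("ERESTARTSYS (restart the syscall)")
)

// convertIntr mirrors the shape of an interrupt-conversion helper: if err is
// the generic interruption error, return intr so the task layer restarts the
// syscall after handling the signal; otherwise return err unchanged.
func convertIntr(err, intr error) error {
	if errors.Is(err, errInterrupted) {
		return intr
	}
	return err
}

func main() {
	fmt.Println(convertIntr(errInterrupted, errRestartSys))             // restart
	fmt.Println(convertIntr(errors.New("ECONNREFUSED"), errRestartSys)) // passes through
	fmt.Println(convertIntr(nil, errRestartSys))                        // <nil>
}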
diff --git a/pkg/sentry/syscalls/linux/vfs2/splice.go b/pkg/sentry/syscalls/linux/vfs2/splice.go
index 19e175203..0205f09e0 100644
--- a/pkg/sentry/syscalls/linux/vfs2/splice.go
+++ b/pkg/sentry/syscalls/linux/vfs2/splice.go
@@ -18,6 +18,7 @@ import (
"io"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
@@ -25,7 +26,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/pipe"
slinux "gvisor.dev/gvisor/pkg/sentry/syscalls/linux"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -46,29 +46,29 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
count = int64(kernel.MAX_RW_COUNT)
}
if count < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check for invalid flags.
if flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get file descriptions.
inFile := t.GetFileVFS2(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
outFile := t.GetFileVFS2(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
// Check that both files support the required directionality.
if !inFile.IsReadable() || !outFile.IsWritable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// The operation is non-blocking if anything is non-blocking.
@@ -82,38 +82,38 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
inPipeFD, inIsPipe := inFile.Impl().(*pipe.VFSPipeFD)
outPipeFD, outIsPipe := outFile.Impl().(*pipe.VFSPipeFD)
if !inIsPipe && !outIsPipe {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Copy in offsets.
inOffset := int64(-1)
if inOffsetPtr != 0 {
if inIsPipe {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
if inFile.Options().DenyPRead {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if _, err := primitive.CopyInt64In(t, inOffsetPtr, &inOffset); err != nil {
return 0, nil, err
}
if inOffset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
outOffset := int64(-1)
if outOffsetPtr != 0 {
if outIsPipe {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
if outFile.Options().DenyPWrite {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if _, err := primitive.CopyInt64In(t, outOffsetPtr, &outOffset); err != nil {
return 0, nil, err
}
if outOffset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
@@ -150,7 +150,7 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
panic("at least one end of splice must be a pipe")
}
- if n != 0 || err != syserror.ErrWouldBlock || nonBlock {
+ if n != 0 || err != linuxerr.ErrWouldBlock || nonBlock {
break
}
if err = dw.waitForBoth(t); err != nil {
@@ -172,7 +172,7 @@ func Splice(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
// We can only pass a single file to handleIOError, so pick inFile arbitrarily.
// This is used only for debugging purposes.
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "splice", outFile)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "splice", outFile)
}
// Tee implements Linux syscall tee(2).
@@ -189,29 +189,29 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
count = int64(kernel.MAX_RW_COUNT)
}
if count < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Check for invalid flags.
if flags&^(linux.SPLICE_F_MOVE|linux.SPLICE_F_NONBLOCK|linux.SPLICE_F_MORE|linux.SPLICE_F_GIFT) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Get file descriptions.
inFile := t.GetFileVFS2(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
outFile := t.GetFileVFS2(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
// Check that both files support the required directionality.
if !inFile.IsReadable() || !outFile.IsWritable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// The operation is non-blocking if anything is non-blocking.
@@ -225,7 +225,7 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
inPipeFD, inIsPipe := inFile.Impl().(*pipe.VFSPipeFD)
outPipeFD, outIsPipe := outFile.Impl().(*pipe.VFSPipeFD)
if !inIsPipe || !outIsPipe {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Copy data.
@@ -240,7 +240,7 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
defer dw.destroy()
for {
n, err = pipe.Tee(t, outPipeFD, inPipeFD, count)
- if n != 0 || err != syserror.ErrWouldBlock || nonBlock {
+ if n != 0 || err != linuxerr.ErrWouldBlock || nonBlock {
break
}
if err = dw.waitForBoth(t); err != nil {
@@ -250,7 +250,7 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
if n != 0 {
// If a partial write is completed, the error is dropped. Log it here.
- if err != nil && err != io.EOF && err != syserror.ErrWouldBlock {
+ if err != nil && err != io.EOF && err != linuxerr.ErrWouldBlock {
log.Debugf("tee completed a partial write with error: %v", err)
err = nil
}
@@ -258,7 +258,7 @@ func Tee(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallCo
// We can only pass a single file to handleIOError, so pick inFile arbitrarily.
// This is used only for debugging purposes.
- return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, syserror.ERESTARTSYS, "tee", inFile)
+ return uintptr(n), nil, slinux.HandleIOErrorVFS2(t, n != 0, err, linuxerr.ERESTARTSYS, "tee", inFile)
}
// Sendfile implements linux system call sendfile(2).
@@ -270,25 +270,25 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
inFile := t.GetFileVFS2(inFD)
if inFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer inFile.DecRef(t)
if !inFile.IsReadable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
outFile := t.GetFileVFS2(outFD)
if outFile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer outFile.DecRef(t)
if !outFile.IsWritable() {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
// Verify that the outFile Append flag is not set.
if outFile.StatusFlags()&linux.O_APPEND != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Verify that inFile is a regular file or block device. This is a
@@ -298,14 +298,14 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
return 0, nil, err
} else if stat.Mask&linux.STATX_TYPE == 0 ||
(stat.Mode&linux.S_IFMT != linux.S_IFREG && stat.Mode&linux.S_IFMT != linux.S_IFBLK) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Copy offset if it exists.
offset := int64(-1)
if offsetAddr != 0 {
if inFile.Options().DenyPRead {
- return 0, nil, syserror.ESPIPE
+ return 0, nil, linuxerr.ESPIPE
}
var offsetP primitive.Int64
if _, err := offsetP.CopyIn(t, offsetAddr); err != nil {
@@ -314,16 +314,16 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
offset = int64(offsetP)
if offset < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if offset+count < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
}
// Validate count. This must come after offset checks.
if count < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if count == 0 {
return 0, nil, nil
@@ -359,10 +359,10 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
break
}
if err == nil && t.Interrupted() {
- err = syserror.ErrInterrupted
+ err = linuxerr.ErrInterrupted
break
}
- if err == syserror.ErrWouldBlock && !nonBlock {
+ if err == linuxerr.ErrWouldBlock && !nonBlock {
err = dw.waitForBoth(t)
}
if err != nil {
@@ -388,7 +388,7 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
var writeN int64
writeN, err = outFile.Write(t, usermem.BytesIOSequence(wbuf), vfs.WriteOptions{})
wbuf = wbuf[writeN:]
- if err == syserror.ErrWouldBlock && !nonBlock {
+ if err == linuxerr.ErrWouldBlock && !nonBlock {
err = dw.waitForOut(t)
}
if err != nil {
@@ -419,10 +419,10 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
break
}
if err == nil && t.Interrupted() {
- err = syserror.ErrInterrupted
+ err = linuxerr.ErrInterrupted
break
}
- if err == syserror.ErrWouldBlock && !nonBlock {
+ if err == linuxerr.ErrWouldBlock && !nonBlock {
err = dw.waitForBoth(t)
}
if err != nil {
@@ -440,7 +440,7 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
}
if total != 0 {
- if err != nil && err != io.EOF && err != syserror.ErrWouldBlock {
+ if err != nil && err != io.EOF && err != linuxerr.ErrWouldBlock {
// If a partial write is completed, the error is dropped. Log it here.
log.Debugf("sendfile completed a partial write with error: %v", err)
err = nil
@@ -449,7 +449,7 @@ func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysc
// We can only pass a single file to handleIOError, so pick inFile arbitrarily.
// This is used only for debugging purposes.
- return uintptr(total), nil, slinux.HandleIOErrorVFS2(t, total != 0, err, syserror.ERESTARTSYS, "sendfile", inFile)
+ return uintptr(total), nil, slinux.HandleIOErrorVFS2(t, total != 0, err, linuxerr.ERESTARTSYS, "sendfile", inFile)
}
// dualWaiter is used to wait on one or both vfs.FileDescriptions. It is not
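Splice, Tee, and Sendfile above share the same retry shape: attempt the transfer, and if nothing moved and the only problem was linuxerr.ErrWouldBlock on a blocking file, wait for readiness and try again. Below is a minimal, self-contained sketch of that loop, with hypothetical doWork and waitReady callbacks standing in for the pipe/file operations and the dualWaiter:

package main

import (
	"errors"
	"fmt"
)

// errWouldBlock stands in for linuxerr.ErrWouldBlock.
var errWouldBlock = errors.New("operation would block")

// transfer retries doWork until it makes progress, hits a real error, or the
// caller requested non-blocking behavior; otherwise it waits for readiness
// (the role played by dualWaiter above) and tries again.
func transfer(nonBlock bool, doWork func() (int64, error), waitReady func() error) (int64, error) {
	for {
		n, err := doWork()
		if n != 0 || !errors.Is(err, errWouldBlock) || nonBlock {
			return n, err
		}
		if err := waitReady(); err != nil {
			return 0, err
		}
	}
}

func main() {
	attempts := 0
	n, err := transfer(false,
		func() (int64, error) { // succeeds on the third attempt
			attempts++
			if attempts < 3 {
				return 0, errWouldBlock
			}
			return 42, nil
		},
		func() error { return nil }, // pretend the wait always succeeds
	)
	fmt.Println(n, err, attempts) // 42 <nil> 3
}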
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat.go b/pkg/sentry/syscalls/linux/vfs2/stat.go
index 69e77fa99..adaf8db3f 100644
--- a/pkg/sentry/syscalls/linux/vfs2/stat.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat.go
@@ -17,15 +17,14 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// Stat implements Linux syscall stat(2).
@@ -53,7 +52,7 @@ func Newfstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flags int32) error {
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
opts := vfs.StatOptions{
@@ -70,7 +69,7 @@ func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flag
start := root
if !path.Absolute {
if !path.HasComponents() && flags&linux.AT_EMPTY_PATH == 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if dirfd == linux.AT_FDCWD {
start = t.FSContext().WorkingDirectoryVFS2()
@@ -78,7 +77,7 @@ func fstatat(t *kernel.Task, dirfd int32, pathAddr, statAddr hostarch.Addr, flag
} else {
dirfile := t.GetFileVFS2(dirfd)
if dirfile == nil {
- return syserror.EBADF
+ return linuxerr.EBADF
}
if !path.HasComponents() {
// Use FileDescription.Stat() instead of
@@ -131,7 +130,7 @@ func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -156,15 +155,15 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
statxAddr := args[4].Pointer()
if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW|linux.AT_STATX_SYNC_TYPE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Make sure that only one sync type option is set.
syncType := uint32(flags & linux.AT_STATX_SYNC_TYPE)
if syncType != 0 && !bits.IsPowerOfTwo32(syncType) {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if mask&linux.STATX__RESERVED != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
opts := vfs.StatOptions{
@@ -182,7 +181,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
start := root
if !path.Absolute {
if !path.HasComponents() && flags&linux.AT_EMPTY_PATH == 0 {
- return 0, nil, syserror.ENOENT
+ return 0, nil, linuxerr.ENOENT
}
if dirfd == linux.AT_FDCWD {
start = t.FSContext().WorkingDirectoryVFS2()
@@ -190,7 +189,7 @@ func Statx(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
} else {
dirfile := t.GetFileVFS2(dirfd)
if dirfile == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
if !path.HasComponents() {
// Use FileDescription.Stat() instead of
@@ -272,7 +271,7 @@ func accessAt(t *kernel.Task, dirfd int32, pathAddr hostarch.Addr, mode uint) er
// Sanity check the mode.
if mode&^(rOK|wOK|xOK) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
path, err := copyInPath(t, pathAddr)
@@ -315,7 +314,7 @@ func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
func readlinkat(t *kernel.Task, dirfd int32, pathAddr, bufAddr hostarch.Addr, size uint) (uintptr, *kernel.SyscallControl, error) {
if int(size) <= 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
path, err := copyInPath(t, pathAddr)
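Statx above rejects requests that set more than one AT_STATX_SYNC_TYPE bit by checking that the nonzero sync type is a power of two (bits.IsPowerOfTwo32 is the pkg/bits helper imported at the top of the file). A tiny standalone illustration of that check using the standard x&(x-1) trick, with illustrative flag values:

package main

import "fmt"

// isSingleFlag reports whether exactly one bit of x is set, i.e. x is a
// nonzero power of two.
func isSingleFlag(x uint32) bool {
	return x != 0 && x&(x-1) == 0
}

func main() {
	const forceSync = 0x2000 // example flag values in the sync-type range
	const dontSync = 0x4000
	fmt.Println(isSingleFlag(forceSync))            // true: one sync type
	fmt.Println(isSingleFlag(forceSync | dontSync)) // false: conflicting sync types
}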
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat_amd64.go b/pkg/sentry/syscalls/linux/vfs2/stat_amd64.go
index 2da538fc6..122921b52 100644
--- a/pkg/sentry/syscalls/linux/vfs2/stat_amd64.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package vfs2
diff --git a/pkg/sentry/syscalls/linux/vfs2/stat_arm64.go b/pkg/sentry/syscalls/linux/vfs2/stat_arm64.go
index 88b9c7627..d32031481 100644
--- a/pkg/sentry/syscalls/linux/vfs2/stat_arm64.go
+++ b/pkg/sentry/syscalls/linux/vfs2/stat_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package vfs2
diff --git a/pkg/sentry/syscalls/linux/vfs2/sync.go b/pkg/sentry/syscalls/linux/vfs2/sync.go
index 1f8a5878c..cfc693422 100644
--- a/pkg/sentry/syscalls/linux/vfs2/sync.go
+++ b/pkg/sentry/syscalls/linux/vfs2/sync.go
@@ -16,9 +16,10 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/syserr"
)
// Sync implements Linux syscall sync(2).
@@ -32,12 +33,12 @@ func Syncfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscal
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
if file.StatusFlags()&linux.O_PATH != 0 {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
return 0, nil, file.SyncFS(t)
@@ -49,7 +50,7 @@ func Fsync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -71,15 +72,15 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
// Check for negative values and overflow.
if offset < 0 || offset+nbytes < 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
if flags&^(linux.SYNC_FILE_RANGE_WAIT_BEFORE|linux.SYNC_FILE_RANGE_WRITE|linux.SYNC_FILE_RANGE_WAIT_AFTER) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -107,12 +108,12 @@ func SyncFileRange(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
if flags&linux.SYNC_FILE_RANGE_WAIT_BEFORE != 0 &&
flags&linux.SYNC_FILE_RANGE_WAIT_AFTER == 0 {
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
}
if flags&linux.SYNC_FILE_RANGE_WAIT_AFTER != 0 {
if err := file.Sync(t); err != nil {
- return 0, nil, syserror.ConvertIntr(err, syserror.ERESTARTSYS)
+ return 0, nil, syserr.ConvertIntr(err, linuxerr.ERESTARTSYS)
}
}
return 0, nil, nil
diff --git a/pkg/sentry/syscalls/linux/vfs2/timerfd.go b/pkg/sentry/syscalls/linux/vfs2/timerfd.go
index 250870c03..b8f96a757 100644
--- a/pkg/sentry/syscalls/linux/vfs2/timerfd.go
+++ b/pkg/sentry/syscalls/linux/vfs2/timerfd.go
@@ -16,11 +16,11 @@ package vfs2
import (
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/timerfd"
"gvisor.dev/gvisor/pkg/sentry/kernel"
ktime "gvisor.dev/gvisor/pkg/sentry/kernel/time"
- "gvisor.dev/gvisor/pkg/syserror"
)
// TimerfdCreate implements Linux syscall timerfd_create(2).
@@ -29,7 +29,7 @@ func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
flags := args[1].Int()
if flags&^(linux.TFD_CLOEXEC|linux.TFD_NONBLOCK) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
// Timerfds aren't writable per se (their implementation of Write just
@@ -47,7 +47,7 @@ func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel
case linux.CLOCK_MONOTONIC, linux.CLOCK_BOOTTIME:
clock = t.Kernel().MonotonicClock()
default:
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
vfsObj := t.Kernel().VFS()
file, err := timerfd.New(t, vfsObj, clock, fileFlags)
@@ -72,18 +72,18 @@ func TimerfdSettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
oldValAddr := args[3].Pointer()
if flags&^(linux.TFD_TIMER_ABSTIME) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
tfd, ok := file.Impl().(*timerfd.TimerFileDescription)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
var newVal linux.Itimerspec
@@ -111,13 +111,13 @@ func TimerfdGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kerne
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
tfd, ok := file.Impl().(*timerfd.TimerFileDescription)
if !ok {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
tm, s := tfd.GetTime()
diff --git a/pkg/sentry/syscalls/linux/vfs2/xattr.go b/pkg/sentry/syscalls/linux/vfs2/xattr.go
index c261050c6..7b2f69c45 100644
--- a/pkg/sentry/syscalls/linux/vfs2/xattr.go
+++ b/pkg/sentry/syscalls/linux/vfs2/xattr.go
@@ -18,13 +18,12 @@ import (
"bytes"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
-
- "gvisor.dev/gvisor/pkg/hostarch"
)
// ListXattr implements Linux syscall listxattr(2).
@@ -71,7 +70,7 @@ func Flistxattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sy
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -140,7 +139,7 @@ func Fgetxattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -178,7 +177,7 @@ func setxattr(t *kernel.Task, args arch.SyscallArguments, shouldFollowFinalSymli
flags := args[4].Int()
if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
path, err := copyInPath(t, pathAddr)
@@ -216,12 +215,12 @@ func Fsetxattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sys
flags := args[4].Int()
if flags&^(linux.XATTR_CREATE|linux.XATTR_REPLACE) != 0 {
- return 0, nil, syserror.EINVAL
+ return 0, nil, linuxerr.EINVAL
}
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -280,7 +279,7 @@ func Fremovexattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
file := t.GetFileVFS2(fd)
if file == nil {
- return 0, nil, syserror.EBADF
+ return 0, nil, linuxerr.EBADF
}
defer file.DecRef(t)
@@ -295,13 +294,13 @@ func Fremovexattr(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.
func copyInXattrName(t *kernel.Task, nameAddr hostarch.Addr) (string, error) {
name, err := t.CopyInString(nameAddr, linux.XATTR_NAME_MAX+1)
if err != nil {
- if err == syserror.ENAMETOOLONG {
- return "", syserror.ERANGE
+ if linuxerr.Equals(linuxerr.ENAMETOOLONG, err) {
+ return "", linuxerr.ERANGE
}
return "", err
}
if len(name) == 0 {
- return "", syserror.ERANGE
+ return "", linuxerr.ERANGE
}
return name, nil
}
@@ -321,16 +320,16 @@ func copyOutXattrNameList(t *kernel.Task, listAddr hostarch.Addr, size uint, nam
}
if buf.Len() > int(size) {
if size >= linux.XATTR_LIST_MAX {
- return 0, syserror.E2BIG
+ return 0, linuxerr.E2BIG
}
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
return t.CopyOutBytes(listAddr, buf.Bytes())
}
func copyInXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint) (string, error) {
if size > linux.XATTR_SIZE_MAX {
- return "", syserror.E2BIG
+ return "", linuxerr.E2BIG
}
buf := make([]byte, size)
if _, err := t.CopyInBytes(valueAddr, buf); err != nil {
@@ -349,9 +348,9 @@ func copyOutXattrValue(t *kernel.Task, valueAddr hostarch.Addr, size uint, value
}
if len(value) > int(size) {
if size >= linux.XATTR_SIZE_MAX {
- return 0, syserror.E2BIG
+ return 0, linuxerr.E2BIG
}
- return 0, syserror.ERANGE
+ return 0, linuxerr.ERANGE
}
return t.CopyOutBytes(valueAddr, gohacks.ImmutableBytesFromString(value))
}
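copyInXattrName above (like the accept path earlier) switches from comparing errors with == to linuxerr.Equals, which matches on the errno an error represents rather than on one specific sentinel value. The following is a hedged, standalone illustration of that idea using syscall.Errno and errors.As; the real linuxerr.Equals implementation lives in pkg/errors/linuxerr and is not reproduced here:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// equalsErrno reports whether err carries the given errno, no matter which
// layer wrapped it. It plays the role of the errno comparison above but is
// not the real linuxerr implementation.
func equalsErrno(want syscall.Errno, err error) bool {
	var got syscall.Errno
	return errors.As(err, &got) && got == want
}

func main() {
	err := fmt.Errorf("copying in xattr name: %w", syscall.ENAMETOOLONG)
	fmt.Println(equalsErrno(syscall.ENAMETOOLONG, err)) // true
	fmt.Println(equalsErrno(syscall.EINVAL, err))       // false
}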
diff --git a/pkg/sentry/syscalls/syscalls.go b/pkg/sentry/syscalls/syscalls.go
index f88055676..cfcc21271 100644
--- a/pkg/sentry/syscalls/syscalls.go
+++ b/pkg/sentry/syscalls/syscalls.go
@@ -28,9 +28,9 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Supported returns a syscall that is fully supported.
@@ -99,13 +99,13 @@ func CapError(name string, c linux.Capability, note string, urls []string) kerne
Name: name,
Fn: func(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
if !t.HasCapability(c) {
- return 0, nil, syserror.EPERM
+ return 0, nil, linuxerr.EPERM
}
t.Kernel().EmitUnimplementedEvent(t)
- return 0, nil, syserror.ENOSYS
+ return 0, nil, linuxerr.ENOSYS
},
SupportLevel: kernel.SupportUnimplemented,
- Note: fmt.Sprintf("%sReturns %q if the process does not have %s; %q otherwise.", note, syserror.EPERM, c.String(), syserror.ENOSYS),
+ Note: fmt.Sprintf("%sReturns %q if the process does not have %s; %q otherwise.", note, linuxerr.EPERM, c.String(), linuxerr.ENOSYS),
URLs: urls,
}
}
diff --git a/pkg/sentry/time/BUILD b/pkg/sentry/time/BUILD
index 362dea76d..c21971322 100644
--- a/pkg/sentry/time/BUILD
+++ b/pkg/sentry/time/BUILD
@@ -25,6 +25,8 @@ go_library(
"muldiv_arm64.s",
"parameters.go",
"sampler.go",
+ "sampler_amd64.go",
+ "sampler_arm64.go",
"sampler_unsafe.go",
"seqatomic_parameters_unsafe.go",
"tsc_amd64.s",
@@ -32,11 +34,11 @@ go_library(
],
visibility = ["//:sandbox"],
deps = [
+ "//pkg/errors/linuxerr",
"//pkg/gohacks",
"//pkg/log",
"//pkg/metric",
"//pkg/sync",
- "//pkg/syserror",
"@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/sentry/time/calibrated_clock.go b/pkg/sentry/time/calibrated_clock.go
index 39bf1e0de..eed74f6bd 100644
--- a/pkg/sentry/time/calibrated_clock.go
+++ b/pkg/sentry/time/calibrated_clock.go
@@ -19,10 +19,10 @@ package time
import (
"time"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/metric"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// CalibratedClock implements a clock that tracks a reference clock.
@@ -259,6 +259,6 @@ func (c *CalibratedClocks) GetTime(id ClockID) (int64, error) {
case Realtime:
return c.realtime.GetTime()
default:
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
}
diff --git a/pkg/sentry/time/calibrated_clock_test.go b/pkg/sentry/time/calibrated_clock_test.go
index d6622bfe2..0a4b1f1bf 100644
--- a/pkg/sentry/time/calibrated_clock_test.go
+++ b/pkg/sentry/time/calibrated_clock_test.go
@@ -50,6 +50,7 @@ func TestConstantFrequency(t *testing.T) {
if !c.ready {
c.mu.RUnlock()
t.Fatalf("clock not ready")
+ return // For checklocks consistency.
}
// A bit after the last sample.
now, ok := c.params.ComputeTime(750000)
diff --git a/pkg/sentry/time/sampler.go b/pkg/sentry/time/sampler.go
index 4ac9c4474..24a47f5d5 100644
--- a/pkg/sentry/time/sampler.go
+++ b/pkg/sentry/time/sampler.go
@@ -21,13 +21,6 @@ import (
)
const (
- // defaultOverheadTSC is the default estimated syscall overhead in TSC cycles.
- // It is further refined as syscalls are made.
- defaultOverheadCycles = 1 * 1000
-
- // maxOverheadCycles is the maximum allowed syscall overhead in TSC cycles.
- maxOverheadCycles = 100 * defaultOverheadCycles
-
// maxSampleLoops is the maximum number of times to try to get a clock sample
// under the expected overhead.
maxSampleLoops = 5
diff --git a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go b/pkg/sentry/time/sampler_amd64.go
index a4bc08411..5fa1832b4 100644
--- a/pkg/sentry/fsimpl/ext/disklayout/test_utils.go
+++ b/pkg/sentry/time/sampler_amd64.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The gVisor Authors.
+// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,19 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package disklayout
+//go:build amd64
+// +build amd64
-import (
- "reflect"
- "testing"
+package time
- "gvisor.dev/gvisor/pkg/marshal"
-)
-
-func assertSize(t *testing.T, v marshal.Marshallable, want int) {
- t.Helper()
+const (
+ // defaultOverheadCycles is the default estimated syscall overhead in TSC cycles.
+ // It is further refined as syscalls are made.
+ defaultOverheadCycles = 1 * 1000
- if got := v.SizeBytes(); got != want {
- t.Errorf("struct %s should be exactly %d bytes but is %d bytes", reflect.TypeOf(v).Name(), want, got)
- }
-}
+ // maxOverheadCycles is the maximum allowed syscall overhead in TSC cycles.
+ maxOverheadCycles = 100 * defaultOverheadCycles
+)
diff --git a/pkg/sentry/time/sampler_arm64.go b/pkg/sentry/time/sampler_arm64.go
new file mode 100644
index 000000000..3560e66ae
--- /dev/null
+++ b/pkg/sentry/time/sampler_arm64.go
@@ -0,0 +1,43 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build arm64
+// +build arm64
+
+package time
+
+// getCNTFRQ gets the ARM counter-timer frequency.
+func getCNTFRQ() TSCValue
+
+// getDefaultArchOverheadCycles gets the default OverheadCycles based on the
+// ARM counter-timer frequency. The ARM counter-timer frequency usually ranges
+// from 1-50MHz, which is much lower than on x86, so we calibrate
+// defaultOverheadCycles for ARM.
+func getDefaultArchOverheadCycles() TSCValue {
+ // The estimated clock frequency on x86 is 1GHz, so dividing 1GHz by the
+ // ARM counter-timer frequency gives frqRatio. The ARM defaultOverheadCycles
+ // is the x86 value divided by frqRatio.
+ cntfrq := getCNTFRQ()
+ frqRatio := 1000000000 / cntfrq
+ overheadCycles := (1 * 1000) / frqRatio
+ return overheadCycles
+}
+
+// defaultOverheadCycles is the default estimated syscall overhead in TSC cycles.
+// It is further refined as syscalls are made.
+var defaultOverheadCycles = getDefaultArchOverheadCycles()
+
+// maxOverheadCycles is the maximum allowed syscall overhead in TSC cycles.
+var maxOverheadCycles = 100 * defaultOverheadCycles
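To make the calibration above concrete, here is a small worked example with an assumed counter-timer frequency; it only reproduces the arithmetic from getDefaultArchOverheadCycles and does not touch the sentry time package:

package main

import "fmt"

func main() {
	const (
		x86OverheadCycles = 1 * 1000   // x86 default overhead estimate, in TSC cycles
		x86ClockHz        = 1000000000 // assumed 1GHz reference clock
		cntfrqHz          = 25000000   // assumed ARM counter-timer frequency: 25MHz
	)
	frqRatio := x86ClockHz / cntfrqHz              // 40
	overheadCycles := x86OverheadCycles / frqRatio // 1000 / 40 = 25 counter-timer ticks
	fmt.Println(frqRatio, overheadCycles)          // 40 25
}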
diff --git a/pkg/sentry/time/tsc_arm64.s b/pkg/sentry/time/tsc_arm64.s
index da9fa4112..711349fa1 100644
--- a/pkg/sentry/time/tsc_arm64.s
+++ b/pkg/sentry/time/tsc_arm64.s
@@ -20,3 +20,9 @@ TEXT ·Rdtsc(SB),NOSPLIT,$0-8
WORD $0xd53be040 //MRS CNTVCT_EL0, R0
MOVD R0, ret+0(FP)
RET
+
+TEXT ·getCNTFRQ(SB),NOSPLIT,$0-8
+ // Get the virtual counter frequency.
+ WORD $0xd53be000 //MRS CNTFRQ_EL0, R0
+ MOVD R0, ret+0(FP)
+ RET
diff --git a/pkg/sentry/usage/memory.go b/pkg/sentry/usage/memory.go
index 581862ee2..e7073ec87 100644
--- a/pkg/sentry/usage/memory.go
+++ b/pkg/sentry/usage/memory.go
@@ -132,7 +132,7 @@ func Init() error {
// always be the case for a newly mapped page from /dev/shm. If we obtain
// the shared memory through some other means in the future, we may have to
// explicitly zero the page.
- mmap, err := unix.Mmap(int(file.Fd()), 0, int(RTMemoryStatsSize), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
+ mmap, err := memutil.MapFile(0, RTMemoryStatsSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED, file.Fd(), 0)
if err != nil {
return fmt.Errorf("error mapping usage file: %v", err)
}
diff --git a/pkg/sentry/usage/memory_unsafe.go b/pkg/sentry/usage/memory_unsafe.go
index 9e0014ca0..bc1531b91 100644
--- a/pkg/sentry/usage/memory_unsafe.go
+++ b/pkg/sentry/usage/memory_unsafe.go
@@ -21,7 +21,7 @@ import (
// RTMemoryStatsSize is the size of the RTMemoryStats struct.
var RTMemoryStatsSize = unsafe.Sizeof(RTMemoryStats{})
-// RTMemoryStatsPointer casts the address of the byte slice into a RTMemoryStats pointer.
-func RTMemoryStatsPointer(b []byte) *RTMemoryStats {
- return (*RTMemoryStats)(unsafe.Pointer(&b[0]))
+// RTMemoryStatsPointer casts addr to a RTMemoryStats pointer.
+func RTMemoryStatsPointer(addr uintptr) *RTMemoryStats {
+ return (*RTMemoryStats)(unsafe.Pointer(addr))
}
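The usage changes above move from unix.Mmap returning a byte slice to memutil.MapFile returning a raw address, with RTMemoryStatsPointer now casting that address directly. The underlying technique — viewing a shared mapping as a Go struct — looks roughly like the sketch below, which deliberately sticks to plain x/sys/unix calls and an anonymous mapping rather than the memutil helper:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// counters is a stand-in for a stats struct laid out in shared memory.
type counters struct {
	RSS   uint64
	Dirty uint64
}

func main() {
	size := int(unsafe.Sizeof(counters{}))
	b, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_SHARED|unix.MAP_ANONYMOUS)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(b)

	// View the mapping as a struct; the mapping outlives this pointer use.
	c := (*counters)(unsafe.Pointer(&b[0]))
	c.RSS = 4096
	fmt.Println(c.RSS, c.Dirty) // 4096 0
}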
diff --git a/pkg/sentry/vfs/BUILD b/pkg/sentry/vfs/BUILD
index ac60fe8bf..914574543 100644
--- a/pkg/sentry/vfs/BUILD
+++ b/pkg/sentry/vfs/BUILD
@@ -95,6 +95,7 @@ go_library(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/fd",
"//pkg/fdnotifier",
"//pkg/fspath",
@@ -115,7 +116,6 @@ go_library(
"//pkg/sentry/socket/unix/transport",
"//pkg/sentry/uniqueid",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
"//pkg/waiter",
"@org_golang_x_sys//unix:go_default_library",
@@ -133,9 +133,9 @@ go_test(
deps = [
"//pkg/abi/linux",
"//pkg/context",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/contexttest",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/usermem",
],
)
diff --git a/pkg/sentry/vfs/README.md b/pkg/sentry/vfs/README.md
index 5aad31b78..82ee2c521 100644
--- a/pkg/sentry/vfs/README.md
+++ b/pkg/sentry/vfs/README.md
@@ -1,9 +1,5 @@
# The gVisor Virtual Filesystem
-THIS PACKAGE IS CURRENTLY EXPERIMENTAL AND NOT READY OR ENABLED FOR PRODUCTION
-USE. For the filesystem implementation currently used by gVisor, see the `fs`
-package.
-
## Implementation Notes
### Reference Counting
diff --git a/pkg/sentry/vfs/anonfs.go b/pkg/sentry/vfs/anonfs.go
index f48817132..255d3992e 100644
--- a/pkg/sentry/vfs/anonfs.go
+++ b/pkg/sentry/vfs/anonfs.go
@@ -19,11 +19,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
- "gvisor.dev/gvisor/pkg/syserror"
)
// NewAnonVirtualDentry returns a VirtualDentry with the given synthetic name,
@@ -101,7 +101,7 @@ func (fs *anonFilesystem) Sync(ctx context.Context) error {
// AccessAt implements vfs.Filesystem.Impl.AccessAt.
func (fs *anonFilesystem) AccessAt(ctx context.Context, rp *ResolvingPath, creds *auth.Credentials, ats AccessTypes) error {
if !rp.Done() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
return GenericCheckPermissions(creds, ats, anonFileMode, anonFileUID, anonFileGID)
}
@@ -109,10 +109,10 @@ func (fs *anonFilesystem) AccessAt(ctx context.Context, rp *ResolvingPath, creds
// GetDentryAt implements FilesystemImpl.GetDentryAt.
func (fs *anonFilesystem) GetDentryAt(ctx context.Context, rp *ResolvingPath, opts GetDentryOptions) (*Dentry, error) {
if !rp.Done() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if opts.CheckSearchable {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// anonDentry no-ops refcounting.
return rp.Start(), nil
@@ -121,7 +121,7 @@ func (fs *anonFilesystem) GetDentryAt(ctx context.Context, rp *ResolvingPath, op
// GetParentDentryAt implements FilesystemImpl.GetParentDentryAt.
func (fs *anonFilesystem) GetParentDentryAt(ctx context.Context, rp *ResolvingPath) (*Dentry, error) {
if !rp.Final() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
// anonDentry no-ops refcounting.
return rp.Start(), nil
@@ -130,63 +130,63 @@ func (fs *anonFilesystem) GetParentDentryAt(ctx context.Context, rp *ResolvingPa
// LinkAt implements FilesystemImpl.LinkAt.
func (fs *anonFilesystem) LinkAt(ctx context.Context, rp *ResolvingPath, vd VirtualDentry) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// MkdirAt implements FilesystemImpl.MkdirAt.
func (fs *anonFilesystem) MkdirAt(ctx context.Context, rp *ResolvingPath, opts MkdirOptions) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// MknodAt implements FilesystemImpl.MknodAt.
func (fs *anonFilesystem) MknodAt(ctx context.Context, rp *ResolvingPath, opts MknodOptions) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// OpenAt implements FilesystemImpl.OpenAt.
func (fs *anonFilesystem) OpenAt(ctx context.Context, rp *ResolvingPath, opts OpenOptions) (*FileDescription, error) {
if !rp.Done() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
- return nil, syserror.ENODEV
+ return nil, linuxerr.ENODEV
}
// ReadlinkAt implements FilesystemImpl.ReadlinkAt.
func (fs *anonFilesystem) ReadlinkAt(ctx context.Context, rp *ResolvingPath) (string, error) {
if !rp.Done() {
- return "", syserror.ENOTDIR
+ return "", linuxerr.ENOTDIR
}
- return "", syserror.EINVAL
+ return "", linuxerr.EINVAL
}
// RenameAt implements FilesystemImpl.RenameAt.
func (fs *anonFilesystem) RenameAt(ctx context.Context, rp *ResolvingPath, oldParentVD VirtualDentry, oldName string, opts RenameOptions) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// RmdirAt implements FilesystemImpl.RmdirAt.
func (fs *anonFilesystem) RmdirAt(ctx context.Context, rp *ResolvingPath) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// SetStatAt implements FilesystemImpl.SetStatAt.
func (fs *anonFilesystem) SetStatAt(ctx context.Context, rp *ResolvingPath, opts SetStatOptions) error {
if !rp.Done() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Linux actually permits anon_inode_inode's metadata to be set, which is
// visible to all users of anon_inode_inode. We just silently ignore
@@ -197,7 +197,7 @@ func (fs *anonFilesystem) SetStatAt(ctx context.Context, rp *ResolvingPath, opts
// StatAt implements FilesystemImpl.StatAt.
func (fs *anonFilesystem) StatAt(ctx context.Context, rp *ResolvingPath, opts StatOptions) (linux.Statx, error) {
if !rp.Done() {
- return linux.Statx{}, syserror.ENOTDIR
+ return linux.Statx{}, linuxerr.ENOTDIR
}
// See fs/anon_inodes.c:anon_inode_init() => fs/libfs.c:alloc_anon_inode().
return linux.Statx{
@@ -218,7 +218,7 @@ func (fs *anonFilesystem) StatAt(ctx context.Context, rp *ResolvingPath, opts St
// StatFSAt implements FilesystemImpl.StatFSAt.
func (fs *anonFilesystem) StatFSAt(ctx context.Context, rp *ResolvingPath) (linux.Statfs, error) {
if !rp.Done() {
- return linux.Statfs{}, syserror.ENOTDIR
+ return linux.Statfs{}, linuxerr.ENOTDIR
}
return linux.Statfs{
Type: linux.ANON_INODE_FS_MAGIC,
@@ -229,34 +229,34 @@ func (fs *anonFilesystem) StatFSAt(ctx context.Context, rp *ResolvingPath) (linu
// SymlinkAt implements FilesystemImpl.SymlinkAt.
func (fs *anonFilesystem) SymlinkAt(ctx context.Context, rp *ResolvingPath, target string) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// UnlinkAt implements FilesystemImpl.UnlinkAt.
func (fs *anonFilesystem) UnlinkAt(ctx context.Context, rp *ResolvingPath) error {
if !rp.Final() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// BoundEndpointAt implements FilesystemImpl.BoundEndpointAt.
func (fs *anonFilesystem) BoundEndpointAt(ctx context.Context, rp *ResolvingPath, opts BoundEndpointOptions) (transport.BoundEndpoint, error) {
if !rp.Final() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
if err := GenericCheckPermissions(rp.Credentials(), MayWrite, anonFileMode, anonFileUID, anonFileGID); err != nil {
return nil, err
}
- return nil, syserror.ECONNREFUSED
+ return nil, linuxerr.ECONNREFUSED
}
// ListXattrAt implements FilesystemImpl.ListXattrAt.
func (fs *anonFilesystem) ListXattrAt(ctx context.Context, rp *ResolvingPath, size uint64) ([]string, error) {
if !rp.Done() {
- return nil, syserror.ENOTDIR
+ return nil, linuxerr.ENOTDIR
}
return nil, nil
}
@@ -264,25 +264,25 @@ func (fs *anonFilesystem) ListXattrAt(ctx context.Context, rp *ResolvingPath, si
// GetXattrAt implements FilesystemImpl.GetXattrAt.
func (fs *anonFilesystem) GetXattrAt(ctx context.Context, rp *ResolvingPath, opts GetXattrOptions) (string, error) {
if !rp.Done() {
- return "", syserror.ENOTDIR
+ return "", linuxerr.ENOTDIR
}
- return "", syserror.ENOTSUP
+ return "", linuxerr.ENOTSUP
}
// SetXattrAt implements FilesystemImpl.SetXattrAt.
func (fs *anonFilesystem) SetXattrAt(ctx context.Context, rp *ResolvingPath, opts SetXattrOptions) error {
if !rp.Done() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// RemoveXattrAt implements FilesystemImpl.RemoveXattrAt.
func (fs *anonFilesystem) RemoveXattrAt(ctx context.Context, rp *ResolvingPath, name string) error {
if !rp.Done() {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// PrependPath implements FilesystemImpl.PrependPath.
diff --git a/pkg/sentry/vfs/dentry.go b/pkg/sentry/vfs/dentry.go
index e7ca24d96..cb92b6eee 100644
--- a/pkg/sentry/vfs/dentry.go
+++ b/pkg/sentry/vfs/dentry.go
@@ -18,8 +18,8 @@ import (
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// Dentry represents a node in a Filesystem tree at which a file exists.
@@ -196,11 +196,12 @@ func (d *Dentry) OnZeroWatches(ctx context.Context) {
// PrepareDeleteDentry must be called before attempting to delete the file
// represented by d. If PrepareDeleteDentry succeeds, the caller must call
// AbortDeleteDentry or CommitDeleteDentry depending on the deletion's outcome.
+// +checklocksacquire:d.mu
func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dentry) error {
vfs.mountMu.Lock()
if mntns.mountpoints[d] != 0 {
vfs.mountMu.Unlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY // +checklocksforce: inconsistent return.
}
d.mu.Lock()
vfs.mountMu.Unlock()
@@ -211,14 +212,14 @@ func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dent
// AbortDeleteDentry must be called after PrepareDeleteDentry if the deletion
// fails.
-// +checklocks:d.mu
+// +checklocksrelease:d.mu
func (vfs *VirtualFilesystem) AbortDeleteDentry(d *Dentry) {
d.mu.Unlock()
}
// CommitDeleteDentry must be called after PrepareDeleteDentry if the deletion
// succeeds.
-// +checklocks:d.mu
+// +checklocksrelease:d.mu
func (vfs *VirtualFilesystem) CommitDeleteDentry(ctx context.Context, d *Dentry) {
d.dead = true
d.mu.Unlock()
@@ -249,16 +250,18 @@ func (vfs *VirtualFilesystem) InvalidateDentry(ctx context.Context, d *Dentry) {
// Preconditions:
// * If to is not nil, it must be a child Dentry from the same Filesystem.
// * from != to.
+// +checklocksacquire:from.mu
+// +checklocksacquire:to.mu
func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, to *Dentry) error {
vfs.mountMu.Lock()
if mntns.mountpoints[from] != 0 {
vfs.mountMu.Unlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY // +checklocksforce: no locks acquired.
}
if to != nil {
if mntns.mountpoints[to] != 0 {
vfs.mountMu.Unlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY // +checklocksforce: no locks acquired.
}
to.mu.Lock()
}
@@ -267,13 +270,13 @@ func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, t
// Return with from.mu and to.mu locked, which will be unlocked by
// AbortRenameDentry, CommitRenameReplaceDentry, or
// CommitRenameExchangeDentry.
- return nil
+ return nil // +checklocksforce: to may not be acquired.
}
// AbortRenameDentry must be called after PrepareRenameDentry if the rename
// fails.
-// +checklocks:from.mu
-// +checklocks:to.mu
+// +checklocksrelease:from.mu
+// +checklocksrelease:to.mu
func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -286,8 +289,8 @@ func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
// that was replaced by from.
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
-// +checklocks:from.mu
-// +checklocks:to.mu
+// +checklocksrelease:from.mu
+// +checklocksrelease:to.mu
func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -303,8 +306,8 @@ func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, fro
// from and to are exchanged by rename(RENAME_EXCHANGE).
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
-// +checklocks:from.mu
-// +checklocks:to.mu
+// +checklocksrelease:from.mu
+// +checklocksrelease:to.mu
func (vfs *VirtualFilesystem) CommitRenameExchangeDentry(from, to *Dentry) {
from.mu.Unlock()
to.mu.Unlock()
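
Aside from the error-package swap, the dentry.go hunks retag the lock annotations: the Prepare* helpers that leave d.mu (and to.mu) held for the caller move from +checklocks to +checklocksacquire, their Abort*/Commit* counterparts move to +checklocksrelease, and early-error returns that never took the lock are marked +checklocksforce. A minimal sketch of that convention inside package vfs, using hypothetical helpers rather than the real Prepare/Commit methods:

// beginMutation locks d.mu on success and leaves it held for the caller,
// which must later call endMutation.
// +checklocksacquire:d.mu
func beginMutation(d *Dentry) error {
	if d == nil {
		return linuxerr.EINVAL // +checklocksforce: lock not acquired on this path.
	}
	d.mu.Lock()
	return nil
}

// endMutation releases the lock taken by a successful beginMutation.
// +checklocksrelease:d.mu
func endMutation(d *Dentry) {
	d.mu.Unlock()
}
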
diff --git a/pkg/sentry/vfs/device.go b/pkg/sentry/vfs/device.go
index dde2ad79b..572d81afc 100644
--- a/pkg/sentry/vfs/device.go
+++ b/pkg/sentry/vfs/device.go
@@ -18,7 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/context"
- "gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
)
// DeviceKind indicates whether a device is a block or character device.
@@ -100,7 +100,7 @@ func (vfs *VirtualFilesystem) OpenDeviceSpecialFile(ctx context.Context, mnt *Mo
defer vfs.devicesMu.RUnlock()
rd, ok := vfs.devices[tup]
if !ok {
- return nil, syserror.ENXIO
+ return nil, linuxerr.ENXIO
}
return rd.dev.Open(ctx, mnt, d, *opts)
}
@@ -120,7 +120,7 @@ func (vfs *VirtualFilesystem) GetAnonBlockDevMinor() (uint32, error) {
}
minor++
}
- return 0, syserror.EMFILE
+ return 0, linuxerr.EMFILE
}
// PutAnonBlockDevMinor deallocates a minor device number returned by a
diff --git a/pkg/sentry/vfs/epoll.go b/pkg/sentry/vfs/epoll.go
index ae004b371..04bc4d10c 100644
--- a/pkg/sentry/vfs/epoll.go
+++ b/pkg/sentry/vfs/epoll.go
@@ -17,8 +17,8 @@ package vfs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -174,7 +174,7 @@ func (ep *EpollInstance) AddInterest(file *FileDescription, num int32, event lin
// that cyclic polling is not introduced after the check.
defer epollCycleMu.Unlock()
if subep.mightPoll(ep) {
- return syserror.ELOOP
+ return linuxerr.ELOOP
}
}
@@ -187,7 +187,7 @@ func (ep *EpollInstance) AddInterest(file *FileDescription, num int32, event lin
num: num,
}
if _, ok := ep.interest[key]; ok {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
// Register interest in file.
@@ -258,7 +258,7 @@ func (ep *EpollInstance) ModifyInterest(file *FileDescription, num int32, event
num: num,
}]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// Update epi for the next call to ep.ReadEvents().
@@ -294,7 +294,7 @@ func (ep *EpollInstance) DeleteInterest(file *FileDescription, num int32) error
num: num,
}]
if !ok {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// Unregister from the file so that epi will no longer be readied.
diff --git a/pkg/sentry/vfs/file_description.go b/pkg/sentry/vfs/file_description.go
index ef8d8a813..ca3303dec 100644
--- a/pkg/sentry/vfs/file_description.go
+++ b/pkg/sentry/vfs/file_description.go
@@ -20,13 +20,13 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -252,7 +252,7 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, creds *auth.Crede
return err
}
if (stat.AttributesMask&linux.STATX_ATTR_APPEND != 0) && (stat.Attributes&linux.STATX_ATTR_APPEND != 0) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
if (flags&linux.O_NOATIME != 0) && (oldFlags&linux.O_NOATIME == 0) {
@@ -266,14 +266,14 @@ func (fd *FileDescription) SetStatusFlags(ctx context.Context, creds *auth.Crede
return err
}
if stat.Mask&linux.STATX_UID == 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if !CanActAsOwner(creds, auth.KUID(stat.UID)) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
if flags&linux.O_DIRECT != 0 && !fd.opts.AllowDirectIO {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// TODO(gvisor.dev/issue/1035): FileDescriptionImpl.SetOAsync()?
const settableFlags = linux.O_APPEND | linux.O_ASYNC | linux.O_DIRECT | linux.O_NOATIME | linux.O_NONBLOCK
@@ -567,7 +567,7 @@ func (fd *FileDescription) StatFS(ctx context.Context) (linux.Statfs, error) {
// Allocate grows file represented by FileDescription to offset + length bytes.
func (fd *FileDescription) Allocate(ctx context.Context, mode, offset, length uint64) error {
if !fd.IsWritable() {
- return syserror.EBADF
+ return linuxerr.EBADF
}
if err := fd.impl.Allocate(ctx, mode, offset, length); err != nil {
return err
@@ -602,10 +602,10 @@ func (fd *FileDescription) EventUnregister(e *waiter.Entry) {
// partial reads with a nil error.
func (fd *FileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
if fd.opts.DenyPRead {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
if !fd.readable {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
start := fsmetric.StartReadWait()
n, err := fd.impl.PRead(ctx, dst, offset, opts)
@@ -620,7 +620,7 @@ func (fd *FileDescription) PRead(ctx context.Context, dst usermem.IOSequence, of
// Read is similar to PRead, but does not specify an offset.
func (fd *FileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
if !fd.readable {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
start := fsmetric.StartReadWait()
n, err := fd.impl.Read(ctx, dst, opts)
@@ -637,10 +637,10 @@ func (fd *FileDescription) Read(ctx context.Context, dst usermem.IOSequence, opt
// return partial writes with a nil error.
func (fd *FileDescription) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
if fd.opts.DenyPWrite {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
if !fd.writable {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
n, err := fd.impl.PWrite(ctx, src, offset, opts)
if n > 0 {
@@ -652,7 +652,7 @@ func (fd *FileDescription) PWrite(ctx context.Context, src usermem.IOSequence, o
// Write is similar to PWrite, but does not specify an offset.
func (fd *FileDescription) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
if !fd.writable {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
n, err := fd.impl.Write(ctx, src, opts)
if n > 0 {
@@ -708,8 +708,8 @@ func (fd *FileDescription) ListXattr(ctx context.Context, size uint64) ([]string
return names, err
}
names, err := fd.impl.ListXattr(ctx, size)
- if err == syserror.ENOTSUP {
- // Linux doesn't actually return ENOTSUP in this case; instead,
+ if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
+ // Linux doesn't actually return EOPNOTSUPP in this case; instead,
// fs/xattr.c:vfs_listxattr() falls back to allowing the security
// subsystem to return security extended attributes, which by default
// don't exist.
@@ -873,7 +873,7 @@ func (fd *FileDescription) ComputeLockRange(ctx context.Context, start uint64, l
}
off = int64(stat.Size)
default:
- return lock.LockRange{}, syserror.EINVAL
+ return lock.LockRange{}, linuxerr.EINVAL
}
return lock.ComputeRange(int64(start), int64(length), off)
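
The ListXattr hunk above also shows why the migration is not a mechanical rename: the sentinel comparison err == syserror.ENOTSUP becomes linuxerr.Equals(linuxerr.EOPNOTSUPP, err), which compares by errno value and therefore still matches the aliased ENOTSUP returned by implementations. A rough in-package sketch of that calling pattern (the helper is hypothetical; the real fallback lives in FileDescription.ListXattr):

// listXattrOrEmpty treats an implementation that reports "xattrs
// unsupported" as having no attributes, mirroring the vfs_listxattr()
// fallback described in the comment above.
func listXattrOrEmpty(ctx context.Context, fd *FileDescription, size uint64) ([]string, error) {
	names, err := fd.impl.ListXattr(ctx, size)
	if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
		return nil, nil
	}
	return names, err
}
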
diff --git a/pkg/sentry/vfs/file_description_impl_util.go b/pkg/sentry/vfs/file_description_impl_util.go
index 2b6f47b4b..452f5f1f9 100644
--- a/pkg/sentry/vfs/file_description_impl_util.go
+++ b/pkg/sentry/vfs/file_description_impl_util.go
@@ -17,14 +17,15 @@ package vfs
import (
"bytes"
"io"
+ "math"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -55,7 +56,7 @@ func (FileDescriptionDefaultImpl) OnClose(ctx context.Context) error {
// StatFS implements FileDescriptionImpl.StatFS analogously to
// super_operations::statfs == NULL in Linux.
func (FileDescriptionDefaultImpl) StatFS(ctx context.Context) (linux.Statfs, error) {
- return linux.Statfs{}, syserror.ENOSYS
+ return linux.Statfs{}, linuxerr.ENOSYS
}
// Allocate implements FileDescriptionImpl.Allocate analogously to
@@ -65,7 +66,7 @@ func (FileDescriptionDefaultImpl) StatFS(ctx context.Context) (linux.Statfs, err
// should technically return EISDIR. Allocate should never be called for a
// directory, because it requires a writable fd.
func (FileDescriptionDefaultImpl) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
// Readiness implements waiter.Waitable.Readiness analogously to
@@ -88,81 +89,81 @@ func (FileDescriptionDefaultImpl) EventUnregister(e *waiter.Entry) {
// PRead implements FileDescriptionImpl.PRead analogously to
// file_operations::read == file_operations::read_iter == NULL in Linux.
func (FileDescriptionDefaultImpl) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Read implements FileDescriptionImpl.Read analogously to
// file_operations::read == file_operations::read_iter == NULL in Linux.
func (FileDescriptionDefaultImpl) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// PWrite implements FileDescriptionImpl.PWrite analogously to
// file_operations::write == file_operations::write_iter == NULL in Linux.
func (FileDescriptionDefaultImpl) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Write implements FileDescriptionImpl.Write analogously to
// file_operations::write == file_operations::write_iter == NULL in Linux.
func (FileDescriptionDefaultImpl) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// IterDirents implements FileDescriptionImpl.IterDirents analogously to
// file_operations::iterate == file_operations::iterate_shared == NULL in
// Linux.
func (FileDescriptionDefaultImpl) IterDirents(ctx context.Context, cb IterDirentsCallback) error {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Seek implements FileDescriptionImpl.Seek analogously to
// file_operations::llseek == NULL in Linux.
func (FileDescriptionDefaultImpl) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Sync implements FileDescriptionImpl.Sync analogously to
// file_operations::fsync == NULL in Linux.
func (FileDescriptionDefaultImpl) Sync(ctx context.Context) error {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// ConfigureMMap implements FileDescriptionImpl.ConfigureMMap analogously to
// file_operations::mmap == NULL in Linux.
func (FileDescriptionDefaultImpl) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- return syserror.ENODEV
+ return linuxerr.ENODEV
}
// Ioctl implements FileDescriptionImpl.Ioctl analogously to
// file_operations::unlocked_ioctl == NULL in Linux.
func (FileDescriptionDefaultImpl) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
// ListXattr implements FileDescriptionImpl.ListXattr analogously to
// inode_operations::listxattr == NULL in Linux.
func (FileDescriptionDefaultImpl) ListXattr(ctx context.Context, size uint64) ([]string, error) {
// This isn't exactly accurate; see FileDescription.ListXattr.
- return nil, syserror.ENOTSUP
+ return nil, linuxerr.ENOTSUP
}
// GetXattr implements FileDescriptionImpl.GetXattr analogously to
// inode::i_opflags & IOP_XATTR == 0 in Linux.
func (FileDescriptionDefaultImpl) GetXattr(ctx context.Context, opts GetXattrOptions) (string, error) {
- return "", syserror.ENOTSUP
+ return "", linuxerr.ENOTSUP
}
// SetXattr implements FileDescriptionImpl.SetXattr analogously to
// inode::i_opflags & IOP_XATTR == 0 in Linux.
func (FileDescriptionDefaultImpl) SetXattr(ctx context.Context, opts SetXattrOptions) error {
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
}
// RemoveXattr implements FileDescriptionImpl.RemoveXattr analogously to
// inode::i_opflags & IOP_XATTR == 0 in Linux.
func (FileDescriptionDefaultImpl) RemoveXattr(ctx context.Context, name string) error {
- return syserror.ENOTSUP
+ return linuxerr.ENOTSUP
}
// DirectoryFileDescriptionDefaultImpl may be embedded by implementations of
@@ -174,27 +175,27 @@ type DirectoryFileDescriptionDefaultImpl struct{}
// Allocate implements DirectoryFileDescriptionDefaultImpl.Allocate.
func (DirectoryFileDescriptionDefaultImpl) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.EISDIR
+ return linuxerr.EISDIR
}
// PRead implements FileDescriptionImpl.PRead.
func (DirectoryFileDescriptionDefaultImpl) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Read implements FileDescriptionImpl.Read.
func (DirectoryFileDescriptionDefaultImpl) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// PWrite implements FileDescriptionImpl.PWrite.
func (DirectoryFileDescriptionDefaultImpl) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// Write implements FileDescriptionImpl.Write.
func (DirectoryFileDescriptionDefaultImpl) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
- return 0, syserror.EISDIR
+ return 0, linuxerr.EISDIR
}
// DentryMetadataFileDescriptionImpl may be embedded by implementations of
@@ -333,10 +334,10 @@ func (fd *DynamicBytesFileDescriptionImpl) Seek(ctx context.Context, offset int6
offset += fd.off
default:
// fs/seq_file:seq_lseek() rejects SEEK_END etc.
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset < 0 {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
if offset != fd.lastRead {
// Regenerate the file's contents immediately. Compare
@@ -357,7 +358,7 @@ func (fd *DynamicBytesFileDescriptionImpl) Seek(ctx context.Context, offset int6
// Preconditions: fd.mu must be locked.
func (fd *DynamicBytesFileDescriptionImpl) pwriteLocked(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
if opts.Flags&^(linux.RWF_HIPRI|linux.RWF_DSYNC|linux.RWF_SYNC) != 0 {
- return 0, syserror.EOPNOTSUPP
+ return 0, linuxerr.EOPNOTSUPP
}
limit, err := CheckLimit(ctx, offset, src.NumBytes())
if err != nil {
@@ -367,7 +368,7 @@ func (fd *DynamicBytesFileDescriptionImpl) pwriteLocked(ctx context.Context, src
writable, ok := fd.data.(WritableDynamicBytesSource)
if !ok {
- return 0, syserror.EIO
+ return 0, linuxerr.EIO
}
n, err := writable.Write(ctx, src, offset)
if err != nil {
@@ -399,6 +400,9 @@ func (fd *DynamicBytesFileDescriptionImpl) Write(ctx context.Context, src userme
// GenericConfigureMMap may be used by most implementations of
// FileDescriptionImpl.ConfigureMMap.
func GenericConfigureMMap(fd *FileDescription, m memmap.Mappable, opts *memmap.MMapOpts) error {
+ if opts.Offset+opts.Length > math.MaxInt64 {
+ return linuxerr.EOVERFLOW
+ }
opts.Mappable = m
opts.MappingIdentity = fd
fd.IncRef()
@@ -467,27 +471,27 @@ func (NoLockFD) SupportsLocks() bool {
// LockBSD implements FileDescriptionImpl.LockBSD.
func (NoLockFD) LockBSD(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, block fslock.Blocker) error {
- return syserror.ENOLCK
+ return linuxerr.ENOLCK
}
// UnlockBSD implements FileDescriptionImpl.UnlockBSD.
func (NoLockFD) UnlockBSD(ctx context.Context, uid fslock.UniqueID) error {
- return syserror.ENOLCK
+ return linuxerr.ENOLCK
}
// LockPOSIX implements FileDescriptionImpl.LockPOSIX.
func (NoLockFD) LockPOSIX(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, r fslock.LockRange, block fslock.Blocker) error {
- return syserror.ENOLCK
+ return linuxerr.ENOLCK
}
// UnlockPOSIX implements FileDescriptionImpl.UnlockPOSIX.
func (NoLockFD) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, r fslock.LockRange) error {
- return syserror.ENOLCK
+ return linuxerr.ENOLCK
}
// TestPOSIX implements FileDescriptionImpl.TestPOSIX.
func (NoLockFD) TestPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, r fslock.LockRange) (linux.Flock, error) {
- return linux.Flock{}, syserror.ENOLCK
+ return linux.Flock{}, linuxerr.ENOLCK
}
// BadLockFD implements Lock*/Unlock* portion of FileDescriptionImpl interface
@@ -503,25 +507,25 @@ func (BadLockFD) SupportsLocks() bool {
// LockBSD implements FileDescriptionImpl.LockBSD.
func (BadLockFD) LockBSD(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, block fslock.Blocker) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// UnlockBSD implements FileDescriptionImpl.UnlockBSD.
func (BadLockFD) UnlockBSD(ctx context.Context, uid fslock.UniqueID) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// LockPOSIX implements FileDescriptionImpl.LockPOSIX.
func (BadLockFD) LockPOSIX(ctx context.Context, uid fslock.UniqueID, ownerPID int32, t fslock.LockType, r fslock.LockRange, block fslock.Blocker) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// UnlockPOSIX implements FileDescriptionImpl.UnlockPOSIX.
func (BadLockFD) UnlockPOSIX(ctx context.Context, uid fslock.UniqueID, r fslock.LockRange) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// TestPOSIX implements FileDescriptionImpl.TestPOSIX.
func (BadLockFD) TestPOSIX(ctx context.Context, uid fslock.UniqueID, t fslock.LockType, r fslock.LockRange) (linux.Flock, error) {
- return linux.Flock{}, syserror.EBADF
+ return linux.Flock{}, linuxerr.EBADF
}
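
One behavioral change is buried among the error renames: GenericConfigureMMap now rejects mappings whose end offset would exceed math.MaxInt64, returning EOVERFLOW before the Mappable is wired up. A standalone restatement of the check as written above (the helper name is hypothetical):

// checkMmapEnd mirrors the new guard in GenericConfigureMMap: offsets
// are treated as signed elsewhere in the VFS, so the end of the mapped
// range must fit in an int64.
func checkMmapEnd(offset, length uint64) error {
	if offset+length > math.MaxInt64 {
		return linuxerr.EOVERFLOW
	}
	return nil
}
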
diff --git a/pkg/sentry/vfs/file_description_impl_util_test.go b/pkg/sentry/vfs/file_description_impl_util_test.go
index 1cd607c0a..e34a8c11b 100644
--- a/pkg/sentry/vfs/file_description_impl_util_test.go
+++ b/pkg/sentry/vfs/file_description_impl_util_test.go
@@ -23,8 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/contexttest"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -102,7 +102,7 @@ func (fd *testFD) Stat(ctx context.Context, opts StatOptions) (linux.Statx, erro
// SetStat implements FileDescriptionImpl.SetStat.
func (fd *testFD) SetStat(ctx context.Context, opts SetStatOptions) error {
- return syserror.EPERM
+ return linuxerr.EPERM
}
func TestGenCountFD(t *testing.T) {
@@ -155,11 +155,11 @@ func TestGenCountFD(t *testing.T) {
}
// Write and PWrite fails.
- if _, err := fd.Write(ctx, ioseq, WriteOptions{}); err != syserror.EIO {
- t.Errorf("Write: got err %v, wanted %v", err, syserror.EIO)
+ if _, err := fd.Write(ctx, ioseq, WriteOptions{}); !linuxerr.Equals(linuxerr.EIO, err) {
+ t.Errorf("Write: got err %v, wanted %v", err, linuxerr.EIO)
}
- if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); err != syserror.EIO {
- t.Errorf("Write: got err %v, wanted %v", err, syserror.EIO)
+ if _, err := fd.PWrite(ctx, ioseq, 0, WriteOptions{}); !linuxerr.Equals(linuxerr.EIO, err) {
+ t.Errorf("Write: got err %v, wanted %v", err, linuxerr.EIO)
}
}
@@ -215,10 +215,10 @@ func TestWritable(t *testing.T) {
if n, err := fd.Seek(ctx, 1, linux.SEEK_SET); n != 0 && err != nil {
t.Errorf("Seek: got err (%v, %v), wanted (0, nil)", n, err)
}
- if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); n != 0 && err != syserror.EINVAL {
+ if n, err := fd.Write(ctx, writeIOSeq, WriteOptions{}); n != 0 && !linuxerr.Equals(linuxerr.EINVAL, err) {
t.Errorf("Write: got err (%v, %v), wanted (0, EINVAL)", n, err)
}
- if n, err := fd.PWrite(ctx, writeIOSeq, 2, WriteOptions{}); n != 0 && err != syserror.EINVAL {
+ if n, err := fd.PWrite(ctx, writeIOSeq, 2, WriteOptions{}); n != 0 && !linuxerr.Equals(linuxerr.EINVAL, err) {
t.Errorf("PWrite: got err (%v, %v), wanted (0, EINVAL)", n, err)
}
}
diff --git a/pkg/sentry/vfs/inotify.go b/pkg/sentry/vfs/inotify.go
index 49d29e20b..17d94b341 100644
--- a/pkg/sentry/vfs/inotify.go
+++ b/pkg/sentry/vfs/inotify.go
@@ -21,11 +21,11 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -98,7 +98,7 @@ func NewInotifyFD(ctx context.Context, vfsObj *VirtualFilesystem, flags uint32)
// O_CLOEXEC affects file descriptors, so it must be handled outside of vfs.
flags &^= linux.O_CLOEXEC
if flags&^linux.O_NONBLOCK != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
id := uniqueid.GlobalFromContext(ctx)
@@ -184,23 +184,23 @@ func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask {
// PRead implements FileDescriptionImpl.PRead.
func (*Inotify) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// PWrite implements FileDescriptionImpl.PWrite.
func (*Inotify) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
- return 0, syserror.ESPIPE
+ return 0, linuxerr.ESPIPE
}
// Write implements FileDescriptionImpl.Write.
func (*Inotify) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Read implements FileDescriptionImpl.Read.
func (i *Inotify) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
if dst.NumBytes() < inotifyEventBaseSize {
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
i.evMu.Lock()
@@ -208,7 +208,7 @@ func (i *Inotify) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOpt
if i.events.Empty() {
// Nothing to read yet, tell caller to block.
- return 0, syserror.ErrWouldBlock
+ return 0, linuxerr.ErrWouldBlock
}
var writeLen int64
@@ -226,7 +226,7 @@ func (i *Inotify) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOpt
// write some events out.
return writeLen, nil
}
- return 0, syserror.EINVAL
+ return 0, linuxerr.EINVAL
}
// Linux always dequeues an available event as long as there's enough
@@ -262,7 +262,7 @@ func (i *Inotify) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallAr
return 0, err
default:
- return 0, syserror.ENOTTY
+ return 0, linuxerr.ENOTTY
}
}
@@ -332,7 +332,7 @@ func (i *Inotify) AddWatch(target *Dentry, mask uint32) (int32, error) {
if ws == nil {
// While Linux supports inotify watches on all filesystem types, watches on
// filesystems like kernfs are not generally useful, so we do not.
- return 0, syserror.EPERM
+ return 0, linuxerr.EPERM
}
// Does the target already have a watch from this inotify instance?
if existing := ws.Lookup(i.id); existing != nil {
@@ -360,7 +360,7 @@ func (i *Inotify) RmWatch(ctx context.Context, wd int32) error {
w, ok := i.watches[wd]
if !ok {
i.mu.Unlock()
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// Remove the watch from this instance.
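
The inotify hunks keep the sentinel linuxerr.ErrWouldBlock for the empty-queue case alongside the usual errno constants. A compact restatement of the checks at the top of Inotify.Read (the helper name and parameters are for illustration only):

// inotifyReadPrecheck: the destination must hold at least one base
// event, and an empty queue tells the caller to block and retry.
func inotifyReadPrecheck(dstBytes int64, queueEmpty bool) error {
	if dstBytes < inotifyEventBaseSize {
		return linuxerr.EINVAL
	}
	if queueEmpty {
		return linuxerr.ErrWouldBlock
	}
	return nil
}
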
diff --git a/pkg/sentry/vfs/lock.go b/pkg/sentry/vfs/lock.go
index cbe4d8c2d..1853cdca0 100644
--- a/pkg/sentry/vfs/lock.go
+++ b/pkg/sentry/vfs/lock.go
@@ -17,8 +17,8 @@ package vfs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
fslock "gvisor.dev/gvisor/pkg/sentry/fs/lock"
- "gvisor.dev/gvisor/pkg/syserror"
)
// FileLocks supports POSIX and BSD style locks, which correspond to fcntl(2)
@@ -47,9 +47,9 @@ func (fl *FileLocks) LockBSD(ctx context.Context, uid fslock.UniqueID, ownerID i
// Return an appropriate error for the unsuccessful lock attempt, depending on
// whether this is a blocking or non-blocking operation.
if block == nil {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
- return syserror.ERESTARTSYS
+ return linuxerr.ERESTARTSYS
}
// UnlockBSD releases a BSD-style lock on the entire file.
@@ -69,9 +69,9 @@ func (fl *FileLocks) LockPOSIX(ctx context.Context, uid fslock.UniqueID, ownerPI
// Return an appropriate error for the unsuccessful lock attempt, depending on
// whether this is a blocking or non-blocking operation.
if block == nil {
- return syserror.ErrWouldBlock
+ return linuxerr.ErrWouldBlock
}
- return syserror.ERESTARTSYS
+ return linuxerr.ERESTARTSYS
}
// UnlockPOSIX releases a POSIX-style lock on a file region.
diff --git a/pkg/sentry/vfs/memxattr/BUILD b/pkg/sentry/vfs/memxattr/BUILD
index ea82f4987..444ab42b9 100644
--- a/pkg/sentry/vfs/memxattr/BUILD
+++ b/pkg/sentry/vfs/memxattr/BUILD
@@ -8,9 +8,9 @@ go_library(
visibility = ["//pkg/sentry:internal"],
deps = [
"//pkg/abi/linux",
+ "//pkg/errors/linuxerr",
"//pkg/sentry/kernel/auth",
"//pkg/sentry/vfs",
"//pkg/sync",
- "//pkg/syserror",
],
)
diff --git a/pkg/sentry/vfs/memxattr/xattr.go b/pkg/sentry/vfs/memxattr/xattr.go
index 9b7953fa3..f0f82a4d6 100644
--- a/pkg/sentry/vfs/memxattr/xattr.go
+++ b/pkg/sentry/vfs/memxattr/xattr.go
@@ -20,10 +20,10 @@ import (
"strings"
"gvisor.dev/gvisor/pkg/abi/linux"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// SimpleExtendedAttributes implements extended attributes using a map of
@@ -49,12 +49,12 @@ func (x *SimpleExtendedAttributes) GetXattr(creds *auth.Credentials, mode linux.
value, ok := x.xattrs[opts.Name]
x.mu.RUnlock()
if !ok {
- return "", syserror.ENODATA
+ return "", linuxerr.ENODATA
}
// Check that the size of the buffer provided in getxattr(2) is large enough
// to contain the value.
if opts.Size != 0 && uint64(len(value)) > opts.Size {
- return "", syserror.ERANGE
+ return "", linuxerr.ERANGE
}
return value, nil
}
@@ -69,17 +69,17 @@ func (x *SimpleExtendedAttributes) SetXattr(creds *auth.Credentials, mode linux.
defer x.mu.Unlock()
if x.xattrs == nil {
if opts.Flags&linux.XATTR_REPLACE != 0 {
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
x.xattrs = make(map[string]string)
}
_, ok := x.xattrs[opts.Name]
if ok && opts.Flags&linux.XATTR_CREATE != 0 {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
if !ok && opts.Flags&linux.XATTR_REPLACE != 0 {
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
x.xattrs[opts.Name] = opts.Value
@@ -106,7 +106,7 @@ func (x *SimpleExtendedAttributes) ListXattr(creds *auth.Credentials, size uint6
}
x.mu.RUnlock()
if size != 0 && uint64(listSize) > size {
- return nil, syserror.ERANGE
+ return nil, linuxerr.ERANGE
}
return names, nil
}
@@ -120,7 +120,7 @@ func (x *SimpleExtendedAttributes) RemoveXattr(creds *auth.Credentials, mode lin
x.mu.Lock()
defer x.mu.Unlock()
if _, ok := x.xattrs[name]; !ok {
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
delete(x.xattrs, name)
return nil
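
The memxattr hunks spell out how the setxattr(2) flags map onto errnos: XATTR_CREATE fails with EEXIST if the attribute already exists, and XATTR_REPLACE fails with ENODATA if it does not. The same rule as a small helper (hypothetical, for illustration):

// setXattrFlagCheck restates the flag handling in SetXattr above.
// exists reports whether the attribute name is already present.
func setXattrFlagCheck(exists bool, flags uint32) error {
	if exists && flags&linux.XATTR_CREATE != 0 {
		return linuxerr.EEXIST
	}
	if !exists && flags&linux.XATTR_REPLACE != 0 {
		return linuxerr.ENODATA
	}
	return nil
}
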
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index f93da3af1..05a416775 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -24,9 +24,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A Mount is a replacement of a Dentry (Mount.key.point) from one Filesystem
@@ -159,7 +159,7 @@ func (vfs *VirtualFilesystem) NewMountNamespace(ctx context.Context, creds *auth
rft := vfs.getFilesystemType(fsTypeName)
if rft == nil {
ctx.Warningf("Unknown filesystem type: %s", fsTypeName)
- return nil, syserror.ENODEV
+ return nil, linuxerr.ENODEV
}
fs, root, err := rft.fsType.GetFilesystem(ctx, vfs, creds, source, opts.GetFilesystemOptions)
if err != nil {
@@ -192,10 +192,10 @@ func (vfs *VirtualFilesystem) NewDisconnectedMount(fs *Filesystem, root *Dentry,
func (vfs *VirtualFilesystem) MountDisconnected(ctx context.Context, creds *auth.Credentials, source string, fsTypeName string, opts *MountOptions) (*Mount, error) {
rft := vfs.getFilesystemType(fsTypeName)
if rft == nil {
- return nil, syserror.ENODEV
+ return nil, linuxerr.ENODEV
}
if !opts.InternalMount && !rft.opts.AllowUserMount {
- return nil, syserror.ENODEV
+ return nil, linuxerr.ENODEV
}
fs, root, err := rft.fsType.GetFilesystem(ctx, vfs, creds, source, opts.GetFilesystemOptions)
if err != nil {
@@ -224,7 +224,7 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
vd.DecRef(ctx)
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
// vd might have been mounted over between vfs.GetDentryAt() and
// vfs.mountMu.Lock().
@@ -284,7 +284,7 @@ func (vfs *VirtualFilesystem) MountAt(ctx context.Context, creds *auth.Credentia
// UmountAt removes the Mount at the given path.
func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, opts *UmountOptions) error {
if opts.Flags&^(linux.MNT_FORCE|linux.MNT_DETACH) != 0 {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// MNT_FORCE is currently unimplemented except for the permission check.
@@ -292,7 +292,7 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti
// namespace, and not in the owner user namespace for the target mount. See
// fs/namespace.c:SYSCALL_DEFINE2(umount, ...)
if opts.Flags&linux.MNT_FORCE != 0 && creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, creds.UserNamespace.Root()) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
vd, err := vfs.GetDentryAt(ctx, creds, pop, &GetDentryOptions{})
@@ -301,19 +301,19 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti
}
defer vd.DecRef(ctx)
if vd.dentry != vd.mount.root {
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
vfs.mountMu.Lock()
if mntns := MountNamespaceFromContext(ctx); mntns != nil {
defer mntns.DecRef(ctx)
if mntns != vd.mount.ns {
vfs.mountMu.Unlock()
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
if vd.mount == vd.mount.ns.root {
vfs.mountMu.Unlock()
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
}
@@ -326,7 +326,7 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti
if len(vd.mount.children) != 0 {
vfs.mounts.seq.EndWrite()
vfs.mountMu.Unlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
// We are holding a reference on vd.mount.
expectedRefs := int64(1)
@@ -336,7 +336,7 @@ func (vfs *VirtualFilesystem) UmountAt(ctx context.Context, creds *auth.Credenti
if atomic.LoadInt64(&vd.mount.refs)&^math.MinInt64 != expectedRefs { // mask out MSB
vfs.mounts.seq.EndWrite()
vfs.mountMu.Unlock()
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
}
vdsToDecRef, mountsToDecRef := vfs.umountRecursiveLocked(vd.mount, &umountRecursiveOptions{
@@ -710,7 +710,7 @@ func (vfs *VirtualFilesystem) SetMountReadOnly(mnt *Mount, ro bool) error {
func (mnt *Mount) CheckBeginWrite() error {
if atomic.AddInt64(&mnt.writers, 1) < 0 {
atomic.AddInt64(&mnt.writers, -1)
- return syserror.EROFS
+ return linuxerr.EROFS
}
return nil
}
@@ -728,7 +728,7 @@ func (mnt *Mount) setReadOnlyLocked(ro bool) error {
}
if ro {
if !atomic.CompareAndSwapInt64(&mnt.writers, 0, math.MinInt64) {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
return nil
}
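
The mount.go hunks also show the read-only bookkeeping that produces EROFS and EBUSY: Mount.writers is an atomic counter whose value is parked at math.MinInt64 while the mount is read-only, so any subsequent increment comes back negative. A compact sketch of that sentinel pattern, with a hypothetical counter type standing in for Mount:

type writeCounter struct {
	writers int64
}

// beginWrite fails with EROFS once the counter has been parked at
// math.MinInt64 by setReadOnly.
func (c *writeCounter) beginWrite() error {
	if atomic.AddInt64(&c.writers, 1) < 0 {
		atomic.AddInt64(&c.writers, -1)
		return linuxerr.EROFS
	}
	return nil
}

// setReadOnly only succeeds while there are no active writers.
func (c *writeCounter) setReadOnly() error {
	if !atomic.CompareAndSwapInt64(&c.writers, 0, math.MinInt64) {
		return linuxerr.EBUSY
	}
	return nil
}
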
diff --git a/pkg/sentry/vfs/opath.go b/pkg/sentry/vfs/opath.go
index e9651b631..da0b33b79 100644
--- a/pkg/sentry/vfs/opath.go
+++ b/pkg/sentry/vfs/opath.go
@@ -17,10 +17,10 @@ package vfs
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/memmap"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -40,77 +40,77 @@ func (fd *opathFD) Release(context.Context) {
// Allocate implements FileDescriptionImpl.Allocate.
func (fd *opathFD) Allocate(ctx context.Context, mode, offset, length uint64) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// PRead implements FileDescriptionImpl.PRead.
func (fd *opathFD) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts ReadOptions) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Read implements FileDescriptionImpl.Read.
func (fd *opathFD) Read(ctx context.Context, dst usermem.IOSequence, opts ReadOptions) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// PWrite implements FileDescriptionImpl.PWrite.
func (fd *opathFD) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts WriteOptions) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Write implements FileDescriptionImpl.Write.
func (fd *opathFD) Write(ctx context.Context, src usermem.IOSequence, opts WriteOptions) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// Ioctl implements FileDescriptionImpl.Ioctl.
func (fd *opathFD) Ioctl(ctx context.Context, uio usermem.IO, args arch.SyscallArguments) (uintptr, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// IterDirents implements FileDescriptionImpl.IterDirents.
func (fd *opathFD) IterDirents(ctx context.Context, cb IterDirentsCallback) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Seek implements FileDescriptionImpl.Seek.
func (fd *opathFD) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
- return 0, syserror.EBADF
+ return 0, linuxerr.EBADF
}
// ConfigureMMap implements FileDescriptionImpl.ConfigureMMap.
func (fd *opathFD) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// ListXattr implements FileDescriptionImpl.ListXattr.
func (fd *opathFD) ListXattr(ctx context.Context, size uint64) ([]string, error) {
- return nil, syserror.EBADF
+ return nil, linuxerr.EBADF
}
// GetXattr implements FileDescriptionImpl.GetXattr.
func (fd *opathFD) GetXattr(ctx context.Context, opts GetXattrOptions) (string, error) {
- return "", syserror.EBADF
+ return "", linuxerr.EBADF
}
// SetXattr implements FileDescriptionImpl.SetXattr.
func (fd *opathFD) SetXattr(ctx context.Context, opts SetXattrOptions) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// RemoveXattr implements FileDescriptionImpl.RemoveXattr.
func (fd *opathFD) RemoveXattr(ctx context.Context, name string) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Sync implements FileDescriptionImpl.Sync.
func (fd *opathFD) Sync(ctx context.Context) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// SetStat implements FileDescriptionImpl.SetStat.
func (fd *opathFD) SetStat(ctx context.Context, opts SetStatOptions) error {
- return syserror.EBADF
+ return linuxerr.EBADF
}
// Stat implements FileDescriptionImpl.Stat.
diff --git a/pkg/sentry/vfs/pathname.go b/pkg/sentry/vfs/pathname.go
index e4da15009..7cc68a157 100644
--- a/pkg/sentry/vfs/pathname.go
+++ b/pkg/sentry/vfs/pathname.go
@@ -16,9 +16,9 @@ package vfs
import (
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
var fspathBuilderPool = sync.Pool{
@@ -137,7 +137,7 @@ loop:
// Linux's sys_getcwd().
func (vfs *VirtualFilesystem) PathnameForGetcwd(ctx context.Context, vfsroot, vd VirtualDentry) (string, error) {
if vd.dentry.IsDead() {
- return "", syserror.ENOENT
+ return "", linuxerr.ENOENT
}
b := getFSPathBuilder()
diff --git a/pkg/sentry/vfs/permissions.go b/pkg/sentry/vfs/permissions.go
index b7704874f..953d31876 100644
--- a/pkg/sentry/vfs/permissions.go
+++ b/pkg/sentry/vfs/permissions.go
@@ -20,9 +20,9 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/limits"
- "gvisor.dev/gvisor/pkg/syserror"
)
// AccessTypes is a bitmask of Unix file permissions.
@@ -77,7 +77,7 @@ func GenericCheckPermissions(creds *auth.Credentials, ats AccessTypes, mode linu
// the caller's user namespace; compare
// kernel/capability.c:privileged_wrt_inode_uidgid().
if !kuid.In(creds.UserNamespace).Ok() || !kgid.In(creds.UserNamespace).Ok() {
- return syserror.EACCES
+ return linuxerr.EACCES
}
// CAP_DAC_READ_SEARCH allows the caller to read and search arbitrary
// directories, and read arbitrary non-directory files.
@@ -94,7 +94,7 @@ func GenericCheckPermissions(creds *auth.Credentials, ats AccessTypes, mode linu
return nil
}
}
- return syserror.EACCES
+ return linuxerr.EACCES
}
// MayLink determines whether creating a hard link to a file with the given
@@ -110,12 +110,12 @@ func MayLink(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, kgid
// Only regular files can be hard linked.
if mode.FileType() != linux.S_IFREG {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Setuid files should not get pinned to the filesystem.
if mode&linux.S_ISUID != 0 {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// Executable setgid files should not get pinned to the filesystem, but we
@@ -123,7 +123,7 @@ func MayLink(creds *auth.Credentials, mode linux.FileMode, kuid auth.KUID, kgid
// Hardlinking to unreadable or unwritable sources is dangerous.
if err := GenericCheckPermissions(creds, MayRead|MayWrite, mode, kuid, kgid); err != nil {
- return syserror.EPERM
+ return linuxerr.EPERM
}
return nil
}
@@ -194,12 +194,12 @@ func CheckSetStat(ctx context.Context, creds *auth.Credentials, opts *SetStatOpt
return err
}
if limit < int64(stat.Size) {
- return syserror.ErrExceedsFileSizeLimit
+ return linuxerr.ErrExceedsFileSizeLimit
}
}
if stat.Mask&linux.STATX_MODE != 0 {
if !CanActAsOwner(creds, kuid) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
// TODO(b/30815691): "If the calling process is not privileged (Linux:
// does not have the CAP_FSETID capability), and the group of the file
@@ -210,13 +210,13 @@ func CheckSetStat(ctx context.Context, creds *auth.Credentials, opts *SetStatOpt
if stat.Mask&linux.STATX_UID != 0 {
if !((creds.EffectiveKUID == kuid && auth.KUID(stat.UID) == kuid) ||
HasCapabilityOnFile(creds, linux.CAP_CHOWN, kuid, kgid)) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
if stat.Mask&linux.STATX_GID != 0 {
if !((creds.EffectiveKUID == kuid && creds.InGroup(auth.KGID(stat.GID))) ||
HasCapabilityOnFile(creds, linux.CAP_CHOWN, kuid, kgid)) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
if opts.NeedWritePerm && !creds.HasCapability(linux.CAP_DAC_OVERRIDE) {
@@ -229,7 +229,7 @@ func CheckSetStat(ctx context.Context, creds *auth.Credentials, opts *SetStatOpt
if (stat.Mask&linux.STATX_ATIME != 0 && stat.Atime.Nsec != linux.UTIME_NOW) ||
(stat.Mask&linux.STATX_MTIME != 0 && stat.Mtime.Nsec != linux.UTIME_NOW) ||
(stat.Mask&linux.STATX_CTIME != 0 && stat.Ctime.Nsec != linux.UTIME_NOW) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
if err := GenericCheckPermissions(creds, MayWrite, mode, kuid, kgid); err != nil {
return err
@@ -252,7 +252,7 @@ func CheckDeleteSticky(creds *auth.Credentials, parentMode linux.FileMode, paren
HasCapabilityOnFile(creds, linux.CAP_FOWNER, childKUID, childKGID) {
return nil
}
- return syserror.EPERM
+ return linuxerr.EPERM
}
// CanActAsOwner returns true if creds can act as the owner of a file with the
@@ -281,7 +281,7 @@ func CheckLimit(ctx context.Context, offset, size int64) (int64, error) {
return size, nil
}
if offset >= int64(fileSizeLimit) {
- return 0, syserror.ErrExceedsFileSizeLimit
+ return 0, linuxerr.ErrExceedsFileSizeLimit
}
remaining := int64(fileSizeLimit) - offset
if remaining < size {
@@ -306,9 +306,9 @@ func CheckXattrPermissions(creds *auth.Credentials, ats AccessTypes, mode linux.
return nil
}
if ats.MayWrite() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
- return syserror.ENODATA
+ return linuxerr.ENODATA
case strings.HasPrefix(name, linux.XATTR_USER_PREFIX):
// In the user.* namespace, only regular files and directories can have
// extended attributes. For sticky directories, only the owner and
@@ -316,12 +316,12 @@ func CheckXattrPermissions(creds *auth.Credentials, ats AccessTypes, mode linux.
filetype := mode.FileType()
if filetype != linux.ModeRegular && filetype != linux.ModeDirectory {
if ats.MayWrite() {
- return syserror.EPERM
+ return linuxerr.EPERM
}
- return syserror.ENODATA
+ return linuxerr.ENODATA
}
if filetype == linux.ModeDirectory && mode&linux.ModeSticky != 0 && ats.MayWrite() && !CanActAsOwner(creds, kuid) {
- return syserror.EPERM
+ return linuxerr.EPERM
}
}
return nil
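
The xattr permission hunks follow a consistent rule: when access to an attribute namespace is denied, writers see EPERM while readers see ENODATA, so the attribute simply appears absent. Restated as a small helper (hypothetical name, types taken from the hunk above):

// deniedXattrErr picks the errno for a disallowed xattr access.
func deniedXattrErr(ats AccessTypes) error {
	if ats.MayWrite() {
		return linuxerr.EPERM
	}
	return linuxerr.ENODATA
}
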
diff --git a/pkg/sentry/vfs/resolving_path.go b/pkg/sentry/vfs/resolving_path.go
index 97b898aba..7fd7f000d 100644
--- a/pkg/sentry/vfs/resolving_path.go
+++ b/pkg/sentry/vfs/resolving_path.go
@@ -19,10 +19,10 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// ResolvingPath represents the state of an in-progress path resolution, shared
@@ -327,10 +327,10 @@ func (rp *ResolvingPath) ShouldFollowSymlink() bool {
// Postconditions: If HandleSymlink returns a nil error, then !rp.Done().
func (rp *ResolvingPath) HandleSymlink(target string) error {
if rp.symlinks >= linux.MaxSymlinkTraversals {
- return syserror.ELOOP
+ return linuxerr.ELOOP
}
if len(target) == 0 {
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
rp.symlinks++
targetPath := fspath.Parse(target)
@@ -377,7 +377,7 @@ func (rp *ResolvingPath) relpathPrepend(path fspath.Path) {
// Preconditions: !rp.Done().
func (rp *ResolvingPath) HandleJump(target VirtualDentry) error {
if rp.symlinks >= linux.MaxSymlinkTraversals {
- return syserror.ELOOP
+ return linuxerr.ELOOP
}
rp.symlinks++
// Consume the path component that represented the magic link.
diff --git a/pkg/sentry/vfs/vfs.go b/pkg/sentry/vfs/vfs.go
index 87fdcf403..1b2a668c0 100644
--- a/pkg/sentry/vfs/vfs.go
+++ b/pkg/sentry/vfs/vfs.go
@@ -42,12 +42,12 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/fspath"
"gvisor.dev/gvisor/pkg/sentry/fsmetric"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sync"
- "gvisor.dev/gvisor/pkg/syserror"
)
// A VirtualFilesystem (VFS for short) combines Filesystems in trees of Mounts.
@@ -278,14 +278,14 @@ func (vfs *VirtualFilesystem) LinkAt(ctx context.Context, creds *auth.Credential
if !newpop.Path.Begin.Ok() {
oldVD.DecRef(ctx)
if newpop.Path.Absolute {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if newpop.FollowFinalSymlink {
oldVD.DecRef(ctx)
ctx.Warningf("VirtualFilesystem.LinkAt: file creation paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, newpop)
@@ -315,13 +315,13 @@ func (vfs *VirtualFilesystem) MkdirAt(ctx context.Context, creds *auth.Credentia
// pop.Path should not be empty in operations that create/delete files.
// This is consistent with mkdirat(dirfd, "", mode).
if pop.Path.Absolute {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if pop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.MkdirAt: file creation paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
// "Under Linux, apart from the permission bits, the S_ISVTX mode bit is
// also honored." - mkdir(2)
@@ -347,19 +347,19 @@ func (vfs *VirtualFilesystem) MkdirAt(ctx context.Context, creds *auth.Credentia
}
// MknodAt creates a file of the given mode at the given path. It returns an
-// error from the syserror package.
+// error from the linuxerr package.
func (vfs *VirtualFilesystem) MknodAt(ctx context.Context, creds *auth.Credentials, pop *PathOperation, opts *MknodOptions) error {
if !pop.Path.Begin.Ok() {
// pop.Path should not be empty in operations that create/delete files.
// This is consistent with mknodat(dirfd, "", mode, dev).
if pop.Path.Absolute {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if pop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.MknodAt: file creation paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, pop)
@@ -402,13 +402,13 @@ func (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credential
// filesystem implementations that do not support it).
if opts.Flags&linux.O_TMPFILE != 0 {
if opts.Flags&linux.O_DIRECTORY == 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
if opts.Flags&linux.O_CREAT != 0 {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
if opts.Flags&linux.O_ACCMODE == linux.O_RDONLY {
- return nil, syserror.EINVAL
+ return nil, linuxerr.EINVAL
}
}
// O_PATH causes most other flags to be ignored.
@@ -426,9 +426,7 @@ func (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credential
if opts.Flags&linux.O_DIRECTORY != 0 {
rp.mustBeDir = true
}
- // Ignore O_PATH for verity, as verity performs extra operations on the fd for verification.
- // The underlying filesystem that verity wraps opens the fd with O_PATH.
- if opts.Flags&linux.O_PATH != 0 && rp.mount.fs.FilesystemType().Name() != "verity" {
+ if opts.Flags&linux.O_PATH != 0 {
vd, err := vfs.GetDentryAt(ctx, creds, pop, &GetDentryOptions{})
if err != nil {
return nil, err
@@ -448,7 +446,7 @@ func (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credential
if opts.FileExec {
if fd.Mount().Flags.NoExec {
fd.DecRef(ctx)
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
// Only a regular file can be executed.
@@ -459,7 +457,7 @@ func (vfs *VirtualFilesystem) OpenAt(ctx context.Context, creds *auth.Credential
}
if stat.Mask&linux.STATX_TYPE == 0 || stat.Mode&linux.S_IFMT != linux.S_IFREG {
fd.DecRef(ctx)
- return nil, syserror.EACCES
+ return nil, linuxerr.EACCES
}
}
@@ -493,13 +491,13 @@ func (vfs *VirtualFilesystem) ReadlinkAt(ctx context.Context, creds *auth.Creden
func (vfs *VirtualFilesystem) RenameAt(ctx context.Context, creds *auth.Credentials, oldpop, newpop *PathOperation, opts *RenameOptions) error {
if !oldpop.Path.Begin.Ok() {
if oldpop.Path.Absolute {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if oldpop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.RenameAt: source path can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
oldParentVD, oldName, err := vfs.getParentDirAndName(ctx, creds, oldpop)
@@ -508,20 +506,20 @@ func (vfs *VirtualFilesystem) RenameAt(ctx context.Context, creds *auth.Credenti
}
if oldName == "." || oldName == ".." {
oldParentVD.DecRef(ctx)
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
if !newpop.Path.Begin.Ok() {
oldParentVD.DecRef(ctx)
if newpop.Path.Absolute {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if newpop.FollowFinalSymlink {
oldParentVD.DecRef(ctx)
ctx.Warningf("VirtualFilesystem.RenameAt: destination path can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, newpop)
@@ -555,13 +553,13 @@ func (vfs *VirtualFilesystem) RmdirAt(ctx context.Context, creds *auth.Credentia
// pop.Path should not be empty in operations that create/delete files.
// This is consistent with unlinkat(dirfd, "", AT_REMOVEDIR).
if pop.Path.Absolute {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if pop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.RmdirAt: file deletion paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, pop)
@@ -638,13 +636,13 @@ func (vfs *VirtualFilesystem) SymlinkAt(ctx context.Context, creds *auth.Credent
// pop.Path should not be empty in operations that create/delete files.
// This is consistent with symlinkat(oldpath, newdirfd, "").
if pop.Path.Absolute {
- return syserror.EEXIST
+ return linuxerr.EEXIST
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if pop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.SymlinkAt: file creation paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, pop)
@@ -672,13 +670,13 @@ func (vfs *VirtualFilesystem) UnlinkAt(ctx context.Context, creds *auth.Credenti
// pop.Path should not be empty in operations that create/delete files.
// This is consistent with unlinkat(dirfd, "", 0).
if pop.Path.Absolute {
- return syserror.EBUSY
+ return linuxerr.EBUSY
}
- return syserror.ENOENT
+ return linuxerr.ENOENT
}
if pop.FollowFinalSymlink {
ctx.Warningf("VirtualFilesystem.UnlinkAt: file deletion paths can't follow final symlink")
- return syserror.EINVAL
+ return linuxerr.EINVAL
}
rp := vfs.getResolvingPath(creds, pop)
@@ -731,8 +729,8 @@ func (vfs *VirtualFilesystem) ListXattrAt(ctx context.Context, creds *auth.Crede
rp.Release(ctx)
return names, nil
}
- if err == syserror.ENOTSUP {
- // Linux doesn't actually return ENOTSUP in this case; instead,
+ if linuxerr.Equals(linuxerr.EOPNOTSUPP, err) {
+ // Linux doesn't actually return EOPNOTSUPP in this case; instead,
// fs/xattr.c:vfs_listxattr() falls back to allowing the security
// subsystem to return security extended attributes, which by
// default don't exist.
@@ -830,14 +828,14 @@ func (vfs *VirtualFilesystem) MkdirAllAt(ctx context.Context, currentPath string
Path: fspath.Parse(currentPath),
}
stat, err := vfs.StatAt(ctx, creds, pop, &StatOptions{Mask: linux.STATX_TYPE})
- switch err {
- case nil:
+ switch {
+ case err == nil:
if stat.Mask&linux.STATX_TYPE == 0 || stat.Mode&linux.FileTypeMask != linux.ModeDirectory {
- return syserror.ENOTDIR
+ return linuxerr.ENOTDIR
}
// Directory already exists.
return nil
- case syserror.ENOENT:
+ case linuxerr.Equals(linuxerr.ENOENT, err):
// Expected, we will create the dir.
default:
return fmt.Errorf("stat failed for %q during directory creation: %w", currentPath, err)
@@ -871,7 +869,7 @@ func (vfs *VirtualFilesystem) MakeSyntheticMountpoint(ctx context.Context, targe
Root: root,
Start: root,
Path: fspath.Parse(target),
- }, mkdirOpts); err != nil && err != syserror.EEXIST {
+ }, mkdirOpts); err != nil && !linuxerr.Equals(linuxerr.EEXIST, err) {
return fmt.Errorf("failed to create mountpoint %q: %w", target, err)
}
return nil
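
The MkdirAllAt hunk is the clearest example of why switch statements over error values have to change shape: switch err { case syserror.ENOENT: ... } becomes a tagless switch using linuxerr.Equals. The same classification as a standalone sketch (the helper and its return values are illustrative only):

// classifyStatErr: nil means the path already exists, ENOENT means it
// should be created, anything else aborts the walk.
func classifyStatErr(err error) (exists bool, fatal error) {
	switch {
	case err == nil:
		return true, nil
	case linuxerr.Equals(linuxerr.ENOENT, err):
		return false, nil
	default:
		return false, err
	}
}
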
diff --git a/pkg/sentry/watchdog/watchdog.go b/pkg/sentry/watchdog/watchdog.go
index 8d563d53a..e8f7d1f01 100644
--- a/pkg/sentry/watchdog/watchdog.go
+++ b/pkg/sentry/watchdog/watchdog.go
@@ -77,11 +77,6 @@ var DefaultOpts = Opts{
// trigger it.
const descheduleThreshold = 1 * time.Second
-var (
- stuckStartup = metric.MustCreateNewUint64Metric("/watchdog/stuck_startup_detected", true /* sync */, "Incremented once on startup watchdog timeout")
- stuckTasks = metric.MustCreateNewUint64Metric("/watchdog/stuck_tasks_detected", true /* sync */, "Cumulative count of stuck tasks detected")
-)
-
// Amount of time to wait before dumping the stack to the log again when the same task(s) remains stuck.
var stackDumpSameTaskPeriod = time.Minute
@@ -242,7 +237,6 @@ func (w *Watchdog) waitForStart() {
return
}
- stuckStartup.Increment()
metric.WeirdnessMetric.Increment("watchdog_stuck_startup")
var buf bytes.Buffer
@@ -316,7 +310,6 @@ func (w *Watchdog) runTurn() {
// unless they are surrounded by
// Task.UninterruptibleSleepStart/Finish.
tc = &offender{lastUpdateTime: lastUpdateTime}
- stuckTasks.Increment()
metric.WeirdnessMetric.Increment("watchdog_stuck_tasks")
newTaskFound = true
}